diff options
author | Samuel Holland <samuel@sholland.org> | 2017-09-15 14:37:26 -0500 |
---|---|---|
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-09-15 22:39:38 +0200 |
commit | c441326a37f5f8e1b237b475844206a8ca693f4d (patch) | |
tree | bccd25fd9b42212892b229e439bce77667c6c16e | |
parent | compat: add READ_ONCE/WRITE_ONCE for old kernels (diff) | |
download | wireguard-monolithic-historical-c441326a37f5f8e1b237b475844206a8ca693f4d.tar.xz wireguard-monolithic-historical-c441326a37f5f8e1b237b475844206a8ca693f4d.zip |
data: avoid running parallel/serial work on the same CPU
-rw-r--r-- | src/data.c | 9 |
1 file changed, 8 insertions, 1 deletion
@@ -98,8 +98,15 @@ static inline bool queue_enqueue_per_peer(struct crypt_queue *queue, struct cryp
 static inline void queue_enqueue_per_device(struct crypt_queue __percpu *queue, struct crypt_ctx *ctx, struct workqueue_struct *wq, int *next_cpu)
 {
+	struct crypt_queue *cpu_queue;
 	int cpu = cpumask_next_online(next_cpu);
-	struct crypt_queue *cpu_queue = per_cpu_ptr(queue, cpu);
+	/* Avoid running parallel work on the same CPU as the one handling all
+	 * of the serial work. This improves overall throughput and especially
+	 * throughput stability where we have at least two cores left for
+	 * parallel work. */
+	if (cpu == ctx->peer->serial_work_cpu && num_online_cpus() >= 3)
+		cpu = cpumask_next_online(next_cpu);
+	cpu_queue = per_cpu_ptr(queue, cpu);
 	queue_enqueue(cpu_queue, &ctx->per_device_head, 0);
 	queue_work_on(cpu, wq, &cpu_queue->work);
 }