From 6c51343dba4d3059e18b047ee52d1219c2ab7816 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Wed, 1 Aug 2018 19:09:18 +0200
Subject: queueing: ensure strictly ordered loads and stores

We don't want a consumer to read plaintext when it's supposed to be
reading ciphertext, which means we need to synchronize across cores.

Suggested-by: Jann Horn <jannh@google.com>
---
 src/compat/compat.h | 28 ++++++++++++++++++++++++++++
 src/queueing.h      |  6 +++---
 src/receive.c       |  2 +-
 src/send.c          |  2 +-
 4 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/src/compat/compat.h b/src/compat/compat.h
index 7943ab8..5b3075b 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -610,6 +610,34 @@ static inline void *skb_put_data(struct sk_buff *skb, const void *data, unsigned
 #define NAPI_STATE_NO_BUSY_POLL NAPI_STATE_SCHED
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#ifndef smp_store_release
+#define smp_store_release(p, v) \
+do { \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+#endif
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	smp_mb(); \
+	___p1; \
+})
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+#include <linux/atomic.h>
+#ifndef atomic_read_acquire
+#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
+#endif
+#ifndef atomic_set_release
+#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
+#endif
+#endif
+
 /* https://lkml.kernel.org/r/20170624021727.17835-1-Jason@zx2c4.com */
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
diff --git a/src/queueing.h b/src/queueing.h
index 52cac40..6a1de33 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -120,7 +120,7 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
 {
 	int cpu;
 
-	atomic_set(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
 	/* We first queue this up for the peer ingestion, but the consumer
 	 * will wait for the state to change to CRYPTED or DEAD before.
 	 */
@@ -142,7 +142,7 @@ static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_b
 	 * peer can be freed from below us.
 	 */
 	struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
-	atomic_set(&PACKET_CB(skb)->state, state);
+	atomic_set_release(&PACKET_CB(skb)->state, state);
 	queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &queue->work);
 	peer_put(peer);
 }
@@ -153,7 +153,7 @@ static inline void queue_enqueue_per_peer_napi(struct crypt_queue *queue, struct
 	 * peer can be freed from below us.
 	 */
 	struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
-	atomic_set(&PACKET_CB(skb)->state, state);
+	atomic_set_release(&PACKET_CB(skb)->state, state);
 	napi_schedule(&peer->napi);
 	peer_put(peer);
 }
diff --git a/src/receive.c b/src/receive.c
index 5e231c9..12af8ed 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -382,7 +382,7 @@ int packet_rx_poll(struct napi_struct *napi, int budget)
 	if (unlikely(budget <= 0))
 		return 0;
 
-	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) {
+	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(skb);
 		keypair = PACKET_CB(skb)->keypair;
diff --git a/src/send.c b/src/send.c
index 823b344..788ff60 100644
--- a/src/send.c
+++ b/src/send.c
@@ -223,7 +223,7 @@ void packet_tx_worker(struct work_struct *work)
 	struct sk_buff *first;
 	enum packet_state state;
 
-	while ((first = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) {
+	while ((first = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read_acquire(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(first);
 		keypair = PACKET_CB(first)->keypair;
-- 
cgit v1.2.3-59-g8ed1b
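The patch pairs a release store by the crypto workers (atomic_set_release when
marking a packet CRYPTED) with an acquire load by the per-peer consumers
(atomic_read_acquire when peeking the ring), so that once a consumer observes
the new state, it is also guaranteed to observe the encrypted payload written
before it. Below is a minimal user-space sketch of that handoff, using C11
atomics in place of the kernel's smp_store_release()/smp_load_acquire(); the
struct, names, and payload are illustrative stand-ins, not WireGuard's actual
data structures.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	enum pkt_state { PKT_UNCRYPTED, PKT_CRYPTED };

	struct pkt {
		char data[16];
		_Atomic enum pkt_state state;
	};

	static struct pkt shared = { .data = "plaintext", .state = PKT_UNCRYPTED };

	/* Encryption worker: write the payload first, then publish the state
	 * change with release semantics, so the payload write cannot be
	 * reordered after the store the consumer keys off of. */
	static void *producer(void *arg)
	{
		strcpy(shared.data, "ciphertext");
		atomic_store_explicit(&shared.state, PKT_CRYPTED,
				      memory_order_release);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, producer, NULL);

		/* Consumer: an acquire load of the state guarantees that once
		 * we see PKT_CRYPTED, we also see the payload written before
		 * the release store -- never the stale plaintext. */
		while (atomic_load_explicit(&shared.state,
					    memory_order_acquire) != PKT_CRYPTED)
			; /* in the driver: leave it on the ring, retry later */

		printf("%s\n", shared.data); /* always prints "ciphertext" */
		pthread_join(t, NULL);
		return 0;
	}

With a plain atomic_set()/atomic_read() pair, nothing stops the compiler or a
weakly ordered CPU (arm64, POWER, etc.) from letting the consumer observe
state == CRYPTED while still reading the pre-encryption bytes, which is
exactly the plaintext-for-ciphertext mixup the commit message describes. The
compat.h fallbacks above provide the same guarantee on pre-3.14/pre-4.3
kernels by falling back to a full smp_mb() barrier around a plain access.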