author     Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-01 18:45:23 +0200
committer  Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-01 20:54:47 +0200
commit     67996396b3bd4efa893940b999811abfa2948016 (patch)
tree       916e104e031b0f64682e6b088e632d412a9caf57
parent     peer: ensure resources are freed when creation fails (diff)
download   wireguard-monolithic-historical-67996396b3bd4efa893940b999811abfa2948016.tar.xz
           wireguard-monolithic-historical-67996396b3bd4efa893940b999811abfa2948016.zip
queueing: document double-adding and reference conditions
-rw-r--r--  src/queueing.h  12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/src/queueing.h b/src/queueing.h
index 4967eef..52cac40 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -121,8 +121,14 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
int cpu;
atomic_set(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+ /* We first queue this up for peer ingestion, but the consumer will
+ * wait for the state to change to CRYPTED or DEAD before processing it.
+ */
if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
return -ENOSPC;
+ /* Then we queue it up in the device queue, whose workers consume the
+ * packet as soon as they can.
+ */
cpu = cpumask_next_online(next_cpu);
if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
return -EPIPE;
@@ -132,6 +138,9 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed out from under us.
+ */
struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
atomic_set(&PACKET_CB(skb)->state, state);
queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &queue->work);
@@ -140,6 +149,9 @@ static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_b
static inline void queue_enqueue_per_peer_napi(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
{
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed out from under us.
+ */
struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
atomic_set(&PACKET_CB(skb)->state, state);
napi_schedule(&peer->napi);
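
The comments in the first hunk describe a double-add scheme: a packet is
produced into both a per-peer ring, which fixes the order packets must leave
in, and a per-device ring, which feeds whichever CPU is free to do the
crypto; the per-peer consumer then gates on the packet state. The userspace
sketch below illustrates that scheme. All names in it (struct ring, struct
pkt, enqueue, crypt_worker, peer_consumer) are hypothetical stand-ins for the
kernel's ptr_ring, sk_buff, and workqueue machinery, not the driver's actual
API, and the toy ring is not safe for concurrent use.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum pkt_state { PKT_UNCRYPTED, PKT_CRYPTED, PKT_DEAD };

struct pkt {
	_Atomic enum pkt_state state;
	int payload;
};

#define RING_SIZE 8

/* Toy single-threaded ring standing in for ptr_ring. */
struct ring {
	struct pkt *slots[RING_SIZE];
	size_t head, tail;
};

static bool ring_produce(struct ring *r, struct pkt *p)
{
	if (r->head - r->tail == RING_SIZE)
		return false; /* full; the real code returns an error */
	r->slots[r->head++ % RING_SIZE] = p;
	return true;
}

static struct pkt *ring_peek(struct ring *r)
{
	return r->head == r->tail ? NULL : r->slots[r->tail % RING_SIZE];
}

static void ring_advance(struct ring *r)
{
	r->tail++;
}

/* Double-add, as in queue_enqueue_per_device_and_peer(): the peer ring
 * fixes per-peer ordering, the device ring feeds the crypto workers.
 */
static int enqueue(struct ring *peer_ring, struct ring *device_ring,
		   struct pkt *p)
{
	atomic_store(&p->state, PKT_UNCRYPTED);
	if (!ring_produce(peer_ring, p))
		return -1; /* -ENOSPC in the real code */
	if (!ring_produce(device_ring, p))
		return -2; /* -EPIPE in the real code */
	return 0;
}

/* Device-ring consumer: do the work, then publish the new state. */
static void crypt_worker(struct ring *device_ring)
{
	struct pkt *p;

	while ((p = ring_peek(device_ring))) {
		ring_advance(device_ring);
		atomic_store(&p->state, PKT_CRYPTED); /* or PKT_DEAD on error */
	}
}

/* Peer-ring consumer: peeks at the head and stops at the first packet
 * still UNCRYPTED, so packets leave in order no matter which worker
 * finished first.
 */
static void peer_consumer(struct ring *peer_ring)
{
	struct pkt *p;

	while ((p = ring_peek(peer_ring)) &&
	       atomic_load(&p->state) != PKT_UNCRYPTED) {
		ring_advance(peer_ring);
		if (atomic_load(&p->state) == PKT_CRYPTED)
			printf("send packet %d\n", p->payload);
		/* PKT_DEAD packets are silently dropped */
	}
}

int main(void)
{
	struct ring peer_ring = { 0 }, device_ring = { 0 };
	struct pkt a = { .payload = 1 }, b = { .payload = 2 };

	enqueue(&peer_ring, &device_ring, &a);
	enqueue(&peer_ring, &device_ring, &b);
	crypt_worker(&device_ring);
	peer_consumer(&peer_ring);
	return 0;
}

In the sketch the consumer simply stops at the first UNCRYPTED packet; in the
patch, the crypt worker re-kicks the per-peer worker after flipping the
state, which is exactly what the queue_enqueue_per_peer() shown in the second
hunk does.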
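The comment repeated in the last two hunks records a lifetime rule rather
than an ordering one: atomic_set publishes the packet's new state, and from
that instant a concurrent consumer may finish with the packet and drop what
could be the last reference to the peer, so peer_get() must pin the peer
first. A minimal sketch of that constraint, again with hypothetical userspace
stand-ins (struct peer, publish) for the driver's reference counting:

#include <stdatomic.h>
#include <stdlib.h>

struct peer {
	_Atomic int refcount;
};

static struct peer *peer_get(struct peer *p)
{
	atomic_fetch_add(&p->refcount, 1);
	return p;
}

static void peer_put(struct peer *p)
{
	/* Dropping the last reference frees the peer. */
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

/* Publish a packet state the way queue_enqueue_per_peer() does: pin the
 * peer BEFORE the store, since the store is what lets a concurrent
 * consumer run and potentially drop the last reference.
 */
static void publish(struct peer *peer, _Atomic int *state, int new_state)
{
	struct peer *ref = peer_get(peer); /* take our own reference... */
	atomic_store(state, new_state);    /* ...then let consumers see it */
	/* peer is still safe to touch here, e.g. to schedule its worker */
	peer_put(ref);
}

int main(void)
{
	struct peer *p = calloc(1, sizeof(*p));
	_Atomic int state = 0;

	if (!p)
		return 1;
	atomic_store(&p->refcount, 1); /* creator's reference */
	publish(p, &state, 1);
	peer_put(p);                   /* creator drops its reference */
	return 0;
}

Swapping the two lines in publish() would reintroduce the use-after-free the
comment warns about: the consumer could free the peer between the store and
the later dereference.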