summary | refs | log | tree | commit | diff | stats | homepage
diff options
context:
space:
mode:
author: Jason A. Donenfeld <Jason@zx2c4.com> 2018-08-01 15:35:42 +0200
committer: Jason A. Donenfeld <Jason@zx2c4.com> 2018-08-01 19:24:04 +0200
commit 13f19049ed1e19b26e80e0b549744a4ca2c44443 (patch)
tree 421deb4f6d06a1d90acd9b5326a9ec583d5b0d86
parent: allowedips: free root inside of RCU callback (diff)
download: wireguard-monolithic-historical-13f19049ed1e19b26e80e0b549744a4ca2c44443.tar.xz
wireguard-monolithic-historical-13f19049ed1e19b26e80e0b549744a4ca2c44443.zip
queueing: keep reference to peer after setting atomic state bit
After we atomic_set, the peer is allowed to be freed, which means if we want to continue to reference it, we need to bump the reference count. This was introduced a few commits ago by b713ab0e when implementing some simplification suggestions.
-rw-r--r-- src/peer.h     | 3
-rw-r--r-- src/queueing.h | 8
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/src/peer.h b/src/peer.h
index 70120ee..059fa64 100644
--- a/src/peer.h
+++ b/src/peer.h
@@ -63,9 +63,10 @@ struct wireguard_peer {
struct wireguard_peer *peer_create(struct wireguard_device *wg, const u8 public_key[NOISE_PUBLIC_KEY_LEN], const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
struct wireguard_peer * __must_check peer_get_maybe_zero(struct wireguard_peer *peer);
-static inline void peer_get(struct wireguard_peer *peer)
+static inline struct wireguard_peer *peer_get(struct wireguard_peer *peer)
{
kref_get(&peer->refcount);
+ return peer;
}
void peer_put(struct wireguard_peer *peer);
void peer_remove(struct wireguard_peer *peer);
diff --git a/src/queueing.h b/src/queueing.h
index 3fb7b5c..4967eef 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -132,14 +132,18 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
{
+ struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
atomic_set(&PACKET_CB(skb)->state, state);
- queue_work_on(cpumask_choose_online(&PACKET_PEER(skb)->serial_work_cpu, PACKET_PEER(skb)->internal_id), PACKET_PEER(skb)->device->packet_crypt_wq, &queue->work);
+ queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &queue->work);
+ peer_put(peer);
}
static inline void queue_enqueue_per_peer_napi(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
{
+ struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
atomic_set(&PACKET_CB(skb)->state, state);
- napi_schedule(&PACKET_PEER(skb)->napi);
+ napi_schedule(&peer->napi);
+ peer_put(peer);
}
#ifdef DEBUG