summary | refs | log | tree | commit | diff | stats | homepage
path: root/src/send.c
diff options
context:
space:
mode:
author    Jason A. Donenfeld <Jason@zx2c4.com>  2016-11-29 22:05:17 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>  2016-11-29 22:13:52 +0100
commit  c37fb9031a1753af23a4a07c4478f848a4e6d193 (patch)
tree    0be3597d503ed848ae1f562aa6588152caeddb7d /src/send.c
parent  tests: be sure we get all messages (diff)
download  wireguard-monolithic-historical-c37fb9031a1753af23a4a07c4478f848a4e6d193.tar.xz
          wireguard-monolithic-historical-c37fb9031a1753af23a4a07c4478f848a4e6d193.zip
send: send packet initiation only after requeuing to prevent race
Diffstat (limited to '')
-rw-r--r--  src/send.c | 17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/src/send.c b/src/send.c
index b2ae84b..5f3f165 100644
--- a/src/send.c
+++ b/src/send.c
@@ -158,11 +158,6 @@ int packet_send_queue(struct wireguard_peer *peer)
switch (packet_create_data(&queue, peer, message_create_data_done)) {
case 0:
break;
- case -ENOKEY:
- /* ENOKEY means that we don't have a valid session for the peer, which
- * means we should initiate a session, and then requeue everything. */
- packet_queue_handshake_initiation(peer);
- goto requeue;
case -EBUSY:
/* EBUSY happens when the parallel workers are all filled up, in which
* case we should requeue everything. */
@@ -170,7 +165,7 @@ int packet_send_queue(struct wireguard_peer *peer)
/* First, we mark that we should try to do this later, when existing
* jobs are done. */
peer->need_resend_queue = true;
- requeue:
+
/* We stick the remaining skbs from local_queue at the top of the peer's
* queue again, setting the top of local_queue to be the skb that begins
* the requeueing. */
@@ -178,6 +173,16 @@ int packet_send_queue(struct wireguard_peer *peer)
skb_queue_splice(&queue, &peer->tx_packet_queue);
spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
break;
+ case -ENOKEY:
+ /* ENOKEY means that we don't have a valid session for the peer, which
+ * means we should initiate a session, but after requeuing like above. */
+
+ spin_lock_irqsave(&peer->tx_packet_queue.lock, flags);
+ skb_queue_splice(&queue, &peer->tx_packet_queue);
+ spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
+
+ packet_queue_handshake_initiation(peer);
+ break;
default:
/* If we failed for any other reason, we want to just free the packets and
* forget about them. We do this unlocked, since we're the only ones with