author    Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-04 02:50:51 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-04 05:14:15 +0200
commit    b003c4e93d6e1ce0cae32901dc7b5d425453800d
tree      1e6cdf7cd6e3cfdadf6cc86e58997e821d64bd1c /src/send.c
parent    version: bump snapshot
send: switch handshake stamp to an atomic
Rather than abusing the handshake lock, we're much better off just using a boring atomic64 for this. It's simpler and performs better. Also, while we're at it, we set the handshake stamp both before and after the calculations, in case the calculations block for a really long time waiting for the RNG to initialize. Otherwise it's possible that when the RNG finally initializes, two handshakes are sent back to back, which isn't sensible.
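Since the diffstat below is limited to src/send.c, the declaration change that makes the atomic64_read()/atomic64_set() calls possible is not shown. As a rough sketch of what the commit message implies elsewhere in the tree (the field placement and the initializer are assumptions, not part of this diff):

    /* In struct wireguard_peer (peer.h), the stamp would become an
     * atomic64_t instead of a plain u64 guarded by the handshake rwsem
     * (assumed, not shown in this diff):
     */
    struct wireguard_peer {
            /* ... */
            atomic64_t last_sent_handshake; /* was: u64 last_sent_handshake */
            /* ... */
    };

    /* At peer creation, the stamp would be seeded far enough in the past
     * that the first handshake is not rate limited (assumed):
     */
    atomic64_set(&peer->last_sent_handshake,
                 ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);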
Diffstat (limited to 'src/send.c')
 src/send.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/src/send.c b/src/send.c
index 481d153..3fc2a17 100644
--- a/src/send.c
+++ b/src/send.c
@@ -23,20 +23,17 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
 {
 	struct message_handshake_initiation packet;
 
-	down_write(&peer->handshake.lock);
-	if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT)) {
-		up_write(&peer->handshake.lock);
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT))
 		return; /* This function is rate limited. */
-	}
-	peer->last_sent_handshake = ktime_get_boot_fast_ns();
-	up_write(&peer->handshake.lock);
+	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 
 	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
 
 	if (noise_handshake_create_initiation(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
 		timers_any_authenticated_packet_traversal(peer);
 		timers_any_authenticated_packet_sent(peer);
+		atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 		socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_initiation), HANDSHAKE_DSCP);
 		timers_handshake_initiated(peer);
 	}
@@ -55,11 +52,11 @@ void packet_send_queued_handshake_initiation(struct wireguard_peer *peer, bool is_retry)
 	if (!is_retry)
 		peer->timer_handshake_attempts = 0;
 
-	/* First checking the timestamp here is just an optimization; it will
-	 * be caught while properly locked inside the actual work queue.
-	 */
 	rcu_read_lock_bh();
-	if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT) || unlikely(peer->is_dead))
+	/* We check last_sent_handshake here in addition to the actual function we're queueing
+	 * up, so that we don't queue things if not strictly necessary.
+	 */
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT) || unlikely(peer->is_dead))
 		goto out;
 
 	peer_get(peer);
@@ -74,8 +71,8 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
 {
 	struct message_handshake_response packet;
 
+	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
-	peer->last_sent_handshake = ktime_get_boot_fast_ns();
 
 	if (noise_handshake_create_response(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
@@ -83,6 +80,7 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
 		timers_session_derived(peer);
 		timers_any_authenticated_packet_traversal(peer);
 		timers_any_authenticated_packet_sent(peer);
+		atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 		socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_response), HANDSHAKE_DSCP);
 	}
 }
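For reference, the rate limiting above hinges on has_expired() treating peer->last_sent_handshake as a ktime_get_boot_fast_ns() timestamp. A plausible definition, consistent with how it is called in this file but not part of this diff (the helper body and its exact signature are assumptions):

    /* Assumed helper: true once `birthday_nanoseconds` (a boottime stamp)
     * is more than `expiration_seconds` in the past. The s64 casts keep
     * the comparison well-defined if the addition wraps.
     */
    static inline bool has_expired(u64 birthday_nanoseconds, u64 expiration_seconds)
    {
            return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
                   <= (s64)ktime_get_boot_fast_ns();
    }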