author    Jason A. Donenfeld <Jason@zx2c4.com>    2018-08-04 02:50:51 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2018-08-04 05:14:15 +0200
commit    b003c4e93d6e1ce0cae32901dc7b5d425453800d
tree      1e6cdf7cd6e3cfdadf6cc86e58997e821d64bd1c
parent    version: bump snapshot
send: switch handshake stamp to an atomic
Rather than abusing the handshake lock, we're much better off just using a boring atomic64 for this. It's simpler and performs better.

Also, while we're at it, we set the handshake stamp both before and after the calculations, in case the calculations block for a really long time waiting for the RNG to initialize. Otherwise it's possible that when the RNG finally initializes, two handshakes are sent back to back, which isn't sensible.
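To make the locking change concrete, here is a minimal userspace sketch of the same pattern using C11 atomics. It is illustrative only: now_ns(), send_handshake_initiation(), and CLOCK_MONOTONIC are stand-ins for the kernel's ktime_get_boot_fast_ns() and packet_send_handshake_initiation() (the kernel stamp follows the boot clock, so suspended time counts); only the stamp handling mirrors the patch below.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define REKEY_TIMEOUT 5ULL           /* seconds; WireGuard's Rekey-Timeout */
#define NSEC_PER_SEC  1000000000ULL

static _Atomic uint64_t last_sent_handshake;

/* Stand-in for ktime_get_boot_fast_ns(). */
static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * NSEC_PER_SEC + (uint64_t)ts.tv_nsec;
}

static bool has_expired(uint64_t stamp_ns, uint64_t timeout_sec)
{
	return stamp_ns + timeout_sec * NSEC_PER_SEC <= now_ns();
}

static void send_handshake_initiation(void)
{
	/* Rate limit with one atomic read; no lock taken or held. */
	if (!has_expired(atomic_load(&last_sent_handshake), REKEY_TIMEOUT))
		return;
	/* Stamp before the crypto, which can block waiting on the RNG... */
	atomic_store(&last_sent_handshake, now_ns());

	/* ... compute and transmit the initiation message here ... */

	/* ...and stamp again after, so that a long RNG wait cannot release
	 * two back-to-back handshakes the moment the RNG initializes. */
	atomic_store(&last_sent_handshake, now_ns());
}

The race this tolerates is benign: two callers may both observe an expired stamp and each send an initiation, costing at most a duplicate handshake, whereas the old code took a write lock on every send just to rule that out.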
-rw-r--r--  src/device.c   2
-rw-r--r--  src/peer.c     2
-rw-r--r--  src/peer.h     2
-rw-r--r--  src/send.c    20
4 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/src/device.c b/src/device.c
index fd1aa60..297125a 100644
--- a/src/device.c
+++ b/src/device.c
@@ -105,7 +105,7 @@ static int stop(struct net_device *dev)
 		timers_stop(peer);
 		noise_handshake_clear(&peer->handshake);
 		noise_keypairs_clear(&peer->keypairs);
-		peer->last_sent_handshake = ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC;
+		atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
 	}
 	mutex_unlock(&wg->device_update_lock);
 	skb_queue_purge(&wg->incoming_handshakes);
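One detail the commit message leaves implicit: both here and in peer_create() below, the stamp is initialized to "now minus (REKEY_TIMEOUT + 1) seconds", backdating it just past the rate-limit window so that the next has_expired(..., REKEY_TIMEOUT) check passes and the first handshake is never throttled.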
diff --git a/src/peer.c b/src/peer.c
index 97a472a..b9703d5 100644
--- a/src/peer.c
+++ b/src/peer.c
@@ -50,7 +50,7 @@ struct wireguard_peer *peer_create(struct wireguard_device *wg, const u8 public_
 	rwlock_init(&peer->endpoint_lock);
 	kref_init(&peer->refcount);
 	skb_queue_head_init(&peer->staged_packet_queue);
-	peer->last_sent_handshake = ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC;
+	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
 	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
 	netif_napi_add(wg->dev, &peer->napi, packet_rx_poll, NAPI_POLL_WEIGHT);
 	napi_enable(&peer->napi);
diff --git a/src/peer.h b/src/peer.h
index 8daa053..29d2e00 100644
--- a/src/peer.h
+++ b/src/peer.h
@@ -43,7 +43,7 @@ struct wireguard_peer {
 	struct dst_cache endpoint_cache;
 	rwlock_t endpoint_lock;
 	struct noise_handshake handshake;
-	u64 last_sent_handshake;
+	atomic64_t last_sent_handshake;
 	struct work_struct transmit_handshake_work, clear_peer_work;
 	struct cookie latest_cookie;
 	struct hlist_node pubkey_hash;
diff --git a/src/send.c b/src/send.c
index 481d153..3fc2a17 100644
--- a/src/send.c
+++ b/src/send.c
@@ -23,20 +23,17 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
 {
 	struct message_handshake_initiation packet;
 
-	down_write(&peer->handshake.lock);
-	if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT)) {
-		up_write(&peer->handshake.lock);
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT))
 		return; /* This function is rate limited. */
-	}
-	peer->last_sent_handshake = ktime_get_boot_fast_ns();
-	up_write(&peer->handshake.lock);
+	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 
 	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
 
 	if (noise_handshake_create_initiation(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
 		timers_any_authenticated_packet_traversal(peer);
 		timers_any_authenticated_packet_sent(peer);
+		atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 		socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_initiation), HANDSHAKE_DSCP);
 		timers_handshake_initiated(peer);
 	}
@@ -55,11 +52,11 @@ void packet_send_queued_handshake_initiation(struct wireguard_peer *peer, bool is_retry)
 	if (!is_retry)
 		peer->timer_handshake_attempts = 0;
 
-	/* First checking the timestamp here is just an optimization; it will
-	 * be caught while properly locked inside the actual work queue.
-	 */
 	rcu_read_lock_bh();
-	if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT) || unlikely(peer->is_dead))
+	/* We check last_sent_handshake here in addition to the actual function we're queueing
+	 * up, so that we don't queue things if not strictly necessary.
+	 */
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT) || unlikely(peer->is_dead))
 		goto out;
 
 	peer_get(peer);
@@ -74,8 +71,8 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
 {
 	struct message_handshake_response packet;
 
+	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
-	peer->last_sent_handshake = ktime_get_boot_fast_ns();
 
 	if (noise_handshake_create_response(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
@@ -83,6 +80,7 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
 			timers_session_derived(peer);
 			timers_any_authenticated_packet_traversal(peer);
 			timers_any_authenticated_packet_sent(peer);
+			atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
 			socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_response), HANDSHAKE_DSCP);
 		}
 	}
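For reference, has_expired() is not defined in this patch; from its call sites it is evidently a boottime comparison along these lines (a sketch inferred from context, not the verbatim helper):

static inline bool has_expired(u64 birthday_nanoseconds, u64 expiration_seconds)
{
	/* ktime_get_boot_fast_ns() follows the boot clock, so time spent
	 * suspended also counts toward expiration. */
	return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
	       <= (s64)ktime_get_boot_fast_ns();
}

With the stamp now an atomic64_t, this comparison operates on a value obtained by atomic64_read() with no lock held, on both the queueing path and the send paths above.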