about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	John Baldwin <jhb@FreeBSD.org>	2022-09-12 01:02:18 -0700
committer	John Baldwin <jhb@FreeBSD.org>	2022-10-27 17:35:58 -0700
commit	aee9dbb13d14ab66101050ee640fa6dc89a37a7b (patch)
tree	42fb4ece2b973510efae9d3bd7521ac55922163e
parent	compat: Add shims for atomic_load/store_bool. (diff)
download	wireguard-freebsd-aee9dbb13d14ab66101050ee640fa6dc89a37a7b.tar.xz
	wireguard-freebsd-aee9dbb13d14ab66101050ee640fa6dc89a37a7b.zip
Use atomic(9) instead of concurrency-kit atomics.
Kernel sanitizers only support atomic(9) operations.

Reported-by: Mark Johnston <markj@FreeBSD.org>
Signed-off-by: John Baldwin <jhb@FreeBSD.org>
-rw-r--r--	src/if_wg.c	50
-rw-r--r--	src/support.h	8
-rw-r--r--	src/wg_noise.c	76
3 files changed, 63 insertions(+), 71 deletions(-)
diff --git a/src/if_wg.c b/src/if_wg.c
index 04907e1..22fb995 100644
--- a/src/if_wg.c
+++ b/src/if_wg.c
@@ -792,10 +792,10 @@ wg_socket_set(struct wg_softc *sc, struct socket *new_so4, struct socket *new_so
sx_assert(&sc->sc_lock, SX_XLOCKED);
- so4 = ck_pr_load_ptr(&so->so_so4);
- so6 = ck_pr_load_ptr(&so->so_so6);
- ck_pr_store_ptr(&so->so_so4, new_so4);
- ck_pr_store_ptr(&so->so_so6, new_so6);
+ so4 = atomic_load_ptr(&so->so_so4);
+ so6 = atomic_load_ptr(&so->so_so6);
+ atomic_store_ptr(&so->so_so4, new_so4);
+ atomic_store_ptr(&so->so_so6, new_so6);
if (!so4 && !so6)
return;
@@ -898,8 +898,8 @@ wg_send(struct wg_softc *sc, struct wg_endpoint *e, struct mbuf *m)
sa = &e->e_remote.r_sa;
NET_EPOCH_ENTER(et);
- so4 = ck_pr_load_ptr(&so->so_so4);
- so6 = ck_pr_load_ptr(&so->so_so6);
+ so4 = atomic_load_ptr(&so->so_so4);
+ so6 = atomic_load_ptr(&so->so_so6);
if (e->e_remote.r_sa.sa_family == AF_INET && so4 != NULL)
ret = sosend(so4, sa, NULL, m, control, 0, curthread);
else if (e->e_remote.r_sa.sa_family == AF_INET6 && so6 != NULL)
@@ -952,7 +952,7 @@ out:
static void
wg_timers_enable(struct wg_peer *peer)
{
- ck_pr_store_bool(&peer->p_enabled, true);
+ atomic_store_bool(&peer->p_enabled, true);
wg_timers_run_persistent_keepalive(peer);
}
@@ -971,9 +971,9 @@ wg_timers_disable(struct wg_peer *peer)
*
* We should also pull NET_EPOCH_WAIT out of the FOREACH(peer) loops, but the
* performance impact is acceptable for the time being. */
- ck_pr_store_bool(&peer->p_enabled, false);
+ atomic_store_bool(&peer->p_enabled, false);
NET_EPOCH_WAIT();
- ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+ atomic_store_bool(&peer->p_need_another_keepalive, false);
callout_stop(&peer->p_new_handshake);
callout_stop(&peer->p_send_keepalive);
@@ -987,9 +987,9 @@ wg_timers_set_persistent_keepalive(struct wg_peer *peer, uint16_t interval)
{
struct epoch_tracker et;
if (interval != peer->p_persistent_keepalive_interval) {
- ck_pr_store_16(&peer->p_persistent_keepalive_interval, interval);
+ atomic_store_16(&peer->p_persistent_keepalive_interval, interval);
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ if (atomic_load_bool(&peer->p_enabled))
wg_timers_run_persistent_keepalive(peer);
NET_EPOCH_EXIT(et);
}
@@ -1009,7 +1009,7 @@ wg_timers_event_data_sent(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake))
+ if (atomic_load_bool(&peer->p_enabled) && !callout_pending(&peer->p_new_handshake))
callout_reset(&peer->p_new_handshake, MSEC_2_TICKS(
NEW_HANDSHAKE_TIMEOUT * 1000 +
arc4random_uniform(REKEY_TIMEOUT_JITTER)),
@@ -1022,13 +1022,13 @@ wg_timers_event_data_received(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled)) {
+ if (atomic_load_bool(&peer->p_enabled)) {
if (!callout_pending(&peer->p_send_keepalive))
callout_reset(&peer->p_send_keepalive,
MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
wg_timers_run_send_keepalive, peer);
else
- ck_pr_store_bool(&peer->p_need_another_keepalive, true);
+ atomic_store_bool(&peer->p_need_another_keepalive, true);
}
NET_EPOCH_EXIT(et);
}
@@ -1051,8 +1051,8 @@ wg_timers_event_any_authenticated_packet_traversal(struct wg_peer *peer)
struct epoch_tracker et;
uint16_t interval;
NET_EPOCH_ENTER(et);
- interval = ck_pr_load_16(&peer->p_persistent_keepalive_interval);
- if (ck_pr_load_bool(&peer->p_enabled) && interval > 0)
+ interval = atomic_load_16(&peer->p_persistent_keepalive_interval);
+ if (atomic_load_bool(&peer->p_enabled) && interval > 0)
callout_reset(&peer->p_persistent_keepalive,
MSEC_2_TICKS(interval * 1000),
wg_timers_run_persistent_keepalive, peer);
@@ -1064,7 +1064,7 @@ wg_timers_event_handshake_initiated(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ if (atomic_load_bool(&peer->p_enabled))
callout_reset(&peer->p_retry_handshake, MSEC_2_TICKS(
REKEY_TIMEOUT * 1000 +
arc4random_uniform(REKEY_TIMEOUT_JITTER)),
@@ -1077,7 +1077,7 @@ wg_timers_event_handshake_complete(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled)) {
+ if (atomic_load_bool(&peer->p_enabled)) {
mtx_lock(&peer->p_handshake_mtx);
callout_stop(&peer->p_retry_handshake);
peer->p_handshake_retries = 0;
@@ -1093,7 +1093,7 @@ wg_timers_event_session_derived(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ if (atomic_load_bool(&peer->p_enabled))
callout_reset(&peer->p_zero_key_material,
MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
wg_timers_run_zero_key_material, peer);
@@ -1105,7 +1105,7 @@ wg_timers_event_want_initiation(struct wg_peer *peer)
{
struct epoch_tracker et;
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled))
+ if (atomic_load_bool(&peer->p_enabled))
wg_timers_run_send_initiation(peer, false);
NET_EPOCH_EXIT(et);
}
@@ -1145,7 +1145,7 @@ wg_timers_run_retry_handshake(void *_peer)
callout_stop(&peer->p_send_keepalive);
wg_queue_purge(&peer->p_stage_queue);
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) &&
+ if (atomic_load_bool(&peer->p_enabled) &&
!callout_pending(&peer->p_zero_key_material))
callout_reset(&peer->p_zero_key_material,
MSEC_2_TICKS(REJECT_AFTER_TIME * 3 * 1000),
@@ -1162,9 +1162,9 @@ wg_timers_run_send_keepalive(void *_peer)
wg_send_keepalive(peer);
NET_EPOCH_ENTER(et);
- if (ck_pr_load_bool(&peer->p_enabled) &&
- ck_pr_load_bool(&peer->p_need_another_keepalive)) {
- ck_pr_store_bool(&peer->p_need_another_keepalive, false);
+ if (atomic_load_bool(&peer->p_enabled) &&
+ atomic_load_bool(&peer->p_need_another_keepalive)) {
+ atomic_store_bool(&peer->p_need_another_keepalive, false);
callout_reset(&peer->p_send_keepalive,
MSEC_2_TICKS(KEEPALIVE_TIMEOUT * 1000),
wg_timers_run_send_keepalive, peer);
@@ -1201,7 +1201,7 @@ wg_timers_run_persistent_keepalive(void *_peer)
{
struct wg_peer *peer = _peer;
- if (ck_pr_load_16(&peer->p_persistent_keepalive_interval) > 0)
+ if (atomic_load_16(&peer->p_persistent_keepalive_interval) > 0)
wg_send_keepalive(peer);
}
diff --git a/src/support.h b/src/support.h
index ee6b03c..ddaae38 100644
--- a/src/support.h
+++ b/src/support.h
@@ -23,14 +23,6 @@
#define IFT_WIREGUARD IFT_PPP
#endif
-#ifndef ck_pr_store_bool
-#define ck_pr_store_bool(dst, val) ck_pr_store_8((uint8_t *)(dst), (uint8_t)(val))
-#endif
-
-#ifndef ck_pr_load_bool
-#define ck_pr_load_bool(src) ((bool)ck_pr_load_8((uint8_t *)(src)))
-#endif
-
static inline int
sogetsockaddr(struct socket *so, struct sockaddr **nam)
{
diff --git a/src/wg_noise.c b/src/wg_noise.c
index d562ed1..c26180a 100644
--- a/src/wg_noise.c
+++ b/src/wg_noise.c
@@ -562,16 +562,16 @@ noise_remote_keypairs_clear(struct noise_remote *r)
struct noise_keypair *kp;
mtx_lock(&r->r_keypair_mtx);
- kp = ck_pr_load_ptr(&r->r_next);
- ck_pr_store_ptr(&r->r_next, NULL);
+ kp = atomic_load_ptr(&r->r_next);
+ atomic_store_ptr(&r->r_next, NULL);
noise_keypair_drop(kp);
- kp = ck_pr_load_ptr(&r->r_current);
- ck_pr_store_ptr(&r->r_current, NULL);
+ kp = atomic_load_ptr(&r->r_current);
+ atomic_store_ptr(&r->r_current, NULL);
noise_keypair_drop(kp);
- kp = ck_pr_load_ptr(&r->r_previous);
- ck_pr_store_ptr(&r->r_previous, NULL);
+ kp = atomic_load_ptr(&r->r_previous);
+ atomic_store_ptr(&r->r_previous, NULL);
noise_keypair_drop(kp);
mtx_unlock(&r->r_keypair_mtx);
}
@@ -585,12 +585,12 @@ noise_remote_expire_current(struct noise_remote *r)
noise_remote_handshake_clear(r);
NET_EPOCH_ENTER(et);
- kp = ck_pr_load_ptr(&r->r_next);
+ kp = atomic_load_ptr(&r->r_next);
if (kp != NULL)
- ck_pr_store_bool(&kp->kp_can_send, false);
- kp = ck_pr_load_ptr(&r->r_current);
+ atomic_store_bool(&kp->kp_can_send, false);
+ kp = atomic_load_ptr(&r->r_current);
if (kp != NULL)
- ck_pr_store_bool(&kp->kp_can_send, false);
+ atomic_store_bool(&kp->kp_can_send, false);
NET_EPOCH_EXIT(et);
}
@@ -604,24 +604,24 @@ noise_add_new_keypair(struct noise_local *l, struct noise_remote *r,
/* Insert into the keypair table */
mtx_lock(&r->r_keypair_mtx);
- next = ck_pr_load_ptr(&r->r_next);
- current = ck_pr_load_ptr(&r->r_current);
- previous = ck_pr_load_ptr(&r->r_previous);
+ next = atomic_load_ptr(&r->r_next);
+ current = atomic_load_ptr(&r->r_current);
+ previous = atomic_load_ptr(&r->r_previous);
if (kp->kp_is_initiator) {
if (next != NULL) {
- ck_pr_store_ptr(&r->r_next, NULL);
- ck_pr_store_ptr(&r->r_previous, next);
+ atomic_store_ptr(&r->r_next, NULL);
+ atomic_store_ptr(&r->r_previous, next);
noise_keypair_drop(current);
} else {
- ck_pr_store_ptr(&r->r_previous, current);
+ atomic_store_ptr(&r->r_previous, current);
}
noise_keypair_drop(previous);
- ck_pr_store_ptr(&r->r_current, kp);
+ atomic_store_ptr(&r->r_current, kp);
} else {
- ck_pr_store_ptr(&r->r_next, kp);
+ atomic_store_ptr(&r->r_next, kp);
noise_keypair_drop(next);
- ck_pr_store_ptr(&r->r_previous, NULL);
+ atomic_store_ptr(&r->r_previous, NULL);
noise_keypair_drop(previous);
}
@@ -702,10 +702,10 @@ noise_keypair_current(struct noise_remote *r)
struct noise_keypair *kp, *ret = NULL;
NET_EPOCH_ENTER(et);
- kp = ck_pr_load_ptr(&r->r_current);
- if (kp != NULL && ck_pr_load_bool(&kp->kp_can_send)) {
+ kp = atomic_load_ptr(&r->r_current);
+ if (kp != NULL && atomic_load_bool(&kp->kp_can_send)) {
if (noise_timer_expired(kp->kp_birthdate, REJECT_AFTER_TIME, 0))
- ck_pr_store_bool(&kp->kp_can_send, false);
+ atomic_store_bool(&kp->kp_can_send, false);
else if (refcount_acquire_if_not_zero(&kp->kp_refcnt))
ret = kp;
}
@@ -726,20 +726,20 @@ noise_keypair_received_with(struct noise_keypair *kp)
struct noise_keypair *old;
struct noise_remote *r = kp->kp_remote;
- if (kp != ck_pr_load_ptr(&r->r_next))
+ if (kp != atomic_load_ptr(&r->r_next))
return (0);
mtx_lock(&r->r_keypair_mtx);
- if (kp != ck_pr_load_ptr(&r->r_next)) {
+ if (kp != atomic_load_ptr(&r->r_next)) {
mtx_unlock(&r->r_keypair_mtx);
return (0);
}
- old = ck_pr_load_ptr(&r->r_previous);
- ck_pr_store_ptr(&r->r_previous, ck_pr_load_ptr(&r->r_current));
+ old = atomic_load_ptr(&r->r_previous);
+ atomic_store_ptr(&r->r_previous, atomic_load_ptr(&r->r_current));
noise_keypair_drop(old);
- ck_pr_store_ptr(&r->r_current, kp);
- ck_pr_store_ptr(&r->r_next, NULL);
+ atomic_store_ptr(&r->r_current, kp);
+ atomic_store_ptr(&r->r_next, NULL);
mtx_unlock(&r->r_keypair_mtx);
return (ECONNRESET);
@@ -791,11 +791,11 @@ noise_keypair_remote(struct noise_keypair *kp)
int
noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send)
{
- if (!ck_pr_load_bool(&kp->kp_can_send))
+ if (!atomic_load_bool(&kp->kp_can_send))
return (EINVAL);
#ifdef __LP64__
- *send = ck_pr_faa_64(&kp->kp_nonce_send, 1);
+ *send = atomic_fetchadd_64(&kp->kp_nonce_send, 1);
#else
rw_wlock(&kp->kp_nonce_lock);
*send = kp->kp_nonce_send++;
@@ -803,7 +803,7 @@ noise_keypair_nonce_next(struct noise_keypair *kp, uint64_t *send)
#endif
if (*send < REJECT_AFTER_MESSAGES)
return (0);
- ck_pr_store_bool(&kp->kp_can_send, false);
+ atomic_store_bool(&kp->kp_can_send, false);
return (EINVAL);
}
@@ -834,7 +834,7 @@ noise_keypair_nonce_check(struct noise_keypair *kp, uint64_t recv)
(i + index_current) &
((COUNTER_BITS_TOTAL / COUNTER_BITS) - 1)] = 0;
#ifdef __LP64__
- ck_pr_store_64(&kp->kp_nonce_recv, recv);
+ atomic_store_64(&kp->kp_nonce_recv, recv);
#else
kp->kp_nonce_recv = recv;
#endif
@@ -861,12 +861,12 @@ noise_keep_key_fresh_send(struct noise_remote *r)
uint64_t nonce;
NET_EPOCH_ENTER(et);
- current = ck_pr_load_ptr(&r->r_current);
- keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send);
+ current = atomic_load_ptr(&r->r_current);
+ keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send);
if (!keep_key_fresh)
goto out;
#ifdef __LP64__
- nonce = ck_pr_load_64(&current->kp_nonce_send);
+ nonce = atomic_load_64(&current->kp_nonce_send);
#else
rw_rlock(&current->kp_nonce_lock);
nonce = current->kp_nonce_send;
@@ -890,8 +890,8 @@ noise_keep_key_fresh_recv(struct noise_remote *r)
int keep_key_fresh;
NET_EPOCH_ENTER(et);
- current = ck_pr_load_ptr(&r->r_current);
- keep_key_fresh = current != NULL && ck_pr_load_bool(&current->kp_can_send) &&
+ current = atomic_load_ptr(&r->r_current);
+ keep_key_fresh = current != NULL && atomic_load_bool(&current->kp_can_send) &&
current->kp_is_initiator && noise_timer_expired(current->kp_birthdate,
REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT, 0);
NET_EPOCH_EXIT(et);
@@ -919,7 +919,7 @@ noise_keypair_decrypt(struct noise_keypair *kp, uint64_t nonce, struct mbuf *m)
int ret;
#ifdef __LP64__
- cur_nonce = ck_pr_load_64(&kp->kp_nonce_recv);
+ cur_nonce = atomic_load_64(&kp->kp_nonce_recv);
#else
rw_rlock(&kp->kp_nonce_lock);
cur_nonce = kp->kp_nonce_recv;