Diffstat (limited to 'src/if_wg.c')
-rw-r--r--  src/if_wg.c  1429
1 files changed, 1322 insertions, 107 deletions
diff --git a/src/if_wg.c b/src/if_wg.c
index 3f1a1c284db..88604065c48 100644
--- a/src/if_wg.c
+++ b/src/if_wg.c
@@ -28,16 +28,13 @@
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/protosw.h>
-#include <sys/mpq.h>
-#include <sys/fixedmap.h>
-#include <sys/bloombucket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/pfvar.h>
-#include <net/wireguard.h>
#include <net/if_wg.h>
+#include <net/if_wg_vars.h>
#include <netinet/in.h>
#include <netinet/ip.h>
@@ -45,6 +42,10 @@
#include <netinet/icmp6.h>
#include <netinet/in_pcb.h>
+#include <crypto/blake2s.h>
+#include <crypto/curve25519.h>
+#include <crypto/chachapoly.h>
+
#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
@@ -63,6 +64,19 @@ struct wg_tag {
} t_state;
};
+/*
+struct wg_tag {
+ union wg_ip t_ip;
+ struct wg_peer *t_peer;
+ struct noise_keypair *t_keypair;
+ enum wg_pkt_state {
+ WG_PKT_STATE_NEW = 0,
+ WG_PKT_STATE_DONE,
+ WG_PKT_STATE_DEAD,
+ } t_state;
+};
+*/
+
struct wg_route {
union wg_ip r_ip;
struct wg_peer *r_peer;
@@ -98,9 +112,12 @@ struct wg_softc {
struct mpq sc_rx_slow_queue;
};
-struct bloom_bucket wg_bb;
-struct bloom_bucket wg_bb_verified;
-
+CTASSERT(WG_KEY_SIZE == CHACHA20POLY1305_KEY_SIZE);
+CTASSERT(WG_HASH_SIZE == BLAKE2S_HASH_SIZE);
+CTASSERT(WG_KEY_SIZE == BLAKE2S_KEY_SIZE);
+CTASSERT(WG_KEY_SIZE == WG_HASH_SIZE);
+CTASSERT(WG_MSG_PADDING_SIZE == CHACHA20POLY1305_AUTHTAG_SIZE);
+CTASSERT(WG_XNONCE_SIZE == XCHACHA20POLY1305_NONCE_SIZE);
#define DPRINTF(sc, str, ...) do { if (ISSET((sc)->sc_if.if_flags, IFF_DEBUG)) \
printf("%s: " str, (sc)->sc_if.if_xname, __VA_ARGS__); } while (0)
@@ -118,6 +135,1028 @@ struct bloom_bucket wg_bb_verified;
#define drop_pkt_err(e) do { err = e; goto drop; } while (0)
#define peer_route(p) ((struct wg_route *)(p)->p_arg)
+#define ret_error(err) do { ret = err; goto leave; } while (0)
+
+/* wireguard.c */
+void wg_kdf(uint8_t [WG_HASH_SIZE], uint8_t [WG_HASH_SIZE],
+ uint8_t [WG_HASH_SIZE], uint8_t [WG_KEY_SIZE], uint8_t *, size_t);
+void wg_hash2(uint8_t [WG_HASH_SIZE], uint8_t *, size_t, uint8_t *, size_t);
+void wg_mix_hash(struct wg_handshake *, uint8_t *, size_t);
+void wg_mix_dh(struct wg_handshake *, uint8_t [WG_KEY_SIZE], uint8_t [WG_KEY_SIZE]);
+void wg_mix_psk(struct wg_handshake *, uint8_t [WG_KEY_SIZE]);
+void wg_handshake_encrypt(struct wg_handshake *, uint8_t *, uint8_t *, size_t);
+int wg_handshake_decrypt(struct wg_handshake *, uint8_t *, uint8_t *, size_t);
+void wg_timestamp_get(uint8_t [WG_TIMESTAMP_SIZE]);
+int wg_timespec_timedout(struct timespec *, time_t);
+enum wg_pkt_type wg_pkt_type(uint8_t *, size_t);
+void wg_keypair_generate(struct wg_keypair *);
+enum wg_error wg_msg_initiation_valid_mac2(struct wg_msg_initiation *, struct wg_cookie *);
+enum wg_error wg_msg_response_valid_mac2(struct wg_msg_response *, struct wg_cookie *);
+
+struct wg_session *wg_peer_hs_session(struct wg_peer *);
+struct wg_session *wg_peer_ks_session(struct wg_peer *);
+
+struct wg_session *wg_peer_new_session(struct wg_peer *);
+void wg_session_drop(struct wg_session *);
+
+/* Some crappy API */
+void
+wg_device_init(struct wg_device *dev, int ipl,
+ void (*notify_fn)(struct wg_peer *),
+ void (*outq_fn)(struct wg_peer *, enum wg_pkt_type, uint32_t),
+ void (*cleanup_fn)(struct wg_peer *), void *arg)
+{
+ bzero(dev, sizeof(*dev));
+ dev->d_arg = arg;
+ dev->d_outq = outq_fn;
+ dev->d_notify = notify_fn;
+ dev->d_cleanup = cleanup_fn;
+ fm_init(&dev->d_peers, 4, ipl);
+ fm_init(&dev->d_sessions, 12, ipl);
+ mtx_init(&dev->d_mtx, ipl);
+ /* d_cookie_maker, d_keypair initialised to 0 */
+}
+
+void
+wg_device_setkey(struct wg_device *dev, struct wg_privkey *key)
+{
+ mtx_enter(&dev->d_mtx);
+ wg_keypair_from_key(&dev->d_keypair, key);
+ mtx_leave(&dev->d_mtx);
+}
+
+void
+wg_device_getkey(struct wg_device *dev, struct wg_keypair *kp)
+{
+ mtx_enter(&dev->d_mtx);
+ *kp = dev->d_keypair;
+ mtx_leave(&dev->d_mtx);
+}
+
+void
+wg_device_destroy(struct wg_device *dev)
+{
+ struct map_item *item;
+
+ /* TODO lock fixed map */
+ FM_FOREACH_FILLED(item, &dev->d_peers)
+ wg_peer_drop(item->value);
+
+ fm_destroy(&dev->d_sessions);
+ fm_destroy(&dev->d_peers);
+}
+
+struct wg_peer *
+wg_device_new_peer(struct wg_device *dev, struct wg_pubkey *key, void *arg)
+{
+ struct wg_peer *peer;
+ peer = malloc(sizeof(*peer), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ peer->p_arg = arg;
+ peer->p_device = dev;
+ peer->p_remote = *key;
+ mtx_init(&peer->p_mtx, dev->d_mtx.mtx_wantipl);
+
+ mtx_enter(&peer->p_mtx);
+ peer->p_id = fm_insert(&dev->d_peers, peer);
+ mtx_leave(&peer->p_mtx);
+
+ /* All other elements of wg_peer are nulled by M_ZERO */
+ return peer;
+}
+
+struct wg_session *
+wg_peer_new_session(struct wg_peer *peer)
+{
+ struct wg_session *session, *old_session;
+ session = malloc(sizeof(*session), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ getnanotime(&session->s_created);
+ session->s_peer = peer;
+ session->s_state = WG_STATE_NEW;
+ mtx_init(&session->s_mtx, peer->p_mtx.mtx_wantipl);
+
+ mtx_enter(&peer->p_mtx);
+
+ old_session = peer->p_hs_session;
+ peer->p_hs_session = session;
+ mtx_leave(&peer->p_mtx);
+
+ mtx_enter(&session->s_mtx);
+ session->s_local_id = fm_insert(&peer->p_device->d_sessions, session);
+ mtx_leave(&session->s_mtx);
+
+ if (old_session)
+ wg_session_drop(old_session);
+
+ return session;
+}
+
+struct wg_peer *
+wg_device_ref_peerkey(struct wg_device *dev, struct wg_pubkey *key)
+{
+ /* For the time being, we just iterate through peers to find the
+ * matching peer */
+ struct wg_peer *peer = NULL;
+ struct map_item *item;
+
+ /* TODO lock, or use better data structure */
+ FM_FOREACH_FILLED(item, &dev->d_peers) {
+ peer = item->value;
+ if (memcmp(key->k, peer->p_remote.k, sizeof(key->k)) == 0)
+ break;
+ else
+ peer = NULL;
+ }
+ if (peer)
+ peer = fm_lookup(&dev->d_peers, peer->p_id);
+
+ return peer;
+}
+
+void
+wg_peer_put(struct wg_peer *peer)
+{
+ fm_put(&peer->p_device->d_peers, peer->p_id);
+}
+
+void
+wg_peer_drop(struct wg_peer *peer)
+{
+ fm_drop(&peer->p_device->d_peers, peer->p_id);
+ peer->p_device->d_cleanup(peer);
+ wg_peer_clean(peer);
+ free(peer, M_DEVBUF, sizeof(*peer));
+}
+
+void
+wg_session_put(struct wg_session *session)
+{
+ fm_put(&session->s_peer->p_device->d_sessions, session->s_local_id);
+}
+
+void
+wg_session_drop(struct wg_session *session)
+{
+ fm_drop(&session->s_peer->p_device->d_sessions, session->s_local_id);
+ free(session, M_DEVBUF, sizeof(*session));
+}
+
+void
+wg_peer_reset_attempts(struct wg_peer *peer)
+{
+ mtx_enter(&peer->p_mtx);
+ peer->p_attempts = 0;
+ mtx_leave(&peer->p_mtx);
+}
+
+void
+wg_peer_clean(struct wg_peer *peer)
+{
+ struct wg_session *hs, *ks, *ks_old;
+
+ mtx_enter(&peer->p_mtx);
+ hs = peer->p_hs_session;
+ ks = peer->p_ks_session;
+ ks_old = peer->p_ks_session_old;
+
+ peer->p_hs_session = NULL;
+ peer->p_ks_session = NULL;
+ peer->p_ks_session_old = NULL;
+ mtx_leave(&peer->p_mtx);
+
+ if (hs != NULL)
+ wg_session_drop(hs);
+ if (ks != NULL)
+ wg_session_drop(ks);
+ if (ks_old != NULL)
+ wg_session_drop(ks_old);
+}
+
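+/*
+ * wg_session_promote() turns a completed handshake into transport keys:
+ * the initiator takes the first KDF output from the chaining key as its
+ * tx key and the second as rx, the responder the reverse.  The session
+ * then replaces p_ks_session, the previous one moves to p_ks_session_old
+ * and any session already stashed there is dropped.
+ */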
+void
+wg_session_promote(struct wg_session *session)
+{
+ struct wg_session *old_session;
+ struct wg_peer *peer = session->s_peer;
+ struct wg_keyset *ks = &session->s_keyset;
+ struct wg_handshake *hs = &session->s_handshake;
+
+ /* TODO make better */
+ /* Setup session: derive keys, initialise the antireplay structure */
+ mtx_enter(&session->s_mtx);
+ if (session->s_state == WG_STATE_RECV_RESPONSE) {
+ session->s_state = WG_STATE_INITIATOR;
+ wg_kdf(ks->k_txkey.k, ks->k_rxkey.k, NULL, hs->h_ck, NULL, 0);
+ } else if (session->s_state == WG_STATE_MADE_RESPONSE) {
+ session->s_state = WG_STATE_RESPONDER;
+ wg_kdf(ks->k_rxkey.k, ks->k_txkey.k, NULL, hs->h_ck, NULL, 0);
+ } else {
+ mtx_leave(&session->s_mtx);
+ return;
+ }
+
+ antireplay_init(&ks->k_ar);
+ mtx_leave(&session->s_mtx);
+
+ mtx_enter(&peer->p_mtx);
+ old_session = peer->p_ks_session_old;
+ if (peer->p_ks_session != NULL)
+ peer->p_ks_session_old = peer->p_ks_session;
+ peer->p_ks_session = peer->p_hs_session;
+ peer->p_hs_session = NULL;
+ mtx_leave(&peer->p_mtx);
+
+ if (old_session != NULL)
+ wg_session_drop(old_session);
+
+ peer->p_device->d_notify(peer);
+}
+
+void
+wg_peer_setshared(struct wg_peer *peer, struct wg_privkey *key)
+{
+ mtx_enter(&peer->p_mtx);
+ peer->p_shared = *key;
+ mtx_leave(&peer->p_mtx);
+}
+
+void
+wg_peer_getshared(struct wg_peer *peer, struct wg_privkey *key)
+{
+ mtx_enter(&peer->p_mtx);
+ *key = peer->p_shared;
+ mtx_leave(&peer->p_mtx);
+}
+
+struct timespec
+wg_peer_last_handshake(struct wg_peer *peer)
+{
+ struct timespec ret = { 0, 0 };
+ mtx_enter(&peer->p_mtx);
+ if (peer->p_ks_session != NULL)
+ ret = peer->p_ks_session->s_created;
+ mtx_leave(&peer->p_mtx);
+ return ret;
+}
+
+struct wg_session *
+wg_peer_last_session(struct wg_peer *peer)
+{
+ uint32_t id = 0;
+ struct wg_session *session;
+
+ mtx_enter(&peer->p_mtx);
+ if (peer->p_ks_session != NULL)
+ id = peer->p_ks_session->s_local_id;
+ mtx_leave(&peer->p_mtx);
+
+ if ((session = fm_lookup(&peer->p_device->d_sessions, id)) == NULL)
+ return NULL;
+
+ if (!wg_timespec_timedout(&session->s_created, WG_REJECT_AFTER_TIME) &&
+ session->s_keyset.k_txcounter < WG_REJECT_AFTER_MESSAGES)
+ return session;
+
+ wg_session_put(session);
+ return NULL;
+}
+
+/* Crypto */
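+/*
+ * wg_device_rx_initiation() consumes a handshake initiation as responder:
+ * it mixes in the initiator's ephemeral key, decrypts the static key and
+ * the TAI64N timestamp, verifies mac1 against our public key, and only
+ * then looks up the peer, checks the timestamp against the last one
+ * accepted, and allocates a session for the reply.
+ */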
+enum wg_error
+wg_device_rx_initiation(struct wg_device *dev, struct wg_msg_initiation *init,
+ struct wg_session **s)
+{
+ struct wg_peer *peer;
+ struct wg_keypair kp;
+ struct wg_handshake hs;
+ struct wg_timestamp ts;
+ struct wg_pubkey remote;
+	struct wg_session *session = NULL;
+
+ enum wg_error ret = WG_OK;
+
+ *s = NULL;
+
+ /* We want to ensure that the keypair is not modified during the
+ * handshake, so we take a local copy here and bzero it before
+ * returning */
+ wg_device_getkey(dev, &kp);
+
+ /* Noise handshake */
+ memcpy(hs.h_remote.k, init->ephemeral, WG_KEY_SIZE);
+ wg_keypair_generate(&hs.h_local);
+
+ wg_hash2(hs.h_ck, WG_CONSTRUCTION, strlen(WG_CONSTRUCTION), NULL, 0);
+ memcpy(hs.h_hash, hs.h_ck, WG_HASH_SIZE);
+ wg_mix_hash(&hs, WG_IDENTIFIER, strlen(WG_IDENTIFIER));
+
+ wg_mix_hash(&hs, kp.pub.k, WG_KEY_SIZE);
+ wg_kdf(hs.h_ck, NULL, NULL, hs.h_ck, hs.h_remote.k, WG_KEY_SIZE);
+ wg_mix_hash(&hs, hs.h_remote.k, WG_KEY_SIZE);
+ wg_mix_dh(&hs, kp.priv.k, hs.h_remote.k);
+
+ if (!wg_handshake_decrypt(&hs, remote.k, init->static_pub,
+ WG_ENCRYPTED_SIZE(sizeof(remote.k))))
+ ret_error(WG_DECRYPT);
+
+ wg_mix_hash(&hs, init->static_pub, sizeof(init->static_pub));
+ wg_mix_dh(&hs, kp.priv.k, remote.k);
+
+ if (!wg_handshake_decrypt(&hs, ts.t, init->timestamp,
+ WG_ENCRYPTED_SIZE(sizeof(ts.t))))
+ ret_error(WG_DECRYPT);
+
+ wg_mix_hash(&hs, init->timestamp, sizeof(init->timestamp));
+ wg_hash2(hs.h_k, WG_MAC1, strlen(WG_MAC1), kp.pub.k, WG_KEY_SIZE);
+
+ blake2s(hs.h_mac, (void *)init, hs.h_k, sizeof(hs.h_mac),
+ offsetof(struct wg_msg_initiation, mac1), sizeof(hs.h_k));
+
+ /* Check MAC matches */
+ if (timingsafe_bcmp(hs.h_mac, init->mac1, WG_MAC_SIZE))
+ ret_error(WG_MAC);
+
+ /* Lookup peer key that was specified in the packet, as we need to
+ * know what peer this is for. */
+ if ((peer = wg_device_ref_peerkey(dev, &remote)) == NULL)
+ ret_error(WG_UNKNOWN_PEER);
+
+ /* We want to ensure this packet is not replayed, so we validate that
+ * the timestamp (not necessarily representative of the real time) is
+ * greater than the last one we have received */
+ mtx_enter(&peer->p_mtx);
+ if (memcmp(ts.t, peer->p_timestamp.t, sizeof(ts.t)) >= 0)
+ peer->p_timestamp = ts;
+ mtx_leave(&peer->p_mtx);
+
+ if (memcmp(ts.t, peer->p_timestamp.t, sizeof(ts.t)) != 0) {
+ wg_peer_put(peer);
+ ret_error(WG_TIMESTAMP);
+ }
+
+ session = wg_peer_new_session(peer);
+
+ mtx_enter(&session->s_mtx);
+ session->s_remote_id = init->sender;
+ session->s_handshake = hs;
+ session->s_state = WG_STATE_RECV_INITIATION;
+ mtx_leave(&session->s_mtx);
+
+ dev->d_outq(peer, WG_PKT_RESPONSE, session->s_local_id);
+ wg_peer_put(peer);
+ *s = session;
+leave:
+	if (ret != WG_OK && session != NULL)
+		wg_session_put(session);
+ explicit_bzero(&kp, sizeof(kp));
+ explicit_bzero(&hs, sizeof(hs));
+ return ret;
+}
+
+enum wg_error
+wg_device_rx_response(struct wg_device *dev, struct wg_msg_response *resp,
+ struct wg_session **s)
+{
+ struct wg_keypair kp;
+ struct wg_handshake hs;
+ struct wg_privkey shared;
+ struct wg_session *session;
+
+ enum wg_error ret = WG_OK;
+
+ if ((session = fm_lookup(&dev->d_sessions, resp->receiver)) == NULL)
+ return WG_ID;
+
+	/* Load required values */
+ wg_device_getkey(dev, &kp);
+
+ mtx_enter(&session->s_mtx);
+ hs = session->s_handshake;
+ mtx_leave(&session->s_mtx);
+
+ mtx_enter(&session->s_peer->p_mtx);
+ shared = session->s_peer->p_shared;
+ mtx_leave(&session->s_peer->p_mtx);
+
+ /* Noise recv handshake */
+ memcpy(hs.h_remote.k, resp->ephemeral, WG_KEY_SIZE);
+
+ wg_kdf(hs.h_ck, NULL, NULL, hs.h_ck, hs.h_remote.k, WG_KEY_SIZE);
+ wg_mix_hash(&hs, hs.h_remote.k, WG_KEY_SIZE);
+
+ wg_mix_dh(&hs, hs.h_local.priv.k, hs.h_remote.k);
+ wg_mix_dh(&hs, kp.priv.k, hs.h_remote.k);
+
+ wg_mix_psk(&hs, shared.k);
+
+ if (!wg_handshake_decrypt(&hs, NULL, resp->empty, WG_ENCRYPTED_SIZE(0)))
+ ret_error(WG_DECRYPT);
+
+ wg_mix_hash(&hs, resp->empty, WG_ENCRYPTED_SIZE(0));
+
+ wg_hash2(hs.h_k, WG_MAC1, strlen(WG_MAC1), kp.pub.k,
+ WG_KEY_SIZE);
+ blake2s(hs.h_mac, (void *)resp, hs.h_k, sizeof(hs.h_mac),
+ offsetof(struct wg_msg_response, mac1), sizeof(hs.h_k));
+
+ /* Compare macs */
+ if (timingsafe_bcmp(hs.h_mac, resp->mac1, WG_MAC_SIZE))
+ ret_error(WG_MAC);
+
+ /* Update session only if we are in correct state */
+ mtx_enter(&session->s_mtx);
+ if (session->s_state == WG_STATE_MADE_INITIATION) {
+ session->s_handshake = hs;
+ session->s_remote_id = resp->sender;
+ session->s_state = WG_STATE_RECV_RESPONSE;
+ } else {
+ ret = WG_STATE;
+ }
+ mtx_leave(&session->s_mtx);
+
+ wg_session_promote(session);
+
+ *s = session;
+leave:
+ if (ret != WG_OK)
+ wg_session_put(session);
+ explicit_bzero(&shared, sizeof(shared));
+ explicit_bzero(&kp, sizeof(kp));
+ explicit_bzero(&hs, sizeof(hs));
+ return ret;
+}
+
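+/*
+ * wg_device_rx_cookie() opens a cookie reply using a key derived from the
+ * peer's public key, with the mac1 we last transmitted as the additional
+ * data, and stores the decrypted cookie so the next initiation or response
+ * can carry a valid mac2.
+ */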
+enum wg_error
+wg_device_rx_cookie(struct wg_device *dev, struct wg_msg_cookie *cookie,
+ struct wg_session **s)
+{
+ uint8_t key[WG_KEY_SIZE];
+ uint8_t value[WG_COOKIE_SIZE];
+ struct wg_session *session;
+
+ enum wg_error ret = WG_OK;
+
+ if ((session = fm_lookup(&dev->d_sessions, cookie->receiver)) == NULL)
+ return WG_ID;
+
+ wg_hash2(key, WG_COOKIE, strlen(WG_COOKIE),
+ session->s_peer->p_remote.k, WG_KEY_SIZE);
+
+ /* TODO lock for h_mac? */
+	if (!xchacha20poly1305_decrypt(value, cookie->value,
+ sizeof(cookie->value), session->s_handshake.h_mac, WG_MAC_SIZE,
+ cookie->nonce, key))
+ ret_error(WG_DECRYPT);
+
+ /* Update peer with new cookie data */
+ mtx_enter(&session->s_peer->p_mtx);
+ memcpy(session->s_peer->p_cookie.cookie, value,
+ sizeof(session->s_peer->p_cookie.cookie));
+ getnanotime(&session->s_peer->p_cookie.time);
+ mtx_leave(&session->s_peer->p_mtx);
+
+ *s = session;
+leave:
+ if (ret != WG_OK)
+ wg_session_put(session);
+ return ret;
+}
+
+enum wg_error
+wg_device_rx_transport(struct wg_device *dev, struct wg_msg_transport *msg,
+ size_t len, struct wg_session **s)
+{
+ struct wg_session *session;
+ enum wg_error ret = WG_OK;
+ size_t data_len = len - offsetof(struct wg_msg_transport, data);
+ uint64_t counter = letoh64(msg->counter);
+
+ if ((session = fm_lookup(&dev->d_sessions, msg->receiver)) == NULL)
+ return WG_ID;
+
+ wg_session_promote(session);
+
+ /* TODO fix locks, at the moment we just kinda don't care */
+ if (session->s_state != WG_STATE_INITIATOR &&
+ session->s_state != WG_STATE_RESPONDER)
+ ret_error(WG_STATE);
+
+ if (wg_timespec_timedout(&session->s_created, WG_REJECT_AFTER_TIME) ||
+ session->s_keyset.k_rxcounter > WG_REJECT_AFTER_MESSAGES)
+ ret_error(WG_REJECT);
+
+ if (!chacha20poly1305_decrypt(msg->data, msg->data, data_len, NULL, 0,
+ msg->counter, session->s_keyset.k_rxkey.k))
+ ret_error(WG_DECRYPT);
+
+ if (antireplay_update(&session->s_keyset.k_ar, counter))
+ ret_error(WG_REPLAY);
+
+ session->s_keyset.k_rxcounter = counter;
+ session->s_peer->p_rx_bytes += data_len - WG_MAC_SIZE;
+
+ if (session->s_state == WG_STATE_RESPONDER &&
+ wg_timespec_timedout(&session->s_created,
+ WG_REKEY_AFTER_TIME_RECV) &&
+ wg_timespec_timedout(&session->s_peer->p_last_initiation,
+ WG_REKEY_TIMEOUT))
+ dev->d_outq(session->s_peer, WG_PKT_INITIATION, session->s_peer->p_id);
+
+ *s = session;
+leave:
+ if (ret != WG_OK)
+ wg_session_put(session);
+ return ret;
+}
+
+enum wg_error
+wg_device_tx_initiation(struct wg_device *dev, struct wg_msg_initiation *init,
+ uint32_t id, struct wg_session **s)
+{
+ struct wg_peer *peer;
+ struct wg_keypair kp;
+ struct wg_handshake hs;
+ struct wg_session *session = NULL;
+
+ enum wg_error ret = WG_OK;
+
+ if ((peer = fm_lookup(&dev->d_peers, id)) == NULL)
+ return WG_ID;
+
+ mtx_enter(&peer->p_mtx);
+ if (!wg_timespec_timedout(&peer->p_last_initiation, WG_REKEY_TIMEOUT))
+ ret = WG_HS_RATE;
+ if (peer->p_attempts >= WG_REKEY_ATTEMPT_COUNT)
+ ret = WG_HS_ATTEMPTS;
+ if (ret == WG_OK) {
+ getnanotime(&peer->p_last_initiation);
+ peer->p_attempts++;
+ }
+ mtx_leave(&peer->p_mtx);
+
+ if (ret != WG_OK)
+ goto leave;
+
+ /* We need to generate the session here first, so we can use s_local_id
+ * below. We also want to operate on a local handshake, so we don't
+ * have to lock the session. */
+ session = wg_peer_new_session(peer);
+
+ wg_device_getkey(dev, &kp);
+ wg_keypair_generate(&hs.h_local);
+
+ /* Noise handshake */
+ init->type = WG_MSG_INITIATION;
+ init->sender = session->s_local_id;
+ memcpy(init->ephemeral, hs.h_local.pub.k, WG_KEY_SIZE);
+
+ wg_hash2(hs.h_ck, WG_CONSTRUCTION, strlen(WG_CONSTRUCTION), NULL, 0);
+ memcpy(hs.h_hash, hs.h_ck, WG_HASH_SIZE);
+ wg_mix_hash(&hs, WG_IDENTIFIER, strlen(WG_IDENTIFIER));
+
+ wg_mix_hash(&hs, peer->p_remote.k, WG_KEY_SIZE);
+ wg_kdf(hs.h_ck, NULL, NULL, hs.h_ck, hs.h_local.pub.k, WG_KEY_SIZE);
+ wg_mix_hash(&hs, hs.h_local.pub.k, WG_KEY_SIZE);
+ wg_mix_dh(&hs, hs.h_local.priv.k, peer->p_remote.k);
+
+ wg_handshake_encrypt(&hs, init->static_pub, kp.pub.k,
+ WG_KEY_SIZE);
+
+ wg_mix_hash(&hs, init->static_pub, WG_ENCRYPTED_SIZE(WG_KEY_SIZE));
+ wg_mix_dh(&hs, kp.priv.k, peer->p_remote.k);
+
+ wg_timestamp_get(init->timestamp);
+
+ wg_handshake_encrypt(&hs, init->timestamp, init->timestamp, WG_TIMESTAMP_SIZE);
+ wg_mix_hash(&hs, init->timestamp, WG_ENCRYPTED_SIZE(WG_TIMESTAMP_SIZE));
+ wg_hash2(hs.h_k, WG_MAC1, strlen(WG_MAC1), peer->p_remote.k, WG_KEY_SIZE);
+
+ blake2s(init->mac1, (void *) init, hs.h_k, sizeof(init->mac1),
+ offsetof(struct wg_msg_initiation, mac1), sizeof(hs.h_k));
+ memcpy(hs.h_mac, init->mac1, sizeof(hs.h_mac));
+
+ /* TODO lock for cookie time? */
+ if (wg_timespec_timedout(&peer->p_cookie.time, WG_COOKIE_VALID_TIME))
+ bzero(init->mac2, WG_MAC_SIZE);
+ else
+ blake2s(init->mac2, (void *)init, peer->p_cookie.cookie,
+ sizeof(init->mac2), offsetof(struct wg_msg_initiation, mac2),
+ sizeof(peer->p_cookie.cookie));
+
+ mtx_enter(&session->s_mtx);
+ session->s_handshake = hs;
+ session->s_state = WG_STATE_MADE_INITIATION;
+ mtx_leave(&session->s_mtx);
+
+ *s = session;
+leave:
+ /* if (ret != WG_OK)
+ wg_session_put(session); */
+ wg_peer_put(peer);
+ explicit_bzero(&kp, sizeof(kp));
+ explicit_bzero(&hs, sizeof(hs));
+ return ret;
+}
+
+enum wg_error
+wg_device_tx_response(struct wg_device *dev, struct wg_msg_response *resp,
+ uint32_t id, struct wg_session **s)
+{
+ struct wg_handshake hs;
+ struct wg_session *session;
+
+ enum wg_error ret = WG_OK;
+
+ if ((session = fm_lookup(&dev->d_sessions, id)) == NULL)
+ return WG_ID;
+
+ resp->type = WG_MSG_RESPONSE;
+ resp->sender = session->s_local_id;
+ resp->receiver = session->s_remote_id;
+
+ mtx_enter(&session->s_mtx);
+ hs = session->s_handshake;
+ mtx_leave(&session->s_mtx);
+
+ /* Noise handshake */
+ wg_kdf(hs.h_ck, NULL, NULL, hs.h_ck, hs.h_local.pub.k, WG_KEY_SIZE);
+ wg_mix_hash(&hs, hs.h_local.pub.k, WG_KEY_SIZE);
+
+ memcpy(resp->ephemeral, hs.h_local.pub.k, WG_KEY_SIZE);
+
+ wg_mix_dh(&hs, hs.h_local.priv.k, hs.h_remote.k);
+ wg_mix_dh(&hs, hs.h_local.priv.k, session->s_peer->p_remote.k);
+
+ wg_mix_psk(&hs, session->s_peer->p_shared.k);
+
+ wg_handshake_encrypt(&hs, resp->empty, NULL, 0);
+
+ wg_mix_hash(&hs, resp->empty, WG_ENCRYPTED_SIZE(0));
+
+ wg_hash2(hs.h_k, WG_MAC1, strlen(WG_MAC1), session->s_peer->p_remote.k, WG_KEY_SIZE);
+ blake2s(resp->mac1, (void *)resp, hs.h_k, sizeof(resp->mac1),
+ offsetof(struct wg_msg_response, mac1), sizeof(hs.h_k));
+ memcpy(hs.h_mac, resp->mac1, sizeof(hs.h_mac));
+
+ /* TODO lock for cookie time? */
+ if (wg_timespec_timedout(&session->s_peer->p_cookie.time, WG_COOKIE_VALID_TIME))
+ bzero(resp->mac2, WG_MAC_SIZE);
+ else
+ blake2s(resp->mac2, (void *)resp, session->s_peer->p_cookie.cookie,
+ sizeof(resp->mac2), offsetof(struct wg_msg_response, mac2),
+ sizeof(session->s_peer->p_cookie.cookie));
+
+ mtx_enter(&session->s_mtx);
+ if (session->s_state == WG_STATE_RECV_INITIATION)
+ session->s_state = WG_STATE_MADE_RESPONSE;
+ mtx_leave(&session->s_mtx);
+
+	if (session->s_state != WG_STATE_MADE_RESPONSE)
+		ret_error(WG_STATE);
+
+ *s = session;
+leave:
+ if (ret != WG_OK)
+ wg_session_put(session);
+ return ret;
+}
+
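+/*
+ * wg_device_tx_cookie() builds a cookie reply: the cookie computed for the
+ * sender's address is sealed with XChaCha20-Poly1305 under a key derived
+ * from our public key, using the offending message's mac1 as the
+ * additional data.
+ */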
+enum wg_error
+wg_device_tx_cookie(struct wg_device *dev, struct wg_cookie *c,
+ uint32_t sender, uint8_t mac[WG_MAC_SIZE], struct wg_msg_cookie *msg)
+{
+	uint8_t key[WG_KEY_SIZE];	/* same as WG_HASH_SIZE */
+
+ msg->type = WG_MSG_COOKIE;
+ msg->receiver = sender;
+ arc4random_buf(msg->nonce, sizeof(msg->nonce));
+
+ wg_hash2(key, WG_COOKIE, strlen(WG_COOKIE), dev->d_keypair.pub.k,
+ WG_KEY_SIZE);
+ xchacha20poly1305_encrypt(msg->value, c->cookie, WG_MAC_SIZE, mac,
+ WG_MAC_SIZE, msg->nonce, key);
+
+ explicit_bzero(key, sizeof(key));
+ return WG_OK;
+}
+
+enum wg_error
+wg_device_tx_transport(struct wg_device *dev, struct wg_msg_transport *msg,
+ size_t len, uint32_t id, struct wg_session **s)
+{
+ struct wg_session *session;
+
+ enum wg_error ret = WG_OK;
+
+ if ((session = fm_lookup(&dev->d_sessions, id)) == NULL)
+ return WG_ID;
+
+ /* TODO we should do some locking */
+ if (session->s_state != WG_STATE_INITIATOR &&
+ session->s_state != WG_STATE_RESPONDER)
+ ret_error(WG_STATE);
+
+ if (wg_timespec_timedout(&session->s_created, WG_REJECT_AFTER_TIME) ||
+ session->s_keyset.k_txcounter > WG_REJECT_AFTER_MESSAGES)
+ ret_error(WG_REJECT);
+
+ msg->type = WG_MSG_TRANSPORT;
+ msg->receiver = session->s_remote_id;
+ msg->counter = htole64(session->s_keyset.k_txcounter++);
+
+ chacha20poly1305_encrypt(msg->data, msg->data, len, NULL, 0, msg->counter,
+ session->s_keyset.k_txkey.k);
+
+ /* Packet OK, but we do want a rekey */
+ if (((session->s_state == WG_STATE_INITIATOR &&
+ wg_timespec_timedout(&session->s_created, WG_REKEY_AFTER_TIME)) ||
+ session->s_keyset.k_txcounter > WG_REKEY_AFTER_MESSAGES) &&
+ wg_timespec_timedout(&session->s_peer->p_last_initiation,
+ WG_REKEY_TIMEOUT))
+ dev->d_outq(session->s_peer, WG_PKT_INITIATION, session->s_peer->p_id);
+
+ session->s_peer->p_tx_bytes += len;
+ *s = session;
+leave:
+ if (ret != WG_OK)
+ wg_session_put(session);
+ return ret;
+}
+
+/* WireGuard crypto helper functions */
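+/*
+ * wg_kdf() is the Noise HKDF: blake2s_hmac() extracts a secret from
+ * (key, input), then each requested output is expanded by HMACing the
+ * previous block with a counter byte (1, 2, 3) appended; unused outputs
+ * may be passed as NULL.
+ */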
+void
+wg_kdf(uint8_t first[WG_HASH_SIZE], uint8_t second[WG_HASH_SIZE],
+ uint8_t third[WG_HASH_SIZE], uint8_t key[WG_KEY_SIZE], uint8_t * input,
+ size_t input_len)
+{
+ uint8_t buffer[WG_HASH_SIZE + 1];
+ uint8_t secret[WG_HASH_SIZE];
+
+ blake2s_hmac(secret, input, key, WG_HASH_SIZE, input_len, WG_KEY_SIZE);
+
+ if (!first)
+ return;
+
+ buffer[0] = 1;
+ blake2s_hmac(buffer, buffer, secret, WG_HASH_SIZE, 1, WG_KEY_SIZE);
+ memcpy(first, buffer, WG_HASH_SIZE);
+
+ if (!second)
+ return;
+
+ buffer[WG_HASH_SIZE] = 2;
+ blake2s_hmac(buffer, buffer, secret, WG_HASH_SIZE, WG_HASH_SIZE + 1, WG_KEY_SIZE);
+ memcpy(second, buffer, WG_HASH_SIZE);
+
+ if (!third)
+ return;
+
+ buffer[WG_HASH_SIZE] = 3;
+ blake2s_hmac(buffer, buffer, secret, WG_HASH_SIZE, WG_HASH_SIZE + 1, WG_KEY_SIZE);
+ memcpy(third, buffer, WG_HASH_SIZE);
+}
+
+void
+wg_hash2(uint8_t out[WG_HASH_SIZE], uint8_t * in1, size_t in1_len, uint8_t * in2, size_t in2_len)
+{
+ struct blake2s_state s;
+ blake2s_init(&s, WG_HASH_SIZE);
+ blake2s_update(&s, in1, in1_len);
+ blake2s_update(&s, in2, in2_len);
+ blake2s_final(&s, out, WG_HASH_SIZE);
+}
+
+void
+wg_mix_hash(struct wg_handshake * hs, uint8_t * in, size_t in_len)
+{
+ wg_hash2(hs->h_hash, hs->h_hash, WG_HASH_SIZE, in, in_len);
+}
+
+void
+wg_mix_dh(struct wg_handshake * hs, uint8_t priv[WG_KEY_SIZE], uint8_t pub[WG_KEY_SIZE])
+{
+ uint8_t DH [WG_KEY_SIZE];
+ crypto_scalarmult_curve25519(DH, priv, pub);
+ wg_kdf(hs->h_ck, hs->h_k, NULL, hs->h_ck, DH, WG_KEY_SIZE);
+ explicit_bzero(DH, WG_KEY_SIZE);
+}
+
+void
+wg_mix_psk(struct wg_handshake * hs, uint8_t psk[WG_KEY_SIZE])
+{
+ uint8_t t [WG_HASH_SIZE];
+ wg_kdf(hs->h_ck, t, hs->h_k, hs->h_ck, psk, WG_KEY_SIZE);
+ wg_mix_hash(hs, t, WG_HASH_SIZE);
+ explicit_bzero(t, WG_HASH_SIZE);
+}
+
+void
+wg_handshake_encrypt(struct wg_handshake *hs, uint8_t *dst, uint8_t *src, size_t srclen)
+{
+ return chacha20poly1305_encrypt(dst, src, srclen, hs->h_hash, WG_HASH_SIZE, 0, hs->h_k);
+}
+
+int
+wg_handshake_decrypt(struct wg_handshake *hs, uint8_t *dst, uint8_t *src, size_t srclen)
+{
+ return chacha20poly1305_decrypt(dst, src, srclen, hs->h_hash, WG_HASH_SIZE, 0, hs->h_k);
+}
+
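+/*
+ * wg_timestamp_get() emits a TAI64N-style timestamp: 8 bytes of big-endian
+ * seconds offset by 2^62 + 10, followed by 4 bytes of big-endian
+ * nanoseconds.
+ */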
+void
+wg_timestamp_get(uint8_t ts[WG_TIMESTAMP_SIZE])
+{
+ struct timespec tv;
+ getnanotime(&tv);
+ *(uint64_t *) (ts) = htobe64(0x400000000000000aULL + tv.tv_sec);
+ *(uint32_t *) (ts + 8) = htobe32(tv.tv_nsec);
+}
+
+int
+wg_timespec_timedout(struct timespec * start, time_t timeout)
+{
+ struct timespec now;
+
+ getnanotime(&now);
+
+ return now.tv_sec == start->tv_sec + timeout ?
+ now.tv_nsec > start->tv_nsec :
+ now.tv_sec > start->tv_sec + timeout;
+}
+
+enum wg_pkt_type
+wg_pkt_type(uint8_t *buf, size_t len)
+{
+ struct wg_msg_unknown *msg;
+
+ if (len < sizeof(*msg))
+ return WG_PKT_UNKNOWN;
+ else
+ msg = (struct wg_msg_unknown *)buf;
+
+ if (msg->type == WG_MSG_INITIATION && len == wg_pkt_len[msg->type])
+ return WG_PKT_INITIATION;
+ else if (msg->type == WG_MSG_RESPONSE && len == wg_pkt_len[msg->type])
+ return WG_PKT_RESPONSE;
+ else if (msg->type == WG_MSG_COOKIE && len == wg_pkt_len[msg->type])
+ return WG_PKT_COOKIE;
+ else if (msg->type == WG_MSG_TRANSPORT && len >= wg_pkt_len[msg->type])
+ return WG_PKT_TRANSPORT;
+ else
+ return WG_PKT_UNKNOWN;
+}
+
+void
+wg_keypair_from_key(struct wg_keypair *kp, const struct wg_privkey *key)
+{
+ const uint8_t basepoint[WG_KEY_SIZE] = {9};
+
+ /* memmove as key may overlap with kp->priv */
+ memmove(kp->priv.k, key->k, WG_KEY_SIZE);
+
+ /* We don't care if the input is not a valid key, we just set
+ * the bits and be done with it. The curve25519 library *SHOULD*
+ * do this, but since this may be returned to the user, we do
+ * it here too. */
+ kp->priv.k[0] &= 248;
+ kp->priv.k[31] &= 127;
+ kp->priv.k[31] |= 64;
+
+ crypto_scalarmult_curve25519(kp->pub.k, kp->priv.k, basepoint);
+}
+
+void
+wg_keypair_generate(struct wg_keypair *kp)
+{
+ arc4random_buf(kp->priv.k, sizeof(kp->priv.k));
+ wg_keypair_from_key(kp, &kp->priv);
+}
+
+enum wg_error
+wg_msg_initiation_valid_mac2(struct wg_msg_initiation *msg, struct wg_cookie *c)
+{
+ uint8_t mac[WG_MAC_SIZE];
+ blake2s(mac, (uint8_t *)msg, c->cookie, WG_MAC_SIZE,
+ offsetof(struct wg_msg_initiation, mac2), WG_COOKIE_SIZE);
+ return timingsafe_bcmp(mac, msg->mac2, WG_MAC_SIZE) ? WG_MAC : WG_OK;
+}
+
+enum wg_error
+wg_msg_response_valid_mac2(struct wg_msg_response *msg, struct wg_cookie *c)
+{
+ uint8_t mac[WG_MAC_SIZE];
+ blake2s(mac, (uint8_t *)msg, c->cookie, WG_MAC_SIZE,
+ offsetof(struct wg_msg_response, mac2), WG_COOKIE_SIZE);
+ return timingsafe_bcmp(mac, msg->mac2, WG_MAC_SIZE) ? WG_MAC : WG_OK;
+}
+
+void
+wg_cookie_from_token(struct wg_cookie *c, struct wg_cookie_maker *cm,
+ uint8_t *ip, uint8_t ip_len)
+{
+ if (wg_timespec_timedout(&cm->time, WG_COOKIE_VALID_TIME)) {
+ getnanotime(&cm->time);
+ arc4random_buf(cm->seed, sizeof(cm->seed));
+ }
+
+ blake2s(c->cookie, ip, cm->seed, WG_MAC_SIZE, ip_len, WG_COOKIE_SIZE);
+}
+
+/* Timer Functions */
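+/*
+ * A struct wg_timers bundles five process-context timeouts: the passive
+ * keepalive (t_ka), the persistent keepalive (t_pka), a broken-session
+ * detector (t_broken), a reinitiation timer (t_reinit) and a cleanup timer
+ * (t_cleanup); the flag/unflag helpers below arm and disarm them.
+ */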
+void
+wg_timer_setup(struct wg_timers *t, void *p, void (*keepalive)(void *),
+ void (*broken)(void *), void (*reinit)(void *), void (*cleanup)(void *))
+{
+ assert(p != NULL);
+ assert(keepalive != NULL);
+ assert(broken != NULL);
+ assert(reinit != NULL);
+ assert(cleanup != NULL);
+
+ timeout_set_proc(&t->t_ka, keepalive, p);
+ timeout_set_proc(&t->t_pka, keepalive, p);
+ timeout_set_proc(&t->t_broken, broken, p);
+ timeout_set_proc(&t->t_reinit, reinit, p);
+ timeout_set_proc(&t->t_cleanup, cleanup, p);
+}
+
+void
+wg_timer_stop(struct wg_timers *t)
+{
+ /* TODO need barrier? */
+ timeout_del(&t->t_ka);
+ timeout_del(&t->t_pka);
+ timeout_del(&t->t_broken);
+ timeout_del(&t->t_reinit);
+ timeout_del(&t->t_cleanup);
+}
+
+void
+wg_timer_persistent_keepalive_tick(struct wg_timers *t)
+{
+ if (t->t_pka_interval > 0)
+ timeout_add_sec(&t->t_pka, t->t_pka_interval);
+}
+
+uint16_t
+wg_timer_persistent_keepalive_get(struct wg_timers *t)
+{
+ return t->t_pka_interval;
+}
+
+void
+wg_timer_persistent_keepalive_set(struct wg_timers *t, uint16_t interval)
+{
+ t->t_pka_interval = interval;
+}
+
+void
+wg_timer_cleanup_tick(struct wg_timers *t)
+{
+ timeout_add_sec(&t->t_cleanup, WG_REJECT_AFTER_TIME * 3);
+}
+
+void
+wg_timer_keepalive_flag(struct wg_timers *t)
+{
+ timeout_add_sec(&t->t_ka, WG_KEEPALIVE_TIMEOUT);
+}
+
+void
+wg_timer_keepalive_unflag(struct wg_timers *t)
+{
+ timeout_del(&t->t_ka);
+}
+
+void
+wg_timer_broken_flag(struct wg_timers *t)
+{
+ if (timeout_pending(&t->t_broken) == 0)
+ timeout_add_sec(&t->t_broken, WG_REKEY_TIMEOUT + WG_KEEPALIVE_TIMEOUT);
+}
+
+void
+wg_timer_broken_unflag(struct wg_timers *t)
+{
+ timeout_del(&t->t_broken);
+}
+
+void
+wg_timer_reinit_flag(struct wg_timers *t)
+{
+ timeout_add_sec(&t->t_reinit, WG_REKEY_TIMEOUT);
+}
+
+void
+wg_timer_reinit_unflag(struct wg_timers *t)
+{
+ timeout_del(&t->t_reinit);
+}
+
/* if_wg.c */
int wg_softc_route_add(struct wg_softc *, struct wg_cidr *, struct wg_route *);
int wg_softc_route_delete(struct wg_softc *, struct wg_cidr *);
@@ -164,6 +1203,278 @@ MPQ_WORKER(wg_tx_task_fn, wg_encrypt, wg_output_deliver);
MPQ_WORKER(wg_rx_slow_task_fn, wg_decrypt_hs, m_freem);
MPQ_WORKER(wg_rx_task_fn, wg_decrypt, wg_input_deliver);
+/*
+ * The following defines assist the antireplay_update function:
+ * ANTIREPLAY_INTEGER: the integer in the bitmap corresponding to num
+ * ANTIREPLAY_INTEGERBIT: the integer with the corresponding single bit set
+ */
+#define ANTIREPLAY_INTEGER(ctx, num) ((ctx)->ar_bitmap[((num) % ARB_BITS) / ARI_BITS])
+#define ANTIREPLAY_INTEGERBIT(num) (1llu << ((num) & (ARI_BITS - 1)))
+
+void
+antireplay_init(struct antireplay *ctx)
+{
+ /* We just zero out the struct, expecting that then ctx->ar_head == 0 */
+ explicit_bzero(ctx, sizeof(struct antireplay));
+}
+
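+/*
+ * antireplay_update() maintains a sliding window of ARB_BITS counters
+ * ending at ar_head: a counter more than ARB_BITS - ARI_BITS behind the
+ * head is rejected as expired, one whose bit is already set is rejected as
+ * a replay, and anything else is accepted and its bit recorded.
+ */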
+int
+antireplay_update(struct antireplay *ctx, uint64_t num)
+{
+ /* Bits after ctx->ar_head need to be zeroed. This is called when num is
+ * in front of ctx->ar_head, and those bits need to be set to 0 */
+ if (num < ctx->ar_head + ARB_BITS / ARI_BITS) {
+ for (; ctx->ar_head <= num; ctx->ar_head += ARI_BITS) {
+ ANTIREPLAY_INTEGER(ctx, (ctx->ar_head + 1)) = 0;
+ }
+ } else {
+		bzero(ctx->ar_bitmap, ARB_BITS / 8);
+ }
+
+ if (ctx->ar_head > (num + ARB_BITS - ARI_BITS)) {
+ /* Expired */
+ return 1;
+ } else if (ANTIREPLAY_INTEGER(ctx, num) & ANTIREPLAY_INTEGERBIT(num)) {
+ /* Replayed */
+ return 1;
+	} else {
+		/* Unseen: mark the counter as received */
+		ANTIREPLAY_INTEGER(ctx, num) |= ANTIREPLAY_INTEGERBIT(num);
+		return 0;
+	}
+}
+
+/*
+ * fixedmap.h
+ */
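+/*
+ * fixed_map is a small power-of-two table of refcounted slots.  fm_insert()
+ * builds a 32-bit key from random high bits plus the slot index in the low
+ * bits, so fm_lookup() finds the slot with key & (size - 1); fm_put()
+ * releases a reference and fm_drop() waits for the last reference before
+ * emptying the slot.
+ */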
+
+/* #define FMPRINTF(...) printf("fm: " __VA_ARGS__) */
+#define FMPRINTF(...) do {} while (0)
+
+void
+fm_init(struct fixed_map *fm, size_t size, int ipl)
+{
+ bzero(fm, sizeof(*fm));
+ mtx_init(&fm->mtx, ipl);
+ fm_resize(fm, size);
+}
+
+void
+fm_resize(struct fixed_map *fm, size_t size)
+{
+ size_t i;
+ size_t xsize;
+ struct map_item *xmap;
+
+ for (xsize = 1; xsize < size; xsize <<= 1);
+
+ mtx_enter(&fm->mtx);
+ if (xsize < fm->size)
+ goto leave;
+ mtx_leave(&fm->mtx);
+
+ xmap = mallocarray(xsize, sizeof(*xmap), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ mtx_enter(&fm->mtx);
+ if (fm->map != NULL) {
+		for (i = 0; i < fm->size; i++) {
+			if (fm->map[i].state == FM_ITEM_FILLED)
+				xmap[fm->map[i].key & (xsize - 1)] = fm->map[i];
+		}
+ free(fm->map, M_DEVBUF, 0);
+ }
+ fm->size = xsize;
+ fm->map = xmap;
+leave:
+ mtx_leave(&fm->mtx);
+}
+
+void
+fm_destroy(struct fixed_map *fm)
+{
+ struct map_item *item;
+ FM_FOREACH_FILLED(item, fm)
+ panic("non empty fixedmap");
+ free(fm->map, M_DEVBUF, 0);
+}
+
+uint32_t
+fm_insert(struct fixed_map *fm, void *v)
+{
+ uint32_t k = 0;
+ struct map_item *item = NULL, *iter;
+
+ mtx_enter(&fm->mtx);
+
+ FM_FOREACH_EMPTY(iter, fm) {
+ item = iter;
+ break;
+ }
+
+ if (item == NULL)
+ panic("fixed map not large enough");
+
+ while (k == 0)
+ k = (arc4random() & ~(fm->size - 1)) + (item - fm->map);
+
+ item->key = k;
+ item->state = FM_ITEM_FILLED;
+ item->value = v;
+ refcnt_init(&item->refcnt);
+ mtx_leave(&fm->mtx);
+
+ FMPRINTF("insert %x in %p - %d\n", k, fm, item->refcnt.refs);
+
+ /* We want to take another reference, as one reference is in the map
+ * and the other one gets returned */
+ refcnt_take(&item->refcnt);
+ return k;
+}
+
+void *
+fm_lookup(struct fixed_map *fm, uint32_t k)
+{
+ void *v = NULL;
+ struct map_item *item;
+
+ mtx_enter(&fm->mtx);
+ item = fm->map + (k & (fm->size - 1));
+ if (item->key == k) {
+ refcnt_take(&item->refcnt);
+ v = item->value;
+ FMPRINTF("lookup %x in %p - %d\n", k, fm, item->refcnt.refs);
+ } else {
+ FMPRINTF("lookup %x in %p - failed\n", k, fm);
+ }
+ mtx_leave(&fm->mtx);
+ return v;
+}
+
+void
+fm_put(struct fixed_map *fm, uint32_t k)
+{
+ struct map_item *item;
+
+ mtx_enter(&fm->mtx);
+ item = fm->map + (k & (fm->size - 1));
+ if (item->key != k)
+ panic("element should be in map");
+ refcnt_rele_wake(&item->refcnt);
+ FMPRINTF("put %x in %p - %d\n", k, fm, item->refcnt.refs);
+ mtx_leave(&fm->mtx);
+}
+
+void
+fm_drop(struct fixed_map *fm, uint32_t k)
+{
+ struct map_item *item;
+
+ mtx_enter(&fm->mtx);
+ item = fm->map + (k & (fm->size - 1));
+ if (item->key != k)
+ panic("element should be in map");
+ FMPRINTF("drop %x in %p - %d\n", k, fm, item->refcnt.refs);
+ mtx_leave(&fm->mtx);
+
+ refcnt_finalize(&item->refcnt, "fm_drop");
+
+ mtx_enter(&fm->mtx);
+ item->key = 0;
+ item->state = FM_ITEM_EMPTY;
+ item->value = NULL;
+ mtx_leave(&fm->mtx);
+}
+
+/*
+ * mpq.h
+ */
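+/*
+ * mpq is an mbuf queue worked on by multiple CPUs: mpq_dethread() hands
+ * the next unprocessed mbuf to a worker, mpq_threaddone() marks it
+ * finished (M_LINK0), and mpq_dequeue() only releases mbufs from the head
+ * of the list once they carry that mark, so completed packets leave the
+ * queue in their original order.
+ */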
+
+void
+mpq_init(struct mpq *mpq, int ipl)
+{
+ mpq->mpq_cursor = NULL;
+ mpq->mpq_serializer = NULL;
+ mtx_init(&mpq->mpq_mtx, ipl);
+ ml_init(&mpq->mpq_list);
+}
+
+int
+mpq_full(struct mpq *mpq)
+{
+ int full;
+ mtx_enter(&mpq->mpq_mtx);
+ full = ml_len(&mpq->mpq_list) >= MPQ_LEN;
+ mtx_leave(&mpq->mpq_mtx);
+ return full;
+}
+
+int
+mpq_serialize_try_enter(struct mpq *mpq)
+{
+ int error = 1;
+ mtx_enter(&mpq->mpq_mtx);
+ if (mpq->mpq_serializer == NULL) {
+ mpq->mpq_serializer = curcpu();
+ error = 0;
+ }
+ mtx_leave(&mpq->mpq_mtx);
+ return error;
+}
+
+void
+mpq_serialize_leave(struct mpq *mpq)
+{
+ mtx_enter(&mpq->mpq_mtx);
+ mpq->mpq_serializer = NULL;
+ mtx_leave(&mpq->mpq_mtx);
+}
+
+void
+mpq_threaddone(struct mpq *mpq, struct mbuf *m)
+{
+ mtx_enter(&mpq->mpq_mtx);
+ SET(m->m_flags, M_LINK0);
+ mtx_leave(&mpq->mpq_mtx);
+}
+
+void
+mpq_enqueue(struct mpq *mpq, struct mbuf *m)
+{
+ CLR(m->m_flags, M_LINK0);
+ mtx_enter(&mpq->mpq_mtx);
+ if (ml_len(&mpq->mpq_list) < MPQ_LEN) {
+ ml_enqueue(&mpq->mpq_list, m);
+ if (mpq->mpq_cursor == NULL)
+ mpq->mpq_cursor = m;
+ } else {
+ m_freem(m);
+ }
+ mtx_leave(&mpq->mpq_mtx);
+}
+
+struct mbuf *
+mpq_dethread(struct mpq *mpq)
+{
+ struct mbuf *m;
+ mtx_enter(&mpq->mpq_mtx);
+ m = mpq->mpq_cursor;
+ if (mpq->mpq_cursor != NULL)
+ mpq->mpq_cursor = MBUF_LIST_NEXT(mpq->mpq_cursor);
+ mtx_leave(&mpq->mpq_mtx);
+ return m;
+}
+
+struct mbuf *
+mpq_dequeue(struct mpq *mpq)
+{
+ struct mbuf *m;
+ mtx_enter(&mpq->mpq_mtx);
+ m = MBUF_LIST_FIRST(&mpq->mpq_list);
+ if (m != NULL && ISSET(m->m_flags, M_LINK0))
+ m = ml_dequeue(&mpq->mpq_list);
+ else
+ m = NULL;
+ mtx_leave(&mpq->mpq_mtx);
+ return m;
+}
+
int
wg_softc_route_add(struct wg_softc *sc, struct wg_cidr *cidr, struct wg_route *r)
{
@@ -267,100 +1578,6 @@ wg_softc_route_lookup(struct wg_softc * sc, struct mbuf * m, bool out)
return r;
}
-int
-wg_mbuf_ratelimit(struct wg_softc *sc, struct mbuf *m)
-{
- enum wg_error e;
- struct wg_cookie c;
- struct wg_msg_response *resp;
- struct wg_msg_initiation *init;
- struct wg_tag *tag = wg_mbuf_get_tag(m);
- union wg_ip *ip = &tag->t_ip;
-
- uint8_t *mac, *token, token_len;
- uint32_t sender;
-
- /* Get token from source IP address */
- token = AF_VAL(ip->sa.sa_family,
- (uint8_t *) &satosin(&ip->sa)->sin_addr,
- (uint8_t *) &satosin6(&ip->sa)->sin6_addr);
- /* Use upper 8 octets for INET6, so we filter based on a /64 subnet */
- token_len = AF_VAL(ip->sa.sa_family, 4, 8);
-
- if (token == NULL)
- panic("invalid af");
-
- /* If we don't want to rate limit, return OK */
- if (!bb_recv(&wg_bb, token, token_len))
- return 0;
-
- /*
- * From here on, the peer has been potentially sending many
- * packets. We need to verify if they are just spoofing IP src
- * addresses, or they received a false positive from wg_bb.
- */
-
- /* Calculate cookie and mac from packet */
- wg_cookie_from_token(&c, &sc->sc_dev.d_cookie_maker, token, token_len);
-
- switch (wg_pkt_type(mtod(m, uint8_t *), m->m_pkthdr.len)) {
- case WG_PKT_INITIATION:
- init = mtod(m, struct wg_msg_initiation *);
- e = wg_msg_initiation_valid_mac2(init, &c);
- sender = init->sender;
- mac = init->mac1;
- break;
- case WG_PKT_RESPONSE:
- resp = mtod(m, struct wg_msg_response *);
- e = wg_msg_response_valid_mac2(resp, &c);
- sender = resp->sender;
- mac = resp->mac1;
- break;
- default:
- panic("only ratelimit initiation and response");
- }
-
- /* If mac is invalid, and wg_bb is under high load, it is likely a
- * bruteforce attack with a spoofed source address. We can use the
- * cookie to validate the source address. TODO calcluate a good
- * default value for high load, rather than just 10. */
- if (bb_load(&wg_bb) > 10 && e == WG_MAC) {
- int error;
- struct socket *so;
- struct mbuf peernam;
- struct mbuf *m = m_clget(NULL, M_WAITOK,
- sizeof(struct wg_msg_cookie));
- struct wg_msg_cookie *cookie = mtod(m, struct wg_msg_cookie *);
-
- /* TODO print ip */
- DPRINTF(sc, "transmit cookie %d\n", 0);
-
- wg_device_tx_cookie(&sc->sc_dev, &c, sender, mac, cookie);
-
- bzero(&peernam, sizeof(struct mbuf));
-
- peernam.m_type = MT_SONAME;
- peernam.m_data = (caddr_t) ip;
- peernam.m_len = ip->sa.sa_len;
-
- so = AF_VAL(ip->sa.sa_family, sc->sc_so4, sc->sc_so6);
- int s = solock(so);
- if (so) {
- if ((error = so->so_proto->pr_usrreq(so, PRU_SEND, m,
- &peernam, NULL, NULL)) != 0)
- DPRINTF(sc, "unable to send: %d\n", error);
- }
- sounlock(so, s);
- }
-
- /* If we get to here, we either have a valid packet, or we are under
- * an attack from a coordinated bruteforce attack, where the attacker
- * has control of all the source addresses. For the time being, we
- * will just accept this and try to process the packet. */
-
- return 0;
-}
-
struct wg_tag *
wg_mbuf_get_tag(struct mbuf *m)
{
@@ -583,8 +1800,8 @@ wg_decrypt_hs(struct mbuf *m)
switch (tag->t_type) {
case WG_PKT_INITIATION:
- if (wg_mbuf_ratelimit(tag->t_sc, m))
- drop_pkt_err(WG_RATELIMIT);
+ /* if (wg_mbuf_ratelimit(tag->t_sc, m))
+ drop_pkt_err(WG_RATELIMIT); */
if ((err = wg_device_rx_initiation(&tag->t_sc->sc_dev,
mtod(m, struct wg_msg_initiation *), &session)) != WG_OK)
@@ -592,8 +1809,8 @@ wg_decrypt_hs(struct mbuf *m)
break;
case WG_PKT_RESPONSE:
- if (wg_mbuf_ratelimit(tag->t_sc, m))
- drop_pkt_err(WG_RATELIMIT);
+ /* if (wg_mbuf_ratelimit(tag->t_sc, m))
+ drop_pkt_err(WG_RATELIMIT); */
if ((err = wg_device_rx_response(&tag->t_sc->sc_dev,
mtod(m, struct wg_msg_response *), &session)) != WG_OK)
@@ -894,8 +2111,6 @@ void
wgattach(int nwg)
{
if_clone_attach(&wg_cloner);
- /* entries: 1024, keys: 3, rate: 5sec, threshold: 5 */
- bb_init(&wg_bb, 1024, 3, 5, 5, M_DEVBUF, M_WAITOK);
}
int