author     Matt Dunwoodie <ncon@noconroy.net>   2020-06-17 17:26:03 +1000
committer  Matt Dunwoodie <ncon@noconroy.net>   2020-06-17 17:26:03 +1000
commit     65e11a878a402275a5be09b986e5b003c17e6596 (patch)
tree       defd06130a8bfd8e86d93d148a316cce7ed81438
parent     Replace hacky branch prediction with __predict_false (diff)
Replace sc_index_lock with sc_index_mtx
Since wg_index_get is called from wg_input (and thus from other parts of the network stack), we want to use a mutex rather than an rwlock so as not to introduce any new sleep points. This also fixes a minor bug where sc_index_lock was never initialised with rw_init (the call passed sc_peer_lock instead); that had no impact in practice, as 'sc' was already zeroed.
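For background on the two primitives: OpenBSD's rwlocks (rw_enter_read/rw_enter_write) may sleep, while mutex(9) locks (mtx_enter/mtx_leave) raise the system priority level to the IPL given at mtx_init() and spin, so they are safe to take from the packet-input path. Below is a minimal sketch of the pattern this commit adopts, not code from the commit itself; the names (example_index_table, example_lookup, e_key) are hypothetical, for illustration only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct example_entry {
        LIST_ENTRY(example_entry)       e_entry;
        uint32_t                        e_key;
};

struct example_index_table {
        struct mutex                    t_mtx;  /* protects t_list */
        LIST_HEAD(, example_entry)      t_list;
};

void
example_init(struct example_index_table *t)
{
        /* IPL_NET: the lock may be taken from the network stack. */
        mtx_init(&t->t_mtx, IPL_NET);
        LIST_INIT(&t->t_list);
}

struct example_entry *
example_lookup(struct example_index_table *t, uint32_t key)
{
        struct example_entry *iter, *found = NULL;

        mtx_enter(&t->t_mtx);           /* spins, never sleeps */
        LIST_FOREACH(iter, &t->t_list, e_entry)
                if (iter->e_key == key) {
                        found = iter;
                        break;
                }
        mtx_leave(&t->t_mtx);
        return found;
}

Because mtx_enter() only spins, holding the mutex across the short hash-bucket walk adds no sleep point, which is exactly the property the commit message is after.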
-rw-r--r--   src/if_wg.c   16
1 file changed, 8 insertions, 8 deletions
diff --git a/src/if_wg.c b/src/if_wg.c
index 78e78aad482..06ae1a05d73 100644
--- a/src/if_wg.c
+++ b/src/if_wg.c
@@ -249,7 +249,7 @@ struct wg_softc {
         TAILQ_HEAD(,wg_peer)     sc_peer_seq;
         u_long                   sc_peer_mask;
 
-        struct rwlock            sc_index_lock;
+        struct mutex             sc_index_mtx;
         LIST_HEAD(,wg_index)    *sc_index;
         u_long                   sc_index_mask;
@@ -1928,7 +1928,7 @@ wg_index_set(void *_sc, struct noise_remote *remote)
 
         index->i_value = remote;
 
-        rw_enter_write(&sc->sc_index_lock);
+        mtx_enter(&sc->sc_index_mtx);
 assign_id:
         key = index->i_key = arc4random();
         key &= sc->sc_index_mask;
@@ -1938,7 +1938,7 @@ assign_id:
 
         LIST_INSERT_HEAD(&sc->sc_index[key], index, i_entry);
 
-        rw_exit_write(&sc->sc_index_lock);
+        mtx_leave(&sc->sc_index_mtx);
 
         /* Likewise, no need to lock for index here. */
         return index->i_key;
@@ -1952,13 +1952,13 @@ wg_index_get(void *_sc, uint32_t key0)
         struct noise_remote *remote = NULL;
         uint32_t key = key0 & sc->sc_index_mask;
 
-        rw_enter_read(&sc->sc_index_lock);
+        mtx_enter(&sc->sc_index_mtx);
         LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
                 if (iter->i_key == key0) {
                         remote = iter->i_value;
                         break;
                 }
-        rw_exit_read(&sc->sc_index_lock);
+        mtx_leave(&sc->sc_index_mtx);
 
         return remote;
 }
@@ -1970,13 +1970,13 @@ wg_index_drop(void *_sc, uint32_t key0)
         struct wg_peer *peer = NULL;
         uint32_t key = key0 & sc->sc_index_mask;
 
-        rw_enter_write(&sc->sc_index_lock);
+        mtx_enter(&sc->sc_index_mtx);
         LIST_FOREACH(iter, &sc->sc_index[key], i_entry)
                 if (iter->i_key == key0) {
                         LIST_REMOVE(iter, i_entry);
                         break;
                 }
-        rw_exit_write(&sc->sc_index_lock);
+        mtx_leave(&sc->sc_index_mtx);
 
         /* We expect a peer */
         peer = CONTAINER_OF(iter->i_value, struct wg_peer, p_remote);
@@ -2620,7 +2620,7 @@ wg_clone_create(struct if_clone *ifc, int unit)
             M_NOWAIT, &sc->sc_peer_mask)) == NULL)
                 goto ret_04;
 
-        rw_init(&sc->sc_peer_lock, "wg_index");
+        mtx_init(&sc->sc_index_mtx, IPL_NET);
         if ((sc->sc_index = hashinit(HASHTABLE_INDEX_SIZE, M_DEVBUF,
             M_NOWAIT, &sc->sc_index_mask)) == NULL)
                 goto ret_05;