path: root/src/hashtables.c
author	Jason A. Donenfeld <Jason@zx2c4.com>	2017-04-03 05:20:25 +0200
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2017-04-04 03:44:35 +0200
commit	293e9d604f949db9501d0ce01570350198e59c0b (patch)
tree	d9d02ae5be1ebc9eb7e76c90baa781e45e9189fe	/src/hashtables.c
parent	qemu: new stable kernel (diff)
download	wireguard-monolithic-historical-293e9d604f949db9501d0ce01570350198e59c0b.tar.xz
	wireguard-monolithic-historical-293e9d604f949db9501d0ce01570350198e59c0b.zip
locking: always use _bh
All locks are potentially between user context and softirq, which means we need to take the _bh variant.
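To see why the plain variants are unsafe here, consider a minimal sketch (hypothetical names, not part of this patch): the hash tables are reached both from user context (configuration) and from softirq context (packet processing). If user context takes a spinlock without disabling bottom halves, a softirq arriving on the same CPU can spin on that same lock and deadlock. spin_lock_bh() disables softirqs locally while the lock is held, and rcu_read_lock_bh() / hlist_for_each_entry_rcu_bh() are the matching RCU read-side forms.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock shared by both contexts */

static void config_path(void)		/* runs in user/process context */
{
	spin_lock_bh(&example_lock);	/* also disables softirqs on this CPU */
	/* ... modify data that the receive path reads ... */
	spin_unlock_bh(&example_lock);
}

static void receive_path(void)		/* runs in softirq context */
{
	spin_lock(&example_lock);	/* already in softirq, the plain lock suffices */
	/* ... read or update the same data ... */
	spin_unlock(&example_lock);
}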
Diffstat (limited to '')
-rw-r--r--	src/hashtables.c	38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 4cb8441..efd7111 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -36,15 +36,15 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table, struct wireguard_pe
struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wireguard_peer *iter_peer, *peer = NULL;
- rcu_read_lock();
- hlist_for_each_entry_rcu(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
peer = iter_peer;
break;
}
}
peer = peer_get(peer);
- rcu_read_unlock();
+ rcu_read_unlock_bh();
return peer;
}
@@ -65,60 +65,60 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
{
struct index_hashtable_entry *existing_entry;
- spin_lock(&table->lock);
+ spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
- spin_unlock(&table->lock);
+ spin_unlock_bh(&table->lock);
- rcu_read_lock();
+ rcu_read_lock_bh();
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
- hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
+ hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */
}
/* Once we've found an unused slot, we lock it, and then double-check
* that nobody else stole it from us. */
- spin_lock(&table->lock);
- hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
+ spin_lock_bh(&table->lock);
+ hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index) {
- spin_unlock(&table->lock);
+ spin_unlock_bh(&table->lock);
goto search_unused_slot; /* If it was stolen, we start over. */
}
}
/* Otherwise, we know we have it exclusively (since we're locked), so we insert. */
hlist_add_head_rcu(&entry->index_hash, index_bucket(table, entry->index));
- spin_unlock(&table->lock);
+ spin_unlock_bh(&table->lock);
- rcu_read_unlock();
+ rcu_read_unlock_bh();
return entry->index;
}
void index_hashtable_replace(struct index_hashtable *table, struct index_hashtable_entry *old, struct index_hashtable_entry *new)
{
- spin_lock(&table->lock);
+ spin_lock_bh(&table->lock);
new->index = old->index;
hlist_replace_rcu(&old->index_hash, &new->index_hash);
INIT_HLIST_NODE(&old->index_hash);
- spin_unlock(&table->lock);
+ spin_unlock_bh(&table->lock);
}
void index_hashtable_remove(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
- spin_lock(&table->lock);
+ spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
- spin_unlock(&table->lock);
+ spin_unlock_bh(&table->lock);
}
/* Returns a strong reference to a entry->peer */
struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *table, const enum index_hashtable_type type_mask, const __le32 index)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
- rcu_read_lock();
- hlist_for_each_entry_rcu(iter_entry, index_bucket(table, index), index_hash) {
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
if (iter_entry->index == index && (iter_entry->type & type_mask)) {
entry = iter_entry;
break;
@@ -129,6 +129,6 @@ struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *tab
if (unlikely(!entry->peer))
entry = NULL;
}
- rcu_read_unlock();
+ rcu_read_unlock_bh();
return entry;
}
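
The insertion hunk above is also a compact example of an optimistic-then-verified pattern: pick a random index and scan the bucket under only the RCU read lock, then take the spinlock and re-check before publishing the entry. Condensed into a standalone sketch (bucket() and the entry layout are stand-ins, not the patch's exact identifiers):

search_again:
	entry->index = (__force __le32)get_random_u32();	/* optimistic, unlocked probe */
	hlist_for_each_entry_rcu_bh(existing, bucket(table, entry->index), index_hash)
		if (existing->index == entry->index)
			goto search_again;			/* index already in use */

	spin_lock_bh(&table->lock);				/* re-check under the lock */
	hlist_for_each_entry_rcu_bh(existing, bucket(table, entry->index), index_hash) {
		if (existing->index == entry->index) {
			spin_unlock_bh(&table->lock);
			goto search_again;			/* lost the race, start over */
		}
	}
	hlist_add_head_rcu(&entry->index_hash, bucket(table, entry->index));
	spin_unlock_bh(&table->lock);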