From 293e9d604f949db9501d0ce01570350198e59c0b Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Mon, 3 Apr 2017 05:20:25 +0200
Subject: locking: always use _bh

All locks are potentially between user context and softirq, which means we
need to take the _bh variant.
---
 src/hashtables.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

(limited to 'src/hashtables.c')

diff --git a/src/hashtables.c b/src/hashtables.c
index 4cb8441..efd7111 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -36,15 +36,15 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table, struct wireguard_pe
 struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
 {
 	struct wireguard_peer *iter_peer, *peer = NULL;
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
+	rcu_read_lock_bh();
+	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
 		if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
 			peer = iter_peer;
 			break;
 		}
 	}
 	peer = peer_get(peer);
-	rcu_read_unlock();
+	rcu_read_unlock_bh();
 	return peer;
 }
@@ -65,60 +65,60 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 {
 	struct index_hashtable_entry *existing_entry;

-	spin_lock(&table->lock);
+	spin_lock_bh(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
-	spin_unlock(&table->lock);
+	spin_unlock_bh(&table->lock);

-	rcu_read_lock();
+	rcu_read_lock_bh();

 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
 	entry->index = (__force __le32)get_random_u32();
-	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
 	}

 	/* Once we've found an unused slot, we lock it, and then double-check
 	 * that nobody else stole it from us. */
-	spin_lock(&table->lock);
-	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
+	spin_lock_bh(&table->lock);
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index) {
-			spin_unlock(&table->lock);
+			spin_unlock_bh(&table->lock);
 			goto search_unused_slot; /* If it was stolen, we start over. */
 		}
 	}

 	/* Otherwise, we know we have it exclusively (since we're locked), so we insert. */
 	hlist_add_head_rcu(&entry->index_hash, index_bucket(table, entry->index));
-	spin_unlock(&table->lock);
+	spin_unlock_bh(&table->lock);

-	rcu_read_unlock();
+	rcu_read_unlock_bh();

 	return entry->index;
 }

 void index_hashtable_replace(struct index_hashtable *table, struct index_hashtable_entry *old, struct index_hashtable_entry *new)
 {
-	spin_lock(&table->lock);
+	spin_lock_bh(&table->lock);
 	new->index = old->index;
 	hlist_replace_rcu(&old->index_hash, &new->index_hash);
 	INIT_HLIST_NODE(&old->index_hash);
-	spin_unlock(&table->lock);
+	spin_unlock_bh(&table->lock);
 }

 void index_hashtable_remove(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
-	spin_lock(&table->lock);
+	spin_lock_bh(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
-	spin_unlock(&table->lock);
+	spin_unlock_bh(&table->lock);
 }

 /* Returns a strong reference to a entry->peer */
 struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *table, const enum index_hashtable_type type_mask, const __le32 index)
 {
 	struct index_hashtable_entry *iter_entry, *entry = NULL;
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(iter_entry, index_bucket(table, index), index_hash) {
+	rcu_read_lock_bh();
+	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
 		if (iter_entry->index == index && (iter_entry->type & type_mask)) {
 			entry = iter_entry;
 			break;
@@ -129,6 +129,6 @@ struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtab
 		if (unlikely(!entry->peer))
 			entry = NULL;
 	}
-	rcu_read_unlock();
+	rcu_read_unlock_bh();
 	return entry;
 }
--
cgit v1.2.3-59-g8ed1b
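
For context, the commit message states the standard rule this patch applies: when a lock (or RCU read side) can be entered from both process context and softirq context, the process-context side must disable local bottom halves while holding it, otherwise a softirq arriving on the same CPU while the lock is held can spin on it and deadlock. The sketch below is not part of the patch; it is a minimal, hypothetical illustration of the _bh pattern, and the names example_lock, update_from_process_context, and lookup_from_either_context are made up for illustration.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical lock shared between process context and softirq context. */
static DEFINE_SPINLOCK(example_lock);

static void update_from_process_context(void)
{
	/* The _bh variant disables softirqs on this CPU before taking the
	 * lock, so a softirq cannot interrupt us and spin on example_lock. */
	spin_lock_bh(&example_lock);
	/* ... modify the shared structure ... */
	spin_unlock_bh(&example_lock);
}

static void lookup_from_either_context(void)
{
	/* rcu_read_lock_bh() likewise disables local softirqs and marks an
	 * RCU-bh read-side critical section, pairing with the _bh iterators
	 * such as hlist_for_each_entry_rcu_bh() used in the patch. */
	rcu_read_lock_bh();
	/* ... walk the RCU-protected list and take a reference if found ... */
	rcu_read_unlock_bh();
}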