path: root/src/hashtables.c
author    Jason A. Donenfeld <Jason@zx2c4.com>    2017-10-25 17:13:46 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2017-10-31 17:25:23 +0100
commit    7491cd4c22601eb0c3bedfea740f2aec3b8372ed (patch)
tree      e5b8c3adf9e17e9abf3c028ea163ee7840659826 /src/hashtables.c
parent    peer: store total number of peers instead of iterating (diff)
global: infuriating kernel iterator style
One types:

    for (i = 0 ...

So one should also type:

    for_each_obj (obj ...

But the upstream kernel style guidelines are insane, and so we must instead do:

    for_each_obj(obj ...

Ugly, but one must choose his battles wisely.
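To make the contrast concrete, here is a minimal, self-contained C sketch of the two spellings the commit message argues about; struct node, for_each_node, and sum_nodes are hypothetical names for illustration only, not part of the WireGuard sources:

#include <stddef.h>

struct node {
	int value;
	struct node *next;
};

/* A for_each-style iterator macro expands to an ordinary for loop,
 * which is why the commit message wants it spelled like one. */
#define for_each_node(pos, head) \
	for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)

static int sum_nodes(struct node *head)
{
	struct node *n;
	int total = 0;

	/* The spelling the commit message would prefer, mirroring "for (i = 0 ...":
	 *     for_each_node (n, head)
	 * The upstream kernel style adopted throughout this diff drops the space: */
	for_each_node(n, head)
		total += n->value;
	return total;
}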
Diffstat (limited to 'src/hashtables.c')
-rw-r--r-- src/hashtables.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 4a3798c..a0c0c64 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -38,7 +38,7 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
 	struct wireguard_peer *iter_peer, *peer = NULL;
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
+	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
 		if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
 			peer = iter_peer;
 			break;
@@ -97,7 +97,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
 	entry->index = (__force __le32)get_random_u32();
-	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
 	}
@@ -105,7 +105,7 @@ search_unused_slot:
 	/* Once we've found an unused slot, we lock it, and then double-check
 	 * that nobody else stole it from us. */
 	spin_lock_bh(&table->lock);
-	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index) {
 			spin_unlock_bh(&table->lock);
 			goto search_unused_slot; /* If it was stolen, we start over. */
@@ -145,7 +145,7 @@ struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *tab
 	struct index_hashtable_entry *iter_entry, *entry = NULL;
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index), index_hash) {
+	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
 		if (iter_entry->index == index) {
 			if (likely(iter_entry->type & type_mask))
 				entry = iter_entry;
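The insert path shown in the middle two hunks follows an optimistic pattern: pick a random index and scan the bucket without the lock, then take the lock and re-check before committing. Below is a minimal userspace sketch of that control flow, assuming a fixed-size bucket array and a pthread mutex in place of the kernel's hlist/RCU and spinlock primitives; insert_entry, bucket_lookup, and NUM_BUCKETS are made-up names for illustration, and a real concurrent reader would also need RCU-style protection for the unlocked scan.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define NUM_BUCKETS 256

struct entry {
	uint32_t index;
	struct entry *next;
};

static struct entry *buckets[NUM_BUCKETS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *bucket_lookup(uint32_t index)
{
	struct entry *e;

	for (e = buckets[index % NUM_BUCKETS]; e; e = e->next)
		if (e->index == index)
			return e;
	return NULL;
}

static void insert_entry(struct entry *new_entry)
{
	for (;;) {
		/* First try to find an unused index, randomly, without the lock. */
		new_entry->index = (uint32_t)rand();
		if (bucket_lookup(new_entry->index))
			continue;

		/* Then lock and double-check that nobody stole the slot meanwhile. */
		pthread_mutex_lock(&table_lock);
		if (bucket_lookup(new_entry->index)) {
			pthread_mutex_unlock(&table_lock);
			continue;
		}
		new_entry->next = buckets[new_entry->index % NUM_BUCKETS];
		buckets[new_entry->index % NUM_BUCKETS] = new_entry;
		pthread_mutex_unlock(&table_lock);
		return;
	}
}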