about summary refs log tree commit diff stats homepage
path: root/src/hashtables.c
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2018-10-08 03:54:28 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2018-10-08 18:51:34 +0200
commit780a5974d0256eed1e03a096b43f066043f8eb15 (patch)
tree3ba7923f145fd27706c11f956b1e485fd7dca5d6 /src/hashtables.c
parentglobal: rename struct wireguard_ to struct wg_ (diff)
downloadwireguard-monolithic-historical-780a5974d0256eed1e03a096b43f066043f8eb15.tar.xz
wireguard-monolithic-historical-780a5974d0256eed1e03a096b43f066043f8eb15.zip
global: more nits
Diffstat (limited to '')
-rw-r--r--src/hashtables.c20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 86b15a0..ee55b8e 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -51,8 +51,8 @@ wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
struct wg_peer *iter_peer, *peer = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey),
- pubkey_hash) {
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
if (!memcmp(pubkey, iter_peer->handshake.remote_static,
NOISE_PUBLIC_KEY_LEN)) {
peer = iter_peer;
@@ -118,9 +118,9 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
- hlist_for_each_entry_rcu_bh (existing_entry,
- index_bucket(table, entry->index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index)
/* If it's already in use, we continue searching. */
goto search_unused_slot;
@@ -130,9 +130,9 @@ search_unused_slot:
* that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
- hlist_for_each_entry_rcu_bh (existing_entry,
- index_bucket(table, entry->index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index) {
spin_unlock_bh(&table->lock);
/* If it was stolen, we start over. */
@@ -189,8 +189,8 @@ wg_index_hashtable_lookup(struct index_hashtable *table,
struct index_hashtable_entry *iter_entry, *entry = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
+ index_hash) {
if (iter_entry->index == index) {
if (likely(iter_entry->type & type_mask))
entry = iter_entry;