path: root/src/hashtables.c
author     Jason A. Donenfeld <Jason@zx2c4.com>   2018-08-23 11:35:55 -0700
committer  Jason A. Donenfeld <Jason@zx2c4.com>   2018-08-28 23:20:13 -0600
commit     f103d1e2b420231c37684750cd36a825425fc313 (patch)
tree       4a8d86eda58d149d85a2d99a53e9b2370fc58b5f /src/hashtables.c
parent     wg-quick: check correct variable for route deduplication (diff)
global: run through clang-format
This is the worst commit in the whole repo, making the code much less readable, but so it goes with upstream maintainers. We are now woefully wrapped at 80 columns.
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  105
1 file changed, 69 insertions(+), 36 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index ac6df59..4ba2288 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -7,12 +7,16 @@
#include "peer.h"
#include "noise.h"
-static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
- /* siphash gives us a secure 64bit number based on a random key. Since the bits are
- * uniformly distributed, we can then mask off to get the bits we need.
+ /* siphash gives us a secure 64bit number based on a random key. Since
+ * the bits are uniformly distributed, we can then mask off to get the
+ * bits we need.
*/
- return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ return &table->hashtable[
+ siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key) &
+ (HASH_SIZE(table->hashtable) - 1)];
}
void pubkey_hashtable_init(struct pubkey_hashtable *table)
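The idiom this hunk rewraps is worth spelling out: when the bucket count is a power of two, masking with (size - 1) is the same as taking the hash modulo size, and because siphash output bits are uniformly distributed, the low bits alone index buckets uniformly. A minimal userspace sketch, with toy_hash standing in for siphash and all toy_* names invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TABLE_BITS 8
#define TOY_TABLE_SIZE (1u << TOY_TABLE_BITS) /* power of two, like HASH_SIZE() */

/* Stand-in for siphash: any keyed function with uniform output bits works. */
static uint64_t toy_hash(const uint8_t *key, size_t len, uint64_t seed)
{
	uint64_t h = seed ^ 0x9e3779b97f4a7c15ull;

	for (size_t i = 0; i < len; ++i) {
		h ^= key[i];
		h *= 0x100000001b3ull; /* FNV-style mixing, illustration only */
	}
	return h;
}

static unsigned int bucket_of(const uint8_t *pubkey, size_t len, uint64_t seed)
{
	/* Masking keeps the low TOY_TABLE_BITS bits; with a uniform hash
	 * this selects every bucket with equal probability.
	 */
	return (unsigned int)(toy_hash(pubkey, len, seed) & (TOY_TABLE_SIZE - 1));
}

int main(void)
{
	uint8_t pubkey[32] = { 1, 2, 3 };

	printf("bucket = %u\n", bucket_of(pubkey, sizeof(pubkey), 0xdeadbeef));
	return 0;
}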
@@ -22,14 +26,17 @@ void pubkey_hashtable_init(struct pubkey_hashtable *table)
mutex_init(&table->lock);
}
-void pubkey_hashtable_add(struct pubkey_hashtable *table, struct wireguard_peer *peer)
+void pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
- hlist_add_head_rcu(&peer->pubkey_hash, pubkey_bucket(table, peer->handshake.remote_static));
+ hlist_add_head_rcu(&peer->pubkey_hash,
+ pubkey_bucket(table, peer->handshake.remote_static));
mutex_unlock(&table->lock);
}
-void pubkey_hashtable_remove(struct pubkey_hashtable *table, struct wireguard_peer *peer)
+void pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_del_init_rcu(&peer->pubkey_hash);
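Both writer paths above take table->lock around a single list-head operation; readers never take it. A hedged userspace analogue of just the writer side, using a pthread mutex in place of the kernel mutex and singly linked lists in place of hlists (no RCU; toy_* names invented):

#include <pthread.h>
#include <stddef.h>

#define TOY_BUCKETS 256

struct toy_peer {
	struct toy_peer *next;
	unsigned char pubkey[32];
};

struct toy_table {
	struct toy_peer *buckets[TOY_BUCKETS];
	pthread_mutex_t lock;
};

void toy_add(struct toy_table *t, struct toy_peer *p, unsigned int bucket)
{
	pthread_mutex_lock(&t->lock);
	/* Head insertion mirrors hlist_add_head_rcu(): the new element
	 * becomes reachable with one pointer store.
	 */
	p->next = t->buckets[bucket];
	t->buckets[bucket] = p;
	pthread_mutex_unlock(&t->lock);
}

void toy_remove(struct toy_table *t, struct toy_peer *p, unsigned int bucket)
{
	pthread_mutex_lock(&t->lock);
	for (struct toy_peer **pp = &t->buckets[bucket]; *pp; pp = &(*pp)->next) {
		if (*pp == p) {
			*pp = p->next; /* unlink, like hlist_del_init_rcu() */
			p->next = NULL;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
}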
@@ -37,13 +44,17 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table, struct wireguard_pe
}
/* Returns a strong reference to a peer */
-struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+struct wireguard_peer *
+pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wireguard_peer *iter_peer, *peer = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
- if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
+ hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
+ if (!memcmp(pubkey, iter_peer->handshake.remote_static,
+ NOISE_PUBLIC_KEY_LEN)) {
peer = iter_peer;
break;
}
@@ -53,12 +64,14 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
return peer;
}
-static inline struct hlist_head *index_bucket(struct index_hashtable *table, const __le32 index)
+static inline struct hlist_head *index_bucket(struct index_hashtable *table,
+ const __le32 index)
{
- /* Since the indices are random and thus all bits are uniformly distributed,
- * we can find its bucket simply by masking.
+ /* Since the indices are random and thus all bits are uniformly
+ * distributed, we can find its bucket simply by masking.
*/
- return &table->hashtable[(__force u32)index & (HASH_SIZE(table->hashtable) - 1)];
+ return &table->hashtable[(__force u32)index &
+ (HASH_SIZE(table->hashtable) - 1)];
}
void index_hashtable_init(struct index_hashtable *table)
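Unlike pubkey_bucket(), there is no hashing step here: the indices are generated uniformly at random in the first place, so masking the raw value is enough. A sketch of the same idiom with a sizeof-based size macro in the spirit of the kernel's HASH_SIZE() (all toy_* names invented):

#include <stdint.h>

struct toy_head {
	void *first;
};

/* Bucket count must be a power of two for the mask to act as a modulo. */
static struct toy_head toy_index_buckets[1 << 10];

#define TOY_HASH_SIZE(table) (sizeof(table) / sizeof((table)[0]))

struct toy_head *toy_index_bucket(uint32_t index)
{
	/* The real code receives a __le32 straight off the wire; here we
	 * assume a host-order random value. Uniform bits in, uniform
	 * bucket choice out.
	 */
	return &toy_index_buckets[index & (TOY_HASH_SIZE(toy_index_buckets) - 1)];
}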
@@ -67,9 +80,10 @@ void index_hashtable_init(struct index_hashtable *table)
spin_lock_init(&table->lock);
}
-/* At the moment, we limit ourselves to 2^20 total peers, which generally might amount to 2^20*3
- * items in this hashtable. The algorithm below works by picking a random number and testing it.
- * We can see that these limits mean we usually succeed pretty quickly:
+/* At the moment, we limit ourselves to 2^20 total peers, which generally might
+ * amount to 2^20*3 items in this hashtable. The algorithm below works by
+ * picking a random number and testing it. We can see that these limits mean we
+ * usually succeed pretty quickly:
*
* >>> def calculation(tries, size):
* ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
@@ -83,13 +97,15 @@ void index_hashtable_init(struct index_hashtable *table)
* >>> calculation(4, 2**20 * 3)
* 3.9261394135792216e-10
*
- * At the moment, we don't do any masking, so this algorithm isn't exactly constant time in
- * either the random guessing or in the hash list lookup. We could require a minimum of 3
- * tries, which would successfully mask the guessing. TODO: this would not, however, help
- * with the growing hash lengths.
+ * At the moment, we don't do any masking, so this algorithm isn't exactly
+ * constant time in either the random guessing or in the hash list lookup. We
+ * could require a minimum of 3 tries, which would successfully mask the
+ * guessing. this would not, however, help with the growing hash lengths, which
+ * is another thing to consider moving forward.
*/
-__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
+__le32 index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
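The numbers in the comment above are easy to reproduce: calculation(tries, size) is the probability that the random search needs exactly `tries` attempts, i.e. that the first tries - 1 guesses collide (each with probability size / 2^32) and the final one does not. A standalone check in C (link with -lm):

#include <math.h>
#include <stdio.h>

/* Probability that exactly `tries` random 32-bit guesses are needed when
 * `size` of the 2^32 possible index values are already occupied.
 */
static double calculation(int tries, double size)
{
	double p_collide = size / 4294967296.0; /* size / 2^32 */

	return pow(p_collide, tries - 1) * (1.0 - p_collide);
}

int main(void)
{
	printf("%.16g\n", calculation(1, 1048576.0 * 3)); /* ~0.999267578125 */
	printf("%.16g\n", calculation(4, 1048576.0 * 3)); /* ~3.9261394e-10 */
	return 0;
}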
@@ -102,23 +118,32 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
- hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
+ hlist_for_each_entry_rcu_bh (existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index)
- goto search_unused_slot; /* If it's already in use, we continue searching. */
+ /* If it's already in use, we continue searching. */
+ goto search_unused_slot;
}
/* Once we've found an unused slot, we lock it, and then double-check
* that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
- hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
+ hlist_for_each_entry_rcu_bh (existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index) {
spin_unlock_bh(&table->lock);
- goto search_unused_slot; /* If it was stolen, we start over. */
+ /* If it was stolen, we start over. */
+ goto search_unused_slot;
}
}
- /* Otherwise, we know we have it exclusively (since we're locked), so we insert. */
- hlist_add_head_rcu(&entry->index_hash, index_bucket(table, entry->index));
+ /* Otherwise, we know we have it exclusively (since we're locked),
+ * so we insert.
+ */
+ hlist_add_head_rcu(&entry->index_hash,
+ index_bucket(table, entry->index));
spin_unlock_bh(&table->lock);
rcu_read_unlock_bh();
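Behind the rewrapping, the control flow here is a classic optimistic probe: search unlocked on the fast path, then lock and re-check before committing. A hedged single-file sketch of that shape, with a pthread mutex for the spinlock and rand() for get_random_u32() (rand() is not actually uniform over 32 bits, and the unlocked walk is only safe in the kernel because of RCU; in plain userspace C it would be a data race without atomics):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define TOY_NBUCKETS 256 /* power of two */

struct toy_entry {
	struct toy_entry *next;
	uint32_t index;
};

static struct toy_entry *toy_buckets[TOY_NBUCKETS];
static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

static struct toy_entry *toy_find(uint32_t index)
{
	struct toy_entry *e;

	for (e = toy_buckets[index & (TOY_NBUCKETS - 1)]; e; e = e->next) {
		if (e->index == index)
			return e;
	}
	return NULL;
}

uint32_t toy_insert(struct toy_entry *entry)
{
	for (;;) {
		/* Unlocked probe with a fresh random guess. */
		entry->index = (uint32_t)rand();
		if (toy_find(entry->index))
			continue; /* already in use: guess again */

		/* Lock and double-check that nobody stole the slot between
		 * the probe above and taking the lock.
		 */
		pthread_mutex_lock(&toy_lock);
		if (toy_find(entry->index)) {
			pthread_mutex_unlock(&toy_lock);
			continue; /* stolen: start over */
		}
		entry->next = toy_buckets[entry->index & (TOY_NBUCKETS - 1)];
		toy_buckets[entry->index & (TOY_NBUCKETS - 1)] = entry;
		pthread_mutex_unlock(&toy_lock);
		return entry->index;
	}
}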
@@ -126,7 +151,9 @@ search_unused_slot:
return entry->index;
}
-bool index_hashtable_replace(struct index_hashtable *table, struct index_hashtable_entry *old, struct index_hashtable_entry *new)
+bool index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
{
if (unlikely(hlist_unhashed(&old->index_hash)))
return false;
@@ -134,17 +161,19 @@ bool index_hashtable_replace(struct index_hashtable *table, struct index_hashtab
new->index = old->index;
hlist_replace_rcu(&old->index_hash, &new->index_hash);
- /* Calling init here NULLs out index_hash, and in fact after this function returns,
- * it's theoretically possible for this to get reinserted elsewhere. That means
- * the RCU lookup below might either terminate early or jump between buckets, in which
- * case the packet simply gets dropped, which isn't terrible.
+ /* Calling init here NULLs out index_hash, and in fact after this
+ * function returns, it's theoretically possible for this to get
+ * reinserted elsewhere. That means the RCU lookup below might either
+ * terminate early or jump between buckets, in which case the packet
+ * simply gets dropped, which isn't terrible.
*/
INIT_HLIST_NODE(&old->index_hash);
spin_unlock_bh(&table->lock);
return true;
}
-void index_hashtable_remove(struct index_hashtable *table, struct index_hashtable_entry *entry)
+void index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
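index_hashtable_replace() swaps new in for old at the same index without the index ever vanishing from the table, then re-initializes old's node so that a later remove on it is harmless. A toy singly-linked analogue under a mutex (hlist_replace_rcu() additionally publishes the swap to concurrent readers with a single release store, which this plain pointer surgery does not model; toy_* names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct toy_entry {
	struct toy_entry *next;
	uint32_t index;
};

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

bool toy_replace(struct toy_entry **head, struct toy_entry *old,
		 struct toy_entry *new_entry)
{
	pthread_mutex_lock(&toy_lock);
	for (struct toy_entry **pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == old) {
			new_entry->index = old->index; /* keep the same index */
			new_entry->next = old->next;
			*pp = new_entry; /* one store swaps it in */
			/* Like INIT_HLIST_NODE(): a later remove of `old`
			 * becomes a no-op instead of corrupting the list.
			 */
			old->next = NULL;
			pthread_mutex_unlock(&toy_lock);
			return true;
		}
	}
	pthread_mutex_unlock(&toy_lock);
	return false;
}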
@@ -152,12 +181,16 @@ void index_hashtable_remove(struct index_hashtable *table, struct index_hashtabl
}
/* Returns a strong reference to a entry->peer */
-struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *table, const enum index_hashtable_type type_mask, const __le32 index, struct wireguard_peer **peer)
+struct index_hashtable_entry *
+index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wireguard_peer **peer)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
+ hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index),
+ index_hash) {
if (iter_entry->index == index) {
if (likely(iter_entry->type & type_mask))
entry = iter_entry;
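The type_mask test in this final hunk is what lets one lookup routine serve several kinds of entries: a matching index only counts if the entry's type bit is set in the caller's mask. A small self-contained illustration of that bitmask filter (the enum values here are hypothetical, not the driver's actual constants):

#include <stddef.h>
#include <stdint.h>

enum toy_type {
	TOY_TYPE_A = 1U << 0,
	TOY_TYPE_B = 1U << 1,
};

struct toy_entry {
	struct toy_entry *next;
	uint32_t index;
	enum toy_type type;
};

struct toy_entry *toy_masked_lookup(struct toy_entry *head, uint32_t index,
				    unsigned int type_mask)
{
	for (struct toy_entry *e = head; e; e = e->next) {
		/* Same index but wrong type is treated as no match. */
		if (e->index == index && (e->type & type_mask))
			return e;
	}
	return NULL;
}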