author    Jonathan Neuschäfer <j.neuschaefer@gmx.net>    2018-07-21 03:34:46 +0200
committer Jonathan Neuschäfer <j.neuschaefer@gmx.net>    2018-07-21 03:34:46 +0200
commit    9d0d777b2bbe4358e1b64b4d09683371ed0ccca9 (patch)
tree      c1f4af8acf1a135705abbd2b6bbc22618d17bb3c /src/hashtables.c
parent    receive: check against proper return value type (diff)
hashtables: switch to rhashtable (jn/rhashtable)
NOTE: Due to a limitation in the rhashtable API, the siphash key (or "seed") is reduced to 32 bits of randomness, rather than the 128 bits that were used before.

Signed-off-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
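For context, a minimal sketch (not part of the patch) of why the seed shrinks: rhashtable's custom .hashfn callback only receives a u32 seed alongside the key data and length, so at most 32 of siphash's 128 key bits can be randomized. The patch widens that seed into a siphash_key_t whose remaining bits stay zero, as pubkey_hashfn does in the diff below; the function name here is illustrative only.

    #include <linux/siphash.h>
    #include <linux/rhashtable.h>

    /* Illustrative only: rhashtable hands the hash callback a u32 seed, so at
     * most 32 of the 128 siphash key bits can be random; the rest stay zero.
     */
    static u32 example_seeded_hashfn(const void *data, u32 len, u32 seed)
    {
    	siphash_key_t key = { .key = { seed } };	/* key[0] = seed, key[1] = 0 */

    	return siphash(data, len, &key);	/* u64 result truncated to rhashtable's u32 */
    }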
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--    src/hashtables.c    123
1 file changed, 67 insertions, 56 deletions
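One practical consequence of the change, sketched below under the assumption of a hypothetical caller (the function and variable names here are illustrative, not from this patch): pubkey_hashtable_init() and index_hashtable_init() now return int because rhashtable_init() can fail, so callers have to check the result and unwind rather than treating initialization as infallible.

    /* Hypothetical caller sketch, not part of this patch. */
    static int example_setup_tables(struct pubkey_hashtable *peers,
    				struct index_hashtable *indices)
    {
    	int ret;

    	ret = pubkey_hashtable_init(peers);	/* may fail, e.g. with -ENOMEM */
    	if (ret < 0)
    		return ret;

    	ret = index_hashtable_init(indices);
    	if (ret < 0) {
    		pubkey_hashtable_cleanup(peers);	/* unwind the table already set up */
    		return ret;
    	}

    	return 0;
    }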
diff --git a/src/hashtables.c b/src/hashtables.c
index 0e5235d..a549fb6 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -7,66 +7,84 @@
#include "peer.h"
#include "noise.h"
-static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+static u32 pubkey_hashfn(const void *data, u32 len, u32 seed)
{
- /* siphash gives us a secure 64bit number based on a random key. Since the bits are
- * uniformly distributed, we can then mask off to get the bits we need.
- */
- return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ /* The rhashtable API only allows 32 bits of seed, while siphash expects 128 bits. */
+ siphash_key_t key = { .key = { seed } };
+
+ return siphash(data, len, &key);
+}
+
+static const struct rhashtable_params pubkey_params = {
+ .key_offset = offsetof(struct wireguard_peer, handshake.remote_static),
+ .key_len = FIELD_SIZEOF(struct wireguard_peer, handshake.remote_static),
+ .head_offset = offsetof(struct wireguard_peer, pubkey_hash),
+ .hashfn = pubkey_hashfn,
+ .automatic_shrinking = true,
+};
+
+int pubkey_hashtable_init(struct pubkey_hashtable *table)
+{
+ return rhashtable_init(&table->hashtable, &pubkey_params);
}
-void pubkey_hashtable_init(struct pubkey_hashtable *table)
+void pubkey_hashtable_cleanup(struct pubkey_hashtable *table)
{
- get_random_bytes(&table->key, sizeof(table->key));
- hash_init(table->hashtable);
- mutex_init(&table->lock);
+ rhashtable_destroy(&table->hashtable);
}
void pubkey_hashtable_add(struct pubkey_hashtable *table, struct wireguard_peer *peer)
{
- mutex_lock(&table->lock);
- hlist_add_head_rcu(&peer->pubkey_hash, pubkey_bucket(table, peer->handshake.remote_static));
- mutex_unlock(&table->lock);
+ /* TODO: does this really work with hash collisions? */
+ rhashtable_insert_fast(&table->hashtable, &peer->pubkey_hash, pubkey_params);
}
void pubkey_hashtable_remove(struct pubkey_hashtable *table, struct wireguard_peer *peer)
{
- mutex_lock(&table->lock);
- hlist_del_init_rcu(&peer->pubkey_hash);
- mutex_unlock(&table->lock);
+ rhashtable_remove_fast(&table->hashtable, &peer->pubkey_hash, pubkey_params);
}
/* Returns a strong reference to a peer */
struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
- struct wireguard_peer *iter_peer, *peer = NULL;
+ struct wireguard_peer *peer;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
- if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
- peer = iter_peer;
- break;
- }
- }
- peer = peer_get(peer);
+ peer = peer_get(rhashtable_lookup_fast(&table->hashtable, pubkey, pubkey_params));
rcu_read_unlock_bh();
+
return peer;
}
-static inline struct hlist_head *index_bucket(struct index_hashtable *table, const __le32 index)
+static u32 index_hashfn(const void *data, u32 len, u32 seed)
{
- /* Since the indices are random and thus all bits are uniformly distributed,
- * we can find its bucket simply by masking.
- */
- return &table->hashtable[(__force u32)index & (HASH_SIZE(table->hashtable) - 1)];
+ BUG_ON(len != 4);
+ return *(u32 *)data;
}
-void index_hashtable_init(struct index_hashtable *table)
+static const struct rhashtable_params index_params = {
+ .key_offset = offsetof(struct index_hashtable_entry, index),
+ .key_len = FIELD_SIZEOF(struct index_hashtable_entry, index),
+ .head_offset = offsetof(struct index_hashtable_entry, index_hash),
+ .hashfn = index_hashfn,
+ .automatic_shrinking = true,
+};
+
+int index_hashtable_init(struct index_hashtable *table)
{
- hash_init(table->hashtable);
+ int ret;
+
+ ret = rhashtable_init(&table->hashtable, &index_params);
spin_lock_init(&table->lock);
+ return ret;
}
+void index_hashtable_cleanup(struct index_hashtable *table)
+{
+ rhashtable_destroy(&table->hashtable);
+}
+
+
/* At the moment, we limit ourselves to 2^20 total peers, which generally might amount to 2^20*3
* items in this hashtable. The algorithm below works by picking a random number and testing it.
* We can see that these limits mean we usually succeed pretty quickly:
@@ -93,32 +111,28 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
{
struct index_hashtable_entry *existing_entry;
- spin_lock_bh(&table->lock);
- hlist_del_init_rcu(&entry->index_hash);
- spin_unlock_bh(&table->lock);
-
rcu_read_lock_bh();
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
- hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
- if (existing_entry->index == entry->index)
- goto search_unused_slot; /* If it's already in use, we continue searching. */
+ existing_entry = rhashtable_lookup_fast(&table->hashtable, &entry->index, index_params);
+ if (existing_entry && existing_entry->index == entry->index) {
+ goto search_unused_slot; /* If it's already in use, we continue searching. */
}
/* Once we've found an unused slot, we lock it, and then double-check
* that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
- hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
- if (existing_entry->index == entry->index) {
- spin_unlock_bh(&table->lock);
- goto search_unused_slot; /* If it was stolen, we start over. */
- }
+ existing_entry = rhashtable_lookup_fast(&table->hashtable, &entry->index, index_params);
+ if (existing_entry && existing_entry->index == entry->index) {
+ spin_unlock_bh(&table->lock);
+ goto search_unused_slot; /* If it was stolen, we start over. */
}
+
/* Otherwise, we know we have it exclusively (since we're locked), so we insert. */
- hlist_add_head_rcu(&entry->index_hash, index_bucket(table, entry->index));
+ rhashtable_insert_fast(&table->hashtable, &entry->index_hash, index_params);
spin_unlock_bh(&table->lock);
rcu_read_unlock_bh();
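As a rough check of the "usually succeed pretty quickly" claim in the comment above (this arithmetic is editorial, not from the patch): with at most 3 * 2^20 indices in use out of 2^32 possible random values, a single get_random_u32() pick collides with probability at most

    P(collision per try) <= (3 * 2^20) / 2^32 = 3 / 4096 ≈ 0.073%
    E[tries]              = 1 / (1 - 3/4096)  = 4096 / 4093 ≈ 1.0007

so the search_unused_slot loop almost always terminates on its first iteration.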
@@ -128,12 +142,15 @@ search_unused_slot:
bool index_hashtable_replace(struct index_hashtable *table, struct index_hashtable_entry *old, struct index_hashtable_entry *new)
{
- if (unlikely(hlist_unhashed(&old->index_hash)))
+ int ret;
+
+ if (unlikely(rhashtable_lookup_fast(&table->hashtable, old, index_params)))
return false;
+
spin_lock_bh(&table->lock);
new->index = old->index;
- hlist_replace_rcu(&old->index_hash, &new->index_hash);
- INIT_HLIST_NODE(&old->index_hash);
+ ret = rhashtable_replace_fast(&table->hashtable, &old->index_hash, &new->index_hash, index_params);
+ WARN_ON_ONCE(ret != 0);
spin_unlock_bh(&table->lock);
return true;
}
@@ -141,24 +158,18 @@ bool index_hashtable_replace(struct index_hashtable *table, struct index_hashtab
void index_hashtable_remove(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
- hlist_del_init_rcu(&entry->index_hash);
+ rhashtable_remove_fast(&table->hashtable, &entry->index_hash, index_params);
spin_unlock_bh(&table->lock);
}
/* Returns a strong reference to a entry->peer */
struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *table, const enum index_hashtable_type type_mask, const __le32 index)
{
- struct index_hashtable_entry *iter_entry, *entry = NULL;
+ struct index_hashtable_entry *entry = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
- if (iter_entry->index == index) {
- if (likely(iter_entry->type & type_mask))
- entry = iter_entry;
- break;
- }
- }
- if (likely(entry)) {
+ entry = rhashtable_lookup_fast(&table->hashtable, &index, index_params);
+ if (likely(entry && entry->type & type_mask)) {
entry->peer = peer_get(entry->peer);
if (unlikely(!entry->peer))
entry = NULL;