author    Jason A. Donenfeld <Jason@zx2c4.com>    2017-01-24 04:50:35 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>    2017-02-13 21:55:17 +0100
commit    d78302d1ec947665f9d880e25b3a7e79fb56fc0e (patch)
tree      b9d56e1b4ef5ae40e539a7a44b68651f43c078ee /src/hashtables.c
parent    socket: synchronize net on socket tear down (diff)
compat: backport siphash & dst_cache from mainline
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
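
The substance of the change below is a pointer-type fix: the backported mainline siphash wraps its 128-bit key in a siphash_key_t struct and takes it by pointer, where the removed in-tree compat copy took a raw byte array. A sketch of the relevant declarations, abbreviated along the lines of mainline <linux/siphash.h>:

    typedef struct {
            u64 key[2];     /* 128-bit key as two 64-bit words */
    } siphash_key_t;

    u64 siphash(const void *data, size_t len, const siphash_key_t *key);
    u64 siphash_2u32(const u32 a, const u32 b, const siphash_key_t *key);

That is why every call site below turns table->key into &table->key, while sizeof(table->key) still covers exactly the 16 key bytes fed to get_random_bytes().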
diff --git a/src/hashtables.c b/src/hashtables.c
index 507e84a..a412265 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -3,20 +3,17 @@
#include "hashtables.h"
#include "peer.h"
#include "noise.h"
-#include "crypto/siphash.h"
-
-#include <linux/hashtable.h>

static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
/* siphash gives us a secure 64bit number based on a random key. Since the bits are
* uniformly distributed, we can then mask off to get the bits we need. */
- return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key) & (HASH_SIZE(table->hashtable) - 1)];
}

void pubkey_hashtable_init(struct pubkey_hashtable *table)
{
- get_random_bytes(table->key, sizeof(table->key));
+ get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
}
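
The comment in this hunk carries the design reasoning: siphash produces a uniformly distributed 64-bit value under a secret key, so on a power-of-two table, masking with (HASH_SIZE(...) - 1) yields an unbiased bucket index. A minimal user-space sketch of that mask-to-bucket pattern; splitmix64 is only a stand-in mixer so the example compiles on its own, not the keyed siphash the kernel code relies on:

    #include <stdint.h>
    #include <stdio.h>

    #define HT_BITS 8
    #define HT_SIZE (1u << HT_BITS)        /* power of two, like HASH_SIZE() */

    /* Stand-in 64-bit mixer (splitmix64); NOT siphash and NOT keyed. */
    static uint64_t splitmix64(uint64_t x)
    {
            x += 0x9e3779b97f4a7c15ULL;
            x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ULL;
            x = (x ^ (x >> 27)) * 0x94d049bb133111ebULL;
            return x ^ (x >> 31);
    }

    static unsigned int bucket(uint64_t hash)
    {
            /* Uniform bits in, uniform bucket out: keep the low HT_BITS
             * bits, mirroring `& (HASH_SIZE(table->hashtable) - 1)`. */
            return (unsigned int)(hash & (HT_SIZE - 1));
    }

    int main(void)
    {
            printf("bucket = %u\n", bucket(splitmix64(42)));
            return 0;
    }

The secret key is what makes this safe to expose: without one, an attacker who can choose inputs can precompute collisions and degrade a bucket into a long list, which is precisely the attack siphash exists to prevent.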
@@ -60,7 +57,7 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, const __le32 index)

void index_hashtable_init(struct index_hashtable *table)
{
- get_random_bytes(table->key, sizeof(table->key));
+ get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
spin_lock_init(&table->lock);
}
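
Both tables draw their key once at init time from the kernel RNG; with the struct key, passing &table->key together with sizeof(table->key) still fills exactly 16 bytes. A user-space analogue of this init step, assuming Linux's getrandom(2) in place of get_random_bytes() (key_init is a hypothetical helper, and the struct mirrors mainline's siphash_key_t):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/random.h>

    typedef struct {
            uint64_t key[2];        /* mirrors mainline's siphash_key_t */
    } siphash_key_t;

    /* Hypothetical analogue of the get_random_bytes(&table->key, ...) call. */
    static void key_init(siphash_key_t *key)
    {
            if (getrandom(key, sizeof(*key), 0) != (ssize_t)sizeof(*key)) {
                    perror("getrandom");
                    exit(1);
            }
    }

    int main(void)
    {
            siphash_key_t k;
            key_init(&k);
            printf("key[0] = %016llx\n", (unsigned long long)k.key[0]);
            return 0;
    }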
@@ -78,7 +75,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, table->key);
+ entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, &table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */
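
This hunk shows the slot-search loop: hash a fresh random u32 together with a monotonically increasing counter under the table's secret key, take the result as the candidate index, rescan that candidate's bucket, and jump back to search_unused_slot on a collision. A sketch of the pick-and-retry pattern under stated assumptions: a toy occupancy array replaces the kernel hashtable, and mix2u32() is a stand-in for siphash_2u32() so the example is self-contained:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS 64                /* toy occupancy table, not the real one */
    static bool used[SLOTS];

    /* Stand-in for siphash_2u32(); same shape (two u32s + 128-bit key in). */
    static uint64_t mix2u32(uint32_t a, uint32_t b, const uint64_t key[2])
    {
            uint64_t x = (((uint64_t)a << 32) | b) ^ key[0];
            x *= 0x9e3779b97f4a7c15ULL;
            x ^= x >> 32;
            return x ^ key[1];
    }

    static uint32_t insert_index(const uint64_t key[2], uint32_t rnd)
    {
            static uint32_t counter;
            uint32_t index;

            /* Mirror of search_unused_slot: derive a candidate from
             * (random, counter++) and retry while it is already taken.
             * (Toy code: this spins forever if every slot is used.) */
            do {
                    index = (uint32_t)mix2u32(rnd, counter++, key);
            } while (used[index % SLOTS]);
            used[index % SLOTS] = true;
            return index;
    }

    int main(void)
    {
            const uint64_t key[2] = { 0x0123456789abcdefULL,
                                      0xfedcba9876543210ULL };
            printf("index = %08x\n", insert_index(key, 42u));
            return 0;
    }

The counter matters: even if get_random_int() repeated a value, the incrementing counter changes the siphash input on every retry, so the loop cannot get stuck re-deriving the same colliding index.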