aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/src/hashtables.c
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2016-12-13 03:20:53 +0100
committerJason A. Donenfeld <Jason@zx2c4.com>2016-12-16 06:35:58 +0100
commit986a8046a2b297279569ebd160a5410f8f195185 (patch)
tree282abc5d74b64060c52c936177bcb3e9ddf054b0 /src/hashtables.c
parenttools: fix latest-handshake typo in documentation (diff)
downloadwireguard-monolithic-historical-986a8046a2b297279569ebd160a5410f8f195185.tar.xz
wireguard-monolithic-historical-986a8046a2b297279569ebd160a5410f8f195185.zip
siphash: update against upstream submission
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--src/hashtables.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index b6161bb..45c9737 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -3,20 +3,20 @@
#include "hashtables.h"
#include "peer.h"
#include "noise.h"
-#include "crypto/siphash24.h"
+#include "crypto/siphash.h"
#include <linux/hashtable.h>
static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
- /* siphash24 gives us a secure 64bit number based on a random key. Since the bits are
+ /* siphash gives us a secure 64bit number based on a random key. Since the bits are
* uniformly distributed, we can then mask off to get the bits we need. */
- return &table->hashtable[siphash24(pubkey, NOISE_PUBLIC_KEY_LEN, table->key) & (HASH_SIZE(table->hashtable) - 1)];
+ return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, table->key) & (HASH_SIZE(table->hashtable) - 1)];
}
void pubkey_hashtable_init(struct pubkey_hashtable *table)
{
- get_random_bytes(table->key, SIPHASH24_KEY_LEN);
+ get_random_bytes(table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
}
@@ -60,7 +60,7 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
void index_hashtable_init(struct index_hashtable *table)
{
- get_random_bytes(table->key, SIPHASH24_KEY_LEN);
+ get_random_bytes(table->key, sizeof(table->key));
hash_init(table->hashtable);
spin_lock_init(&table->lock);
}
@@ -85,7 +85,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
rand = get_random_u64();
- entry->index = (__force __le32)siphash24((u8 *)&rand, sizeof(rand), table->key);
+ entry->index = (__force __le32)siphash_1u64(rand, table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */