author    Jason A. Donenfeld <Jason@zx2c4.com>  2017-03-16 15:28:16 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>  2017-03-19 15:34:46 +0100
commit    fc6856b5b9dc056e7a54436f1f7eb0b9a6f68895 (patch)
tree      e9cf000911d62752d98fce7b10d612264c281818 /src/hashtables.c
parent    timers: elide enable check (diff)
hashtables: get_random_int is now more secure, so expose directly
On 4.11, get_random_u32 now uses either chacha or rdrand, rather than the horrible former MD5 construction, so we feel more comfortable exposing RNG output directly. On older kernels, we fall back to something a bit disgusting.
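The fallback for pre-4.11 kernels is not part of this diff. As a rough illustration only, a hypothetical compat shim (not the actual compat code in this tree) could provide get_random_u32() in terms of get_random_bytes() whenever the kernel does not define it:

#include <linux/version.h>
#include <linux/random.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
/* Hypothetical fallback: fill a u32 from the kernel CSPRNG. The real
 * compat path referenced in the commit message above may differ. */
static inline u32 get_random_u32(void)
{
	u32 ret;

	get_random_bytes(&ret, sizeof(ret));
	return ret;
}
#endif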
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  4
1 file changed, 1 insertion, 3 deletions
diff --git a/src/hashtables.c b/src/hashtables.c
index a412265..4cb8441 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -57,7 +57,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 void index_hashtable_init(struct index_hashtable *table)
 {
-	get_random_bytes(&table->key, sizeof(table->key));
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -65,7 +64,6 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	u32 counter = get_random_int();
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -75,7 +73,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, &table->key);
+	entry->index = (__force __le32)get_random_u32();
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
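The hunk above keeps the retry loop intact and only changes where the candidate index comes from: a direct get_random_u32() call instead of SipHashing a get_random_int() seed with a per-table key. A minimal user-space sketch of that pick-randomly-and-retry pattern (hypothetical names such as insert_random_index and a simplified direct-mapped table; rand() stands in for get_random_u32() and is not a CSPRNG):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TABLE_BITS 8
#define TABLE_SIZE (1u << TABLE_BITS)

/* Toy direct-mapped table: 0 means the slot is free. The kernel code
 * instead walks an hlist bucket and retries only on a full index match. */
static uint32_t slots[TABLE_SIZE];

static uint32_t random_u32(void)
{
	/* Stand-in for get_random_u32(); not cryptographically secure. */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static uint32_t insert_random_index(void)
{
	uint32_t index;

search_unused_slot:
	/* Draw a candidate index directly from the RNG, then retry on
	 * collision, mirroring the loop in index_hashtable_insert(). */
	index = random_u32();
	if (index == 0 || slots[index & (TABLE_SIZE - 1)] != 0)
		goto search_unused_slot; /* in use (or reserved zero), keep searching */
	slots[index & (TABLE_SIZE - 1)] = index;
	return index;
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 4; i++)
		printf("allocated index %08x\n", (unsigned)insert_random_index());
	return 0;
}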