From fc6856b5b9dc056e7a54436f1f7eb0b9a6f68895 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Thu, 16 Mar 2017 15:28:16 +0100
Subject: hashtables: get_random_int is now more secure, so expose directly

On 4.11, get_random_u32 now either uses chacha or rdrand, rather than
the horrible former MD5 construction, so we feel more comfortable
exposing RNG output directly.

On older kernels, we fall back to something a bit disgusting.
---
 src/compat/compat.h | 19 +++++++++++++++++++
 src/hashtables.c    |  4 +---
 src/hashtables.h    |  1 -
 3 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/src/compat/compat.h b/src/compat/compat.h
index 141cad7..4e6010f 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -171,6 +171,25 @@ static inline void skb_reset_tc(struct sk_buff *skb)
 }
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+#include <linux/siphash.h>
+static inline u32 get_random_u32(void)
+{
+	static siphash_key_t key;
+	static u32 counter = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	static bool has_seeded = false;
+	if (unlikely(!has_seeded)) {
+		get_random_bytes(&key, sizeof(key));
+		has_seeded = true;
+	}
+#else
+	get_random_once(&key, sizeof(key));
+#endif
+	return siphash_2u32(counter++, get_random_int(), &key);
+}
+#endif
+
 /* https://lkml.org/lkml/2015/6/12/415 */
 #include <linux/netdevice.h>
 static inline struct net_device *netdev_pub(void *dev)
diff --git a/src/hashtables.c b/src/hashtables.c
index a412265..4cb8441 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -57,7 +57,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 
 void index_hashtable_init(struct index_hashtable *table)
 {
-	get_random_bytes(&table->key, sizeof(table->key));
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -65,7 +64,6 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	u32 counter = get_random_int();
 
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -75,7 +73,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, &table->key);
+	entry->index = (__force __le32)get_random_u32();
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
diff --git a/src/hashtables.h b/src/hashtables.h
index c66780a..9fa47d5 100644
--- a/src/hashtables.h
+++ b/src/hashtables.h
@@ -24,7 +24,6 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
 
 struct index_hashtable {
 	DECLARE_HASHTABLE(hashtable, 10);
-	siphash_key_t key;
 	spinlock_t lock;
 };
-- 
cgit v1.2.3-59-g8ed1b
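
To see what the compat.h fallback actually computes, here is a minimal
userspace C sketch of the same construction: a one-time random key, a
monotonic counter, and fresh RNG output mixed through keyed SipHash-2-4,
so raw get_random_int() output is never handed out directly. This is an
illustration, not kernel code: siphash_2u32_sketch(),
fake_get_random_int(), and get_random_u32_fallback() are hypothetical
names invented here; it assumes the kernel's siphash_2u32() packs its two
u32 arguments as (u64)second << 32 | first; and rand() is NOT a
cryptographic RNG, it merely stands in for get_random_int().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ROTL64(x, b) ((uint64_t)(((x) << (b)) | ((x) >> (64 - (b)))))

/* One SipHash round, as in the reference implementation. */
#define SIPROUND(v0, v1, v2, v3) do { \
	(v0) += (v1); (v1) = ROTL64((v1), 13); (v1) ^= (v0); (v0) = ROTL64((v0), 32); \
	(v2) += (v3); (v3) = ROTL64((v3), 16); (v3) ^= (v2); \
	(v0) += (v3); (v3) = ROTL64((v3), 21); (v3) ^= (v0); \
	(v2) += (v1); (v1) = ROTL64((v1), 17); (v1) ^= (v2); (v2) = ROTL64((v2), 32); \
} while (0)

/* SipHash-2-4 over a single 8-byte message; hypothetical stand-in for
 * the kernel's siphash_2u32(), assuming (u64)second << 32 | first. */
static uint64_t siphash_2u32_sketch(uint32_t first, uint32_t second,
				    const uint64_t key[2])
{
	uint64_t v0 = 0x736f6d6570736575ULL ^ key[0];
	uint64_t v1 = 0x646f72616e646f6dULL ^ key[1];
	uint64_t v2 = 0x6c7967656e657261ULL ^ key[0];
	uint64_t v3 = 0x7465646279746573ULL ^ key[1];
	uint64_t m = ((uint64_t)second << 32) | first;
	uint64_t b = 8ULL << 56; /* final block encodes the length, 8 bytes */

	v3 ^= m;                 /* compress the one message block */
	SIPROUND(v0, v1, v2, v3); SIPROUND(v0, v1, v2, v3);
	v0 ^= m;
	v3 ^= b;                 /* compress the length block */
	SIPROUND(v0, v1, v2, v3); SIPROUND(v0, v1, v2, v3);
	v0 ^= b;
	v2 ^= 0xff;              /* finalization: four rounds */
	SIPROUND(v0, v1, v2, v3); SIPROUND(v0, v1, v2, v3);
	SIPROUND(v0, v1, v2, v3); SIPROUND(v0, v1, v2, v3);
	return v0 ^ v1 ^ v2 ^ v3;
}

/* Placeholder for get_random_int(); rand() is not a secure RNG. */
static uint32_t fake_get_random_int(void)
{
	return (uint32_t)rand() ^ ((uint32_t)rand() << 16);
}

/* The fallback shape from the patch: seed a key once (get_random_once()
 * in the kernel), then hash counter++ together with fresh RNG output. */
static uint32_t get_random_u32_fallback(void)
{
	static uint64_t key[2];
	static uint32_t counter;
	static int has_seeded;

	if (!has_seeded) { /* one-time seeding, like get_random_once() */
		key[0] = ((uint64_t)fake_get_random_int() << 32) | fake_get_random_int();
		key[1] = ((uint64_t)fake_get_random_int() << 32) | fake_get_random_int();
		has_seeded = 1;
	}
	return (uint32_t)siphash_2u32_sketch(counter++, fake_get_random_int(), key);
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 4; ++i)
		printf("0x%08" PRIx32 "\n", get_random_u32_fallback());
	return 0;
}

Even if the underlying RNG were weak, the per-boot secret key and the
counter mean an observer of successive outputs cannot trivially relate
them, which is the property index_hashtable_insert() relied on when it
did this mixing itself; with a trustworthy get_random_u32() on 4.11+,
that extra layer becomes unnecessary and is dropped from hashtables.c.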