author     Jason A. Donenfeld <Jason@zx2c4.com>  2016-07-05 17:14:59 +0200
committer  Jason A. Donenfeld <Jason@zx2c4.com>  2016-07-22 16:48:51 +0200
commit     86bd8deff00b748049bf156ccb8aaf59e5c33658 (patch)
tree       e258d47b1133edc1d203e625d0dd39e8cdd593fe /src/hashtables.c
parent     cookie: do not expose csprng directly (diff)
index hashtable: run random indices through siphash
If /dev/urandom is a NOBUS RNG backdoor, like the infamous Dual_EC_DRBG, then sending 4 bytes of raw RNG output over the wire directly might not be such a great idea. This mitigates that vulnerability by, at some point before the indices are generated, creating a random secret. Then, for each session index, we simply run SipHash24 on an incrementing counter.

This is probably overkill because /dev/urandom is probably not a backdoored RNG, and itself already uses several rounds of SHA-1 for mixing. If the kernel RNG is backdoored, there may very well be bigger problems at play. Four bytes is also not so many bytes.
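To make the scheme concrete, here is a minimal userspace sketch of the same idea: draw a 128-bit secret from /dev/urandom once, then derive each 32-bit session index by running SipHash-2-4 over an incrementing 64-bit counter and truncating the result. Everything here (index_generator, index_generator_next, the file-based key read) is illustrative and not part of the patch; the kernel code below uses get_random_bytes(), an atomic64_t counter, and the tree's siphash24() helper instead.

/*
 * Userspace sketch (illustrative only): a random 128-bit key is drawn once,
 * and every new session index is SipHash-2-4 of an incrementing 64-bit
 * counter under that key, truncated to 32 bits -- the same construction the
 * patch below applies inside index_hashtable_insert().
 */
#include <stdint.h>
#include <stdio.h>

#define ROTL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))

#define SIPROUND do { \
		v0 += v1; v1 = ROTL64(v1, 13); v1 ^= v0; v0 = ROTL64(v0, 32); \
		v2 += v3; v3 = ROTL64(v3, 16); v3 ^= v2;                      \
		v0 += v3; v3 = ROTL64(v3, 21); v3 ^= v0;                      \
		v2 += v1; v1 = ROTL64(v1, 17); v1 ^= v2; v2 = ROTL64(v2, 32); \
	} while (0)

/* SipHash-2-4 of a single 64-bit word under the 128-bit key (k0, k1). */
static uint64_t siphash24_u64(uint64_t m, uint64_t k0, uint64_t k1)
{
	uint64_t v0 = 0x736f6d6570736575ULL ^ k0;
	uint64_t v1 = 0x646f72616e646f6dULL ^ k1;
	uint64_t v2 = 0x6c7967656e657261ULL ^ k0;
	uint64_t v3 = 0x7465646279746573ULL ^ k1;
	uint64_t b = 8ULL << 56; /* finalization block: just the input length */

	v3 ^= m; SIPROUND; SIPROUND; v0 ^= m;
	v3 ^= b; SIPROUND; SIPROUND; v0 ^= b;
	v2 ^= 0xff;
	SIPROUND; SIPROUND; SIPROUND; SIPROUND;
	return v0 ^ v1 ^ v2 ^ v3;
}

struct index_generator {
	uint64_t k0, k1;  /* random secret, never sent over the wire */
	uint64_t counter; /* incremented for every index handed out */
};

/* Userspace stand-in for get_random_bytes(): read the key from /dev/urandom. */
static int index_generator_init(struct index_generator *gen)
{
	FILE *f = fopen("/dev/urandom", "rb");

	if (!f)
		return -1;
	if (fread(&gen->k0, sizeof(gen->k0), 1, f) != 1 ||
	    fread(&gen->k1, sizeof(gen->k1), 1, f) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	gen->counter = 0;
	return 0;
}

/* Mirrors the patch: index = low 32 bits of siphash24 over the bumped counter. */
static uint32_t index_generator_next(struct index_generator *gen)
{
	return (uint32_t)siphash24_u64(++gen->counter, gen->k0, gen->k1);
}

int main(void)
{
	struct index_generator gen;

	if (index_generator_init(&gen))
		return 1;
	for (int i = 0; i < 4; i++)
		printf("session index: 0x%08x\n", index_generator_next(&gen));
	return 0;
}

An observer who sees the indices on the wire does not know the key, so the indices reveal nothing directly about the RNG output, while the per-table counter guarantees a fresh SipHash input for every insertion attempt.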
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  |  6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 8911625..db7c23b 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -60,6 +60,8 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
void index_hashtable_init(struct index_hashtable *table)
{
+ get_random_bytes(table->key, SIPHASH24_KEY_LEN);
+ atomic64_set(&table->counter, 0);
hash_init(table->hashtable);
spin_lock_init(&table->lock);
}
@@ -67,6 +69,7 @@ void index_hashtable_init(struct index_hashtable *table)
__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
+ uint64_t counter;
spin_lock(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -76,7 +79,8 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- get_random_bytes(&entry->index, sizeof(entry->index));
+ counter = atomic64_inc_return(&table->counter);
+ entry->index = (__force __le32)siphash24((uint8_t *)&counter, sizeof(counter), table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */
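Not shown in this diff: the hunks rely on two index_hashtable members, key and counter, presumably added in the companion hashtables.h change. A rough, hypothetical sketch of their shape, with only the field names taken from the hunks above:

/*
 * Hypothetical sketch of the struct fields this patch depends on; the real
 * definition lives in hashtables.h and is not part of this diff.  The
 * hashtable and lock members are inferred from the _init() hunk above, and
 * the table order (13) is a guess, not taken from the source.
 */
struct index_hashtable {
	DECLARE_HASHTABLE(hashtable, 13);
	uint8_t key[SIPHASH24_KEY_LEN];	/* random secret, drawn once at init */
	atomic64_t counter;		/* distinct siphash24() input per insert */
	spinlock_t lock;
};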