author     Jason A. Donenfeld <Jason@zx2c4.com>    2016-08-21 20:13:17 +0200
committer  Jason A. Donenfeld <Jason@zx2c4.com>    2016-08-22 01:47:28 +0200
commit     d2a8e52e8574d24670f205ed6d2a6c78e1f5da2f
tree       9e115056e0be48f9fd0eca45ba2d91e3bf251fdf /src/hashtables.c
parent     tests: test jumbo frames with more transfer
download   wireguard-monolithic-historical-d2a8e52e8574d24670f205ed6d2a6c78e1f5da2f.tar.xz
           wireguard-monolithic-historical-d2a8e52e8574d24670f205ed6d2a6c78e1f5da2f.zip
hashtables: use rdrand() instead of counter
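
The patch swaps the SipHash input from a per-table atomic counter to a fresh 64-bit random value drawn on every attempt, so candidate indices no longer depend on shared mutable state and a known key no longer yields a predictable index sequence. Despite the subject line, the code calls get_random_long(), which on kernels of this era tries the architecture RNG first (RDRAND on x86) before falling back to the entropy pool. Below is a minimal userspace sketch of the resulting insert loop, not the kernel code: getrandom(2) stands in for get_random_long(), a simple keyed 64-to-32-bit mix stands in for siphash24(), and a flat used[] array plays the role of walking the hash bucket for an exact index match.

/* Minimal sketch of the new index-generation loop. Assumptions:
 * getrandom(2) stands in for get_random_long(), keyed_mix() stands in
 * for siphash24(), and the used[] scan stands in for the bucket walk. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>

static uint32_t used[64]; /* indices already handed out (toy table) */
static size_t nused;

/* Stand-in for siphash24(): any keyed 64->32-bit mix suffices here;
 * the real code uses SipHash-2-4 with a secret per-table key. */
static uint32_t keyed_mix(uint64_t x, uint64_t key)
{
	x ^= key;
	x *= 0x9e3779b97f4a7c15ull;
	return (uint32_t)(x >> 32);
}

static int index_in_use(uint32_t index)
{
	for (size_t i = 0; i < nused; ++i)
		if (used[i] == index)
			return 1;
	return 0;
}

static uint32_t insert_index(uint64_t key)
{
	uint64_t rand;
	uint32_t index;

	do {
		/* Fresh random input on every attempt, as in the patch;
		 * the old code hashed an incrementing counter instead. */
		if (getrandom(&rand, sizeof(rand), 0) != sizeof(rand))
			return 0; /* toy error handling: give up on RNG failure */
		index = keyed_mix(rand, key);
	} while (index_in_use(index)); /* collision: draw again, like the goto */

	used[nused++] = index;
	return index;
}

int main(void)
{
	uint64_t key;

	if (getrandom(&key, sizeof(key), 0) != sizeof(key))
		return 1;
	for (int i = 0; i < 4; ++i)
		printf("index %d: 0x%08x\n", i, (unsigned)insert_index(key));
	return 0;
}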
Diffstat (limited to 'src/hashtables.c')
 src/hashtables.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 965605b..2fb4322 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -61,7 +61,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 void index_hashtable_init(struct index_hashtable *table)
 {
 	get_random_bytes(table->key, SIPHASH24_KEY_LEN);
-	atomic64_set(&table->counter, 0);
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -69,7 +68,7 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	uint64_t counter;
+	uint64_t rand;
 
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -79,8 +78,8 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	counter = atomic64_inc_return(&table->counter);
-	entry->index = (__force __le32)siphash24((uint8_t *)&counter, sizeof(counter), table->key);
+	rand = get_random_long();
+	entry->index = (__force __le32)siphash24((uint8_t *)&rand, sizeof(rand), table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
 	}
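
A note on the trade-off: the counter guaranteed distinct hash inputs until 64-bit wraparound, though the 32-bit truncation of the SipHash output could already collide, which is why the search_unused_slot retry loop predates this patch. Independent random draws make such collisions marginally more likely by the birthday bound, but they remove the atomic64_inc_return() from every insert, and the structure of the loop is otherwise unchanged.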