From 719f1dc2012da30f9378c41dcc394671e1a0920c Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Mon, 28 Oct 2019 17:55:33 +0100
Subject: peerlookup: fall back to get_random_bytes for Ryzen 3000 bug

In case get_random_u32() fails after 32 tries -- something that should
only happen on the Ryzen 3000, which returns -1 every time if you have
the wrong CPU microcode -- we fall back to get_random_bytes(), which is
slower, but at least works.
---
 src/peerlookup.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'src/peerlookup.c')

diff --git a/src/peerlookup.c b/src/peerlookup.c
index e4deb33..8144284 100644
--- a/src/peerlookup.c
+++ b/src/peerlookup.c
@@ -120,6 +120,7 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
 				 struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
+	unsigned int tries = 0;
 
 	spin_lock_bh(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -129,7 +130,10 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	entry->index = (__force __le32)get_random_u32();
+	if (++tries > 32) /* Work around AMD Ryzen 3000 RDRAND bug. */
+		get_random_bytes(&entry->index, sizeof(entry->index));
+	else
+		entry->index = (__force __le32)get_random_u32();
 	hlist_for_each_entry_rcu_bh(existing_entry,
 				    index_bucket(table, entry->index),
 				    index_hash) {
-- 
cgit v1.2.3-59-g8ed1b
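
A rough userspace analogue of the same bounded-retry-then-fall-back idea is
sketched below. It is illustrative only and not part of the patch: it assumes
an x86 CPU with RDRAND (compile with -mrdrnd) and a Linux system providing
getrandom(2), and the function name random_u32_with_fallback() is hypothetical.
It tries the hardware generator up to 32 times, treats the stuck 0xFFFFFFFF
output seen on unpatched Ryzen 3000 parts as a failure, and then falls back to
the kernel's CSPRNG, mirroring the logic added to wg_index_hashtable_insert().

/* Illustrative sketch (not part of the patch): retry a fast hardware RNG a
 * bounded number of times, then fall back to a slower but reliable source.
 */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/random.h>

static uint32_t random_u32_with_fallback(void)
{
	unsigned int val;
	int tries;

	for (tries = 0; tries < 32; ++tries) {
		/* _rdrand32_step() returns 1 on success. An unpatched
		 * Ryzen 3000 reports success but always hands back
		 * 0xFFFFFFFF, so treat that value as a failure too.
		 */
		if (_rdrand32_step(&val) && val != 0xFFFFFFFFu)
			return val;
	}

	/* Fall back to the kernel's CSPRNG: slower, but it always works. */
	if (getrandom(&val, sizeof(val), 0) != sizeof(val)) {
		perror("getrandom");
		return 0;
	}
	return val;
}

int main(void)
{
	printf("random value: 0x%08x\n", random_u32_with_fallback());
	return 0;
}

Rejecting the legitimate value 0xFFFFFFFF discards one output in 2^32, which is
a negligible cost for being able to detect a stuck generator.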