aboutsummaryrefslogtreecommitdiffstatshomepage
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2019-10-28 17:55:33 +0100
committerJason A. Donenfeld <Jason@zx2c4.com>2019-10-28 17:55:33 +0100
commit719f1dc2012da30f9378c41dcc394671e1a0920c (patch)
treebfe2be99725ff8ee0cf8475c86d4db6308659f3c
parentwg-quick: android: check for null in binder cleanup functions (diff)
downloadwireguard-monolithic-historical-jd/ryzen-bug.tar.xz
wireguard-monolithic-historical-jd/ryzen-bug.zip
peerlookup: fall back to get_random_bytes for Ryzen 3000 bugjd/ryzen-bug
In case get_random_u32() fails after 32 tries -- something that should only happen on the Ryzen 3000, which returns -1 every time if you have the wrong CPU microcode -- we fall back to get_random_bytes(), which is slower, but at least works.
-rw-r--r--src/peerlookup.c6
1 files changed, 5 insertions, 1 deletions
diff --git a/src/peerlookup.c b/src/peerlookup.c
index e4deb33..8144284 100644
--- a/src/peerlookup.c
+++ b/src/peerlookup.c
@@ -120,6 +120,7 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
+ unsigned int tries = 0;
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -129,7 +130,10 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- entry->index = (__force __le32)get_random_u32();
+ if (++tries > 32) /* Work around AMD Ryzen 3000 RDRAND bug. */
+ get_random_bytes(&entry->index, sizeof(entry->index));
+ else
+ entry->index = (__force __le32)get_random_u32();
hlist_for_each_entry_rcu_bh(existing_entry,
index_bucket(table, entry->index),
index_hash) {