author    David S. Miller <davem@davemloft.net>    2020-09-09 11:31:38 -0700
committer David S. Miller <davem@davemloft.net>    2020-09-09 11:31:38 -0700
commit    99dc4a5dfe940ec986f915da36dda196798bd977 (patch)
tree      4c71c1a13438f2e6ebd8b9fec36a77f8efcd5d94
parent    hsr: avoid newline at end of message in NL_SET_ERR_MSG_MOD (diff)
parent    wireguard: peerlookup: take lock before checking hash in replace operation (diff)
Merge branch 'wireguard-fixes'
Jason A. Donenfeld says:

====================
wireguard fixes for 5.9-rc5

Yesterday, Eric reported a race condition found by syzbot. This series
contains two commits, one that fixes the direct issue, and another that
addresses the more general issue, as a defense in depth.

1) The basic problem syzbot unearthed was that one particular mutation of
   handshake->entry was not protected by the handshake mutex like the other
   cases, so this patch basically just reorders a line to make sure the
   mutex is actually taken at the right point. Most of the work here went
   into making sure the race was fully understood and making a reproducer
   (which syzbot was unable to do itself, due to the rarity of the race).

2) Eric's initial suggestion for fixing this was taking a spinlock around
   the hash table replace function where the null ptr deref was happening.
   This doesn't address the main problem in the most precise possible way
   like (1) does, but it is a good suggestion for defense-in-depth, in case
   related issues come up in the future, and basically costs nothing from a
   performance perspective. I thought it aided in implementing a good
   general rule: all mutators of that hash table take the table lock. So
   that's part of this series as a companion.

Both of these contain Fixes: tags and are good candidates for stable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
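As a rough illustration of the rule described in (2) -- the "is this entry
still hashed?" check has to happen inside the same critical section as the
mutation -- here is a minimal userspace sketch using pthreads. The names
(struct toy_table, toy_replace, the hashed flag) are invented for
illustration only; the real code uses spin_lock_bh(), hlist_unhashed() and
hlist_replace_rcu(), as shown in the peerlookup.c hunks below.

/* Hypothetical sketch, not the WireGuard code: every mutator of the table
 * takes the table lock, and the "still hashed?" check is done under that
 * same lock rather than before it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_entry {
	bool hashed;           /* stands in for !hlist_unhashed() */
	unsigned int index;
};

struct toy_table {
	pthread_mutex_t lock;  /* stands in for table->lock */
	struct toy_entry *slot;
};

/* Mirrors the shape of the patched wg_index_hashtable_replace(): lock
 * first, then check, then mutate, all in one critical section. */
static bool toy_replace(struct toy_table *t, struct toy_entry *old,
			struct toy_entry *new)
{
	bool ret;

	pthread_mutex_lock(&t->lock);
	ret = old->hashed;
	if (ret) {
		new->index = old->index;
		new->hashed = true;
		t->slot = new;
		old->hashed = false;  /* analogous to INIT_HLIST_NODE(&old->index_hash) */
	}
	pthread_mutex_unlock(&t->lock);
	return ret;
}

int main(void)
{
	struct toy_table t = { .lock = PTHREAD_MUTEX_INITIALIZER, .slot = NULL };
	struct toy_entry a = { .hashed = true, .index = 42 }, b = { 0 };

	t.slot = &a;
	printf("replace a->b: %s (b.index=%u)\n",
	       toy_replace(&t, &a, &b) ? "ok" : "skipped", b.index);
	printf("replace a->b again: %s\n",
	       toy_replace(&t, &a, &b) ? "ok" : "skipped");
	return 0;
}

Built with cc -pthread, the second call is rejected because the entry has
already been unhashed under the lock; doing that check before taking the
lock is exactly the window this series closes.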
-rw-r--r--    drivers/net/wireguard/noise.c         5
-rw-r--r--    drivers/net/wireguard/peerlookup.c    11
2 files changed, 9 insertions, 7 deletions
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 3dd3b76790d0..c0cfd9b36c0b 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_handshake *handshake)
 
 void wg_noise_handshake_clear(struct noise_handshake *handshake)
 {
+	down_write(&handshake->lock);
 	wg_index_hashtable_remove(
 			handshake->entry.peer->device->index_hashtable,
 			&handshake->entry);
-	down_write(&handshake->lock);
 	handshake_zero(handshake);
 	up_write(&handshake->lock);
-	wg_index_hashtable_remove(
-			handshake->entry.peer->device->index_hashtable,
-			&handshake->entry);
 }
 
 static struct noise_keypair *keypair_create(struct wg_peer *peer)
diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
index e4deb331476b..f2783aa7a88f 100644
--- a/drivers/net/wireguard/peerlookup.c
+++ b/drivers/net/wireguard/peerlookup.c
@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
 				struct index_hashtable_entry *old,
 				struct index_hashtable_entry *new)
 {
-	if (unlikely(hlist_unhashed(&old->index_hash)))
-		return false;
+	bool ret;
+
 	spin_lock_bh(&table->lock);
+	ret = !hlist_unhashed(&old->index_hash);
+	if (unlikely(!ret))
+		goto out;
+
 	new->index = old->index;
 	hlist_replace_rcu(&old->index_hash, &new->index_hash);
 
@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
 	 * simply gets dropped, which isn't terrible.
 	 */
 	INIT_HLIST_NODE(&old->index_hash);
+out:
 	spin_unlock_bh(&table->lock);
-	return true;
+	return ret;
 }
 
 void wg_index_hashtable_remove(struct index_hashtable *table,