From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Wed, 9 Sep 2020 13:58:15 +0200
Subject: [PATCH] wireguard: peerlookup: take lock before checking hash in
 replace operation

commit 6147f7b1e90ff09bd52afc8b9206a7fcd133daf7 upstream.

Eric's suggested fix for the previous commit's mentioned race condition
was to simply take the table->lock in wg_index_hashtable_replace(). The
table->lock of the hash table is supposed to protect the bucket heads,
not the entries, but actually, since all the mutator functions are
already taking it, it makes sense to take it too for the test to
hlist_unhashed, as a defense-in-depth measure, so that it no longer
races with deletions, regardless of what other locks are protecting
individual entries. This is sensible from a performance perspective
because, as Eric pointed out, the case of being unhashed is already the
unlikely case, so this won't add common contention. And comparing
instructions, this basically doesn't make much of a difference other
than pushing and popping %r13, used by the new `bool ret`. More
generally, I like the idea of locking consistency across table mutator
functions, and this might let me rest slightly easier at night.

Suggested-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/
Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/net/wireguard/peerlookup.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

--- a/drivers/net/wireguard/peerlookup.c
+++ b/drivers/net/wireguard/peerlookup.c
@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct i
 				struct index_hashtable_entry *old,
 				struct index_hashtable_entry *new)
 {
-	if (unlikely(hlist_unhashed(&old->index_hash)))
-		return false;
+	bool ret;
+
 	spin_lock_bh(&table->lock);
+	ret = !hlist_unhashed(&old->index_hash);
+	if (unlikely(!ret))
+		goto out;
+
 	new->index = old->index;
 	hlist_replace_rcu(&old->index_hash, &new->index_hash);
 
@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct i
 	 * simply gets dropped, which isn't terrible.
 	 */
 	INIT_HLIST_NODE(&old->index_hash);
+out:
 	spin_unlock_bh(&table->lock);
-	return true;
+	return ret;
 }
 
 void wg_index_hashtable_remove(struct index_hashtable *table,
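
For reference, this is roughly how wg_index_hashtable_replace() reads once both
hunks are applied. It is a sketch reconstructed only from the context and added
lines above: the truncated hunk-header signature is completed from the
table->lock uses in the body, and the multi-line comment elided by the second
hunk is abbreviated.

bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
	/* The unhashed check now happens under table->lock, so it can no
	 * longer race with deletions of the same entry.
	 */
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* ... (comment body elided by the hunk context) ...
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}

This matches the commit message's performance argument: the unlikely !ret branch
is the only new work done under the lock, so the common path gains little beyond
the push/pop of %r13 used for ret.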