blob: 499b36bc5fdcef08528a1df5c4db28992225a945 [file] [log] [blame]
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Wed, 6 May 2020 15:33:04 -0600
Subject: [PATCH] wireguard: send/receive: cond_resched() when processing
 worker ringbuffers

commit 4005f5c3c9d006157ba716594e0d70c88a235c5e upstream.

Users with pathological hardware reported CPU stalls on CONFIG_
PREEMPT_VOLUNTARY=y, because the ringbuffers would stay full, meaning
these workers would never terminate. That turned out not to be okay on
systems without forced preemption, which Sultan observed. This commit
adds a cond_resched() to the bottom of each loop iteration, so that
these workers don't hog the core. Note that we don't need this on the
napi poll worker, since that terminates after its budget is expended.

Suggested-by: Sultan Alsawaf <sultan@kerneltoast.com>
Reported-by: Wang Jian <larkwang@gmail.com>
Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/net/wireguard/receive.c | 2 ++
 drivers/net/wireguard/send.c    | 4 ++++
 2 files changed, 6 insertions(+)

--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -516,6 +516,8 @@ void wg_packet_decrypt_worker(struct wor
 				       &PACKET_CB(skb)->keypair->receiving)) ?
 				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
 		wg_queue_enqueue_per_peer_napi(skb, state);
+		if (need_resched())
+			cond_resched();
 	}
 }
 
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_str
 
 		wg_noise_keypair_put(keypair, false);
 		wg_peer_put(peer);
+		if (need_resched())
+			cond_resched();
 	}
 }
 
@@ -304,6 +306,8 @@ void wg_packet_encrypt_worker(struct wor
 		}
 		wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
 					  state);
+		if (need_resched())
+			cond_resched();
 	}
 }
 
58