From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 8 Feb 2021 11:34:08 -0800
Subject: [PATCH] net: extract napi poll functionality to __napi_poll()

This commit introduces a new function __napi_poll() which does the main
logic of the existing napi_poll() function, and will be called by other
functions in later commits.
This idea and implementation is done by Felix Fietkau <nbd@nbd.name> and
is proposed as part of the patch to move napi work to work_queue
context.
This commit by itself is a code restructure.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Wei Wang <weiwan@google.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6328,15 +6328,10 @@ void netif_napi_del(struct napi_struct *
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+static int __napi_poll(struct napi_struct *n, bool *repoll)
 {
-	void *have;
 	int work, weight;
 
-	list_del_init(&n->poll_list);
-
-	have = netpoll_poll_lock(n);
-
 	weight = n->weight;
 
 	/* This NAPI_STATE_SCHED test is for avoiding a race
@@ -6354,7 +6349,7 @@ static int napi_poll(struct napi_struct
 	WARN_ON_ONCE(work > weight);
 
 	if (likely(work < weight))
-		goto out_unlock;
+		return work;
 
 	/* Drivers must not modify the NAPI state if they
 	 * consume the entire weight. In such cases this code
@@ -6363,7 +6358,7 @@ static int napi_poll(struct napi_struct
 	 */
 	if (unlikely(napi_disable_pending(n))) {
 		napi_complete(n);
-		goto out_unlock;
+		return work;
 	}
 
 	if (n->gro_bitmask) {
@@ -6381,12 +6376,29 @@ static int napi_poll(struct napi_struct
 	if (unlikely(!list_empty(&n->poll_list))) {
 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
 			     n->dev ? n->dev->name : "backlog");
-		goto out_unlock;
+		return work;
 	}
 
-	list_add_tail(&n->poll_list, repoll);
+	*repoll = true;
+
+	return work;
+}
+
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+	bool do_repoll = false;
+	void *have;
+	int work;
+
+	list_del_init(&n->poll_list);
+
+	have = netpoll_poll_lock(n);
+
+	work = __napi_poll(n, &do_repoll);
+
+	if (do_repoll)
+		list_add_tail(&n->poll_list, repoll);
 
-out_unlock:
 	netpoll_poll_unlock(have);
 
 	return work;