From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 8 Feb 2021 11:34:08 -0800
Subject: [PATCH] net: extract napi poll functionality to __napi_poll()

This commit introduces a new function, __napi_poll(), which carries out
the main logic of the existing napi_poll() function and will be called
by other functions in later commits.
The idea and implementation are by Felix Fietkau <nbd@nbd.name> and were
originally proposed as part of the patch moving napi work to workqueue
context.
On its own, this commit is purely a code restructure.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Wei Wang <weiwan@google.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
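
Note: the point of the split is that later changes can drive __napi_poll()
from somewhere other than the net_rx_action() softirq path, e.g. from
process context. The snippet below is only a rough sketch of what such a
caller might look like; napi_workfn() and the napi->work member are
illustrative assumptions and are not part of this patch or its series.

/* Hypothetical sketch: reuse __napi_poll() from a workqueue handler,
 * mirroring the netpoll locking and repoll bookkeeping that napi_poll()
 * performs for the softirq path.  Assumes a work_struct member named
 * "work" in struct napi_struct, which this patch does not add.
 */
static void napi_workfn(struct work_struct *work)
{
	struct napi_struct *napi = container_of(work, struct napi_struct, work);
	bool repoll = false;
	void *have;

	local_bh_disable();		/* NAPI poll callbacks expect BH context */
	have = netpoll_poll_lock(napi);
	__napi_poll(napi, &repoll);
	netpoll_poll_unlock(have);
	local_bh_enable();

	if (repoll)			/* weight exhausted: poll again later */
		schedule_work(work);
}

Keeping the poll_list handling and netpoll locking inside napi_poll() leaves
the existing softirq path untouched, while an alternative poller only has to
wrap the core __napi_poll() step.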

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6328,15 +6328,10 @@ void netif_napi_del(struct napi_struct *
 }
 EXPORT_SYMBOL(netif_napi_del);

-static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+static int __napi_poll(struct napi_struct *n, bool *repoll)
 {
-	void *have;
 	int work, weight;

-	list_del_init(&n->poll_list);
-
-	have = netpoll_poll_lock(n);
-
 	weight = n->weight;

 	/* This NAPI_STATE_SCHED test is for avoiding a race
@@ -6354,7 +6349,7 @@ static int napi_poll(struct napi_struct
 	WARN_ON_ONCE(work > weight);

 	if (likely(work < weight))
-		goto out_unlock;
+		return work;

 	/* Drivers must not modify the NAPI state if they
 	 * consume the entire weight. In such cases this code
@@ -6363,7 +6358,7 @@ static int napi_poll(struct napi_struct
 	 */
 	if (unlikely(napi_disable_pending(n))) {
 		napi_complete(n);
-		goto out_unlock;
+		return work;
 	}

 	if (n->gro_bitmask) {
@@ -6381,12 +6376,29 @@ static int napi_poll(struct napi_struct
 	if (unlikely(!list_empty(&n->poll_list))) {
 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
-		goto out_unlock;
+		return work;
 	}

-	list_add_tail(&n->poll_list, repoll);
+	*repoll = true;
+
+	return work;
+}
+
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+	bool do_repoll = false;
+	void *have;
+	int work;
+
+	list_del_init(&n->poll_list);
+
+	have = netpoll_poll_lock(n);
+
+	work = __napi_poll(n, &do_repoll);
+
+	if (do_repoll)
+		list_add_tail(&n->poll_list, repoll);

-out_unlock:
 	netpoll_poll_unlock(have);

 	return work;