From b531d492d5ef1cf9dba0f4888eb5fd8624a6d762 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 7 Jul 2017 17:23:42 +0200
Subject: net: sched: switch default qdisc from pfifo_fast to fq_codel and remove pfifo_fast

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 net/sched/sch_generic.c | 140 ------------------------------------------------
 1 file changed, 140 deletions(-)

--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -620,230 +620,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
 .owner = THIS_MODULE,
 };
 
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
- generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-/*
- * Private data for a pfifo_fast scheduler containing:
- * - rings for priority bands
- */
-struct pfifo_fast_priv {
- struct skb_array q[PFIFO_FAST_BANDS];
-};
-
-static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
- int band)
-{
- return &priv->q[band];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
- struct sk_buff **to_free)
-{
- int band = prio2band[skb->priority & TC_PRIO_MAX];
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct skb_array *q = band2list(priv, band);
- unsigned int pkt_len = qdisc_pkt_len(skb);
- int err;
-
- err = skb_array_produce(q, skb);
-
- if (unlikely(err)) {
- if (qdisc_is_percpu_stats(qdisc))
- return qdisc_drop_cpu(skb, qdisc, to_free);
- else
- return qdisc_drop(skb, qdisc, to_free);
- }
-
- qdisc_update_stats_at_enqueue(qdisc, pkt_len);
- return NET_XMIT_SUCCESS;
-}
-
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- bool need_retry = true;
- int band;
-
-retry:
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- if (__skb_array_empty(q))
- continue;
-
- skb = __skb_array_consume(q);
- }
- if (likely(skb)) {
- qdisc_update_stats_at_dequeue(qdisc, skb);
- } else if (need_retry &&
- test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
- /* Delay clearing the STATE_MISSED here to reduce
- * the overhead of the second spin_trylock() in
- * qdisc_run_begin() and __netif_schedule() calling
- * in qdisc_run_end().
- */
- clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
-
- /* Make sure dequeuing happens after clearing
- * STATE_MISSED.
- */
- smp_mb__after_atomic();
-
- need_retry = false;
-
- goto retry;
- } else {
- WRITE_ONCE(qdisc->empty, true);
- }
-
- return skb;
-}
-
-static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- int band;
-
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- skb = __skb_array_peek(q);
- }
-
- return skb;
-}
-
-static void pfifo_fast_reset(struct Qdisc *qdisc)
-{
- int i, band;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-
- for (band = 0; band < PFIFO_FAST_BANDS; band++) {
- struct skb_array *q = band2list(priv, band);
- struct sk_buff *skb;
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
-
- while ((skb = __skb_array_consume(q)) != NULL)
- kfree_skb(skb);
- }
-
- if (qdisc_is_percpu_stats(qdisc)) {
- for_each_possible_cpu(i) {
- struct gnet_stats_queue *q;
-
- q = per_cpu_ptr(qdisc->cpu_qstats, i);
- q->backlog = 0;
- q->qlen = 0;
- }
- }
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
- struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
- if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- int prio;
-
- /* guard against zero length rings */
- if (!qlen)
- return -EINVAL;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
- int err;
-
- err = skb_array_init(q, qlen, GFP_KERNEL);
- if (err)
- return -ENOMEM;
- }
-
- /* Can by-pass the queue discipline */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- return 0;
-}
-
-static void pfifo_fast_destroy(struct Qdisc *sch)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
- /* Destroy ring but no need to kfree_skb because a call to
- * pfifo_fast_reset() has already done that work.
- */
- ptr_ring_cleanup(&q->ring, NULL);
- }
-}
-
-static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
- unsigned int new_len)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- struct skb_array *bands[PFIFO_FAST_BANDS];
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- bands[prio] = q;
- }
-
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
-}
-
-struct Qdisc_ops pfifo_fast_ops __read_mostly = {
- .id = "pfifo_fast",
- .priv_size = sizeof(struct pfifo_fast_priv),
- .enqueue = pfifo_fast_enqueue,
- .dequeue = pfifo_fast_dequeue,
- .peek = pfifo_fast_peek,
- .init = pfifo_fast_init,
- .destroy = pfifo_fast_destroy,
- .reset = pfifo_fast_reset,
- .dump = pfifo_fast_dump,
- .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
- .owner = THIS_MODULE,
- .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
-};
-EXPORT_SYMBOL(pfifo_fast_ops);
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 const struct Qdisc_ops *ops,
 struct netlink_ext_ack *extack)