// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
 Author: devik@cdi.cz
 ========================================================================
 HTB is like TBF with multiple classes. It is also similar to CBQ because
 it allows assigning a priority to each class in the hierarchy.
 In fact it is another implementation of Floyd's formal sharing.

 Levels:
 Each class is assigned a level. Leaves ALWAYS have level 0 and root
 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent.
*/
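
/* Illustrative sketch (guarded out, not compiled): the level rule above in
 * code form. Levels are fixed by construction - a new class starts as a
 * leaf at level 0, and when a leaf gains its first child,
 * htb_change_class() recomputes that parent's level from its own parent,
 * keeping every interior node one level below its parent.
 */
#if 0
static int htb_example_parent_level(const struct htb_class *parent)
{
	/* mirrors the level assignment done in htb_change_class() */
	return (parent->parent ? parent->parent->level : TC_HTB_MAXDEPTH) - 1;
}
#endif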

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must match the version number supplied by TC */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* whether HTB classes get a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root row;
		struct rb_root feed;
	};
	struct rb_node *ptr;
	/* When a class changes from state 1->2 and disconnects from its
	 * parent's feed, we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32 last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg rate;
	struct psched_ratecfg ceil;
	s64 buffer, cbuffer;	/* token bucket depth/rate */
	s64 mbuffer;		/* max wait time */
	u32 prio;		/* these two are used only by leaves... */
	int quantum;		/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu *filter_list;	/* class attached filters */
	struct tcf_block *block;
	int filter_cnt;

	int level;		/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct tc_htb_xstats xstats;	/* our special stats */

	/* token bucket parameters */
	s64 tokens, ctokens;	/* current number of tokens */
	s64 t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int deficit[TC_HTB_MAXDEPTH];
			struct Qdisc *q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64 pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */
	struct rb_node pq_node;	/* node for event queue */
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int overlimits;
};

struct htb_level {
	struct rb_root wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int defcls;		/* class where unclassified flows go to */
	int rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;

#define HTB_WARN_TOOMANYEVENTS 0x1
	unsigned int warned;	/* only one warning */
	int direct_qlen;
	struct work_struct work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head direct_queue;
	u32 direct_pkts;
	u32 overlimits;

	struct qdisc_watchdog watchdog;

	s64 now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64 near_ev_cache[TC_HTB_MAXDEPTH];

	int row_mask[TC_HTB_MAXDEPTH];

	struct htb_level hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in priority. Then
 * we examine filters in the qdisc and in inner nodes (if a higher filter
 * points to the inner node). If we end up with classid MAJOR:0 we enqueue
 * the skb into the special internal fifo (direct). These packets then go
 * directly thru. If we still have no valid leaf we try to use the
 * MAJOR:default leaf. If still unsuccessful, we finish and return the
 * direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is a safe bet */
	return cl;
}
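
/* Illustrative sketch (guarded out, not compiled): the classid encodings
 * htb_classify() matches against. With qdisc handle 1:0 (0x00010000), a
 * socket setting skb->priority to 0x00010000 selects the direct queue,
 * while 0x00010001 selects class 1:1 directly, bypassing the filters.
 */
#if 0
static u32 htb_example_default_classid(const struct Qdisc *sch,
				       const struct htb_sched *q)
{
	/* the same lookup key used for the MAJOR:default fallback above */
	return TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls);
}
#endif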

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds the class to the list (actually tree) sorted by classid.
 * Make sure that the class is not already on such a list for the given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode at the absolute time cl->pq_key (nanoseconds).
 * Make sure that the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key, *n is set to NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as the parent is
				 * already OK
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing a child which is pointed to
				 * from the parent feed - forget the pointer
				 * but remember the classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
 * is not HTB_CAN_SEND then *diff is set to the time difference from now
 * to the time when cl will change its state (htb_add_to_wait_tree() then
 * derives cl->pq_key from it).
 * It is also worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be a hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
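
/* Worked example (illustrative, hysteresis off so both watermarks are 0):
 * with ctokens = -5000 and diff = 2000, toks = -3000 < 0, so the class is
 * HTB_CANT_SEND and *diff becomes 3000, the wait until the ceil bucket
 * refills. If the ceil bucket is fine but tokens + diff is still negative,
 * the class may only borrow. The guarded-out sketch below restates the
 * decision under that assumption.
 */
#if 0
static enum htb_cmode htb_example_mode(s64 tokens, s64 ctokens, s64 diff)
{
	if (ctokens + diff < 0)		/* ceil exceeded */
		return HTB_CANT_SEND;
	if (tokens + diff >= 0)		/* within own rate */
		return HTB_CAN_SEND;
	return HTB_MAY_BORROW;		/* over rate, but under ceil */
}
#endif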

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns the (new) priority of the leaf and activates the feed
 * chain for that prio. It can safely be called on an already active leaf.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words, it must not be
 * called with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
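
/* Illustrative sketch (guarded out, not compiled): the cost subtracted by
 * the two helpers above is the transmission time of the packet at the
 * configured rate. psched_l2t_ns() computes it with a precomputed
 * mult/shift pair; a naive equivalent, assuming rate_bytes_ps > 0:
 */
#if 0
static s64 htb_example_l2t_ns(u64 rate_bytes_ps, unsigned int bytes)
{
	return div64_u64((u64)bytes * NSEC_PER_SEC, rate_bytes_ps);
}
#endif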

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 * the leaf and all ancestors and to the rate bucket for ancestors at levels
 * "level" and higher. It also handles a possible change of mode resulting
 * from the update. Note that the mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue.
 * In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * time of the next pending event (0 if there is none in the pq, q->now if
 * there are too many events).
 * Note: only events with cl->pq_key <= q->now are applied.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when the jiffy is about to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from the id-tree whose classid is >= id, or
 * NULL if no such node exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
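
/* Illustrative usage (guarded out, not compiled): recovering a round-robin
 * position after the remembered pointer was invalidated, exactly as
 * htb_lookup_leaf() does below - resume at the first class whose classid
 * is >= the remembered one.
 */
#if 0
static struct rb_node *htb_example_recover(struct htb_prio *hprio, int prio)
{
	return htb_id_find_next_upper(prio, hprio->row.rb_node,
				      hprio->last_ptr_id);
}
#endif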

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Finds the leaf the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so drop this hint as it
				 * can become stale quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this arrangement
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
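
/* Worked example (illustrative): the deficit round robin bookkeeping above.
 * With quantum = 1500 and deficit = 200, dequeuing a 400-byte packet drives
 * the deficit to -200, so it is recharged to 1300 and the round-robin
 * pointer advances; smaller packets keep the pointer in place until the
 * deficit is spent. A guarded-out restatement:
 */
#if 0
static bool htb_example_drr_advance(int *deficit, int quantum, int pkt_len)
{
	*deficit -= pkt_len;
	if (*deficit < 0) {
		*deficit += quantum;
		return true;	/* move on to the next class */
	}
	return false;		/* same class keeps the turn */
}
#endif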

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	qdisc_skb_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
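
/* Illustrative configuration (assuming standard iproute2 syntax): the
 * attributes parsed above map to a command such as
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10
 *
 * where "default 20" becomes gopt->defcls, "r2q 10" becomes
 * gopt->rate2quantum, and direct_qlen falls back to the device's
 * tx_queue_len when TCA_HTB_DIRECT_QLEN is absent.
 */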

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	sch->qstats.overlimits = q->overlimits;
	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->leaf.q)
		tcm->tcm_info = cl->leaf.q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
		.overlimits = cl->overlimits,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->leaf.q)
		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);

	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid, extack)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->inner, 0, sizeof(parent->inner));
	parent->leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->leaf.q);
		qdisc_put(cl->leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below,
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without an Oops).
	 */
	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does the
	 * tc subsystem guarantee that in htb_destroy it holds no class
	 * refs, so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid,
					  NULL);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level)
		qdisc_purge_queue(cl->leaf.q);

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct Qdisc *parent_qdisc = NULL;
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;
	int warn = 0;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
					      NULL));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
					      NULL));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				tcf_block_put(cl->block);
				kfree(cl);
				goto failure;
			}
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		 * so it can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* there used to be a nasty bug here: we have to check that the node
	 * is really a leaf before changing cl->leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
			cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}
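
/* Worked example (illustrative): the quantum computation above. At a rate
 * of 100 Mbit/s (12500000 bytes/s) with iproute2's default r2q of 10,
 * quantum would be 1250000, so it is clamped to 200000 and a "Consider r2q
 * change" warning is logged; an explicit quantum in hopt always wins. A
 * compressed restatement of the clamping, assuming hopt->quantum == 0:
 */
#if 0
static int htb_example_quantum(u64 rate_bytes_ps, int rate2quantum)
{
	u64 quantum = rate_bytes_ps;

	do_div(quantum, rate2quantum);
	return clamp_t(u64, quantum, 1000, 200000);
}
#endif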

static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get
	 * the class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, it is OK - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		= htb_graft,
	.leaf		= htb_leaf,
	.qlen_notify	= htb_qlen_notify,
	.find		= htb_search,
	.change		= htb_change_class,
	.delete		= htb_delete,
	.walk		= htb_walk,
	.tcf_block	= htb_tcf_block,
	.bind_tcf	= htb_bind_filter,
	.unbind_tcf	= htb_unbind_filter,
	.dump		= htb_dump_class,
	.dump_stats	= htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		= &htb_class_ops,
	.id		= "htb",
	.priv_size	= sizeof(struct htb_sched),
	.enqueue	= htb_enqueue,
	.dequeue	= htb_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= htb_init,
	.reset		= htb_reset,
	.destroy	= htb_destroy,
	.dump		= htb_dump,
	.owner		= THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");