GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/sched/sch_htb.c
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <[email protected]>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <[email protected]>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <[email protected]>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <[email protected]>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
	Author: [email protected]
	========================================================================
	HTB is like TBF with multiple classes. It is also similar to CBQ because
	it allows a priority to be assigned to each class in the hierarchy.
	In fact it is another implementation of Floyd's formal sharing.

	Levels:
	Each class is assigned a level. Leaves ALWAYS have level 0 and root
	classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
	one less than their parent.
*/
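/* Illustrative example of the hierarchy above (the device name, handles
 * and rates are assumptions for the example, not taken from this file):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0), 1:1 is an interior node one
 * level above them, and traffic matched by no filter is sent to 1:20 via
 * the "default 20" (q->defcls) setting.
 */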

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
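/* Illustrative usage (assumed shell commands, standard module_param
 * semantics): the parameter can be set at load time or, with the 0640
 * mode above, written by root at runtime:
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */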

/* used internally to keep status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	struct Qdisc_class_common common;
	/* general class parameters */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;			/* usage count of this class */

	/* topology */
	int level;			/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	int prio;		/* these two are used only by leaves... */
	int quantum;		/* but stored for parent-to-leaf return */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			 * parent's feed then we lose the ptr value and start from the
			 * first child again. Here we store the classid of the
			 * last valid ptr (used when ptr is NULL).
			 */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;			/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;		/* token bucket depth/rate */
	psched_tdiff_t mbuffer;		/* max wait time */
	long tokens, ctokens;		/* current number of tokens */
	psched_time_t t_c;		/* checkpoint time */
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	struct work_struct work;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

/**
 * htb_classify - classify a packet into a class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly through. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in skb->priority.
 * Then we examine filters in the qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0 we
 * enqueue the skb into the special internal fifo (direct). These packets
 * then go directly through. If we still have no valid leaf we try to use
 * the MAJOR:default leaf. If still unsuccessful, finish and return the
 * direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
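/* Illustrative walk through the precedence implemented above, assuming a
 * qdisc handle of 1: and a leaf 1:10: skb->priority == 1:0 selects the
 * direct queue; skb->priority == 1:10 selects the leaf without running
 * any filter; anything else runs the qdisc filter chain, then the chains
 * of any inner class a filter points to, and finally falls back to
 * 1:<defcls> or HTB_DIRECT.
 */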

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->un.inner.last_ptr_id[prio] = cl->common.classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline long htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
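/* Illustrative numbers (assumed, with hysteresis off so lowater and
 * hiwater are both 0): if cl->ctokens + *diff = 5000 but
 * cl->tokens + *diff = -200, the ceiling permits sending while the rate
 * bucket is empty, so the function returns HTB_MAY_BORROW and sets
 * *diff = 200, the shortfall later used to schedule the wait event.
 */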

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			cl->qstats.drops++;
		}
		return ret;
	} else {
		bstats_update(&cl->bstats, skb);
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (long) qdisc_l2t(cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
	long toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (long) qdisc_l2t(cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
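/* Illustrative arithmetic (assumed values): with cl->tokens = -4000,
 * diff = 10000 and cl->buffer = 20000, htb_accnt_tokens() starts from
 * toks = 6000, subtracts the rate-table transmission time of the packet
 * (qdisc_l2t(cl->rate, bytes), say 8000 ticks for 1500 bytes) and stores
 * -2000; results are clamped to cl->buffer above and 1 - cl->mbuffer
 * below.
 */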

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock here than the event
 * queue. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	long diff;

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level,
				   unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warning("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node((level ? cl->parent->un.inner.ptr :
				  q->ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
					  q->ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
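/* Illustrative DRR step for the accounting above (assumed values): with
 * cl->quantum = 3000 and deficit[level] = 1000, a 1500 byte packet
 * drives the deficit to -500, so the quantum is added back (giving 2500)
 * and the round robin pointer advances to the next class; smaller
 * packets leave the pointer in place until the quantum is used up.
 */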

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_unthrottled(sch);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();
	start_at = jiffies;

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	sch->qstats.overlimits++;
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;

		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	__netif_schedule(qdisc_root(sch));
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_INIT + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HTB_INIT] == NULL) {
		pr_err("HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16) {
		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	spin_lock_bh(root_lock);

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	nla_nest_end(skb, nest);

	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
	struct nlattr *nest;
	struct tc_htb_opt opt;

	spin_lock_bh(root_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	spin_unlock_bh(root_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(root_lock);
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	sch_tree_lock(sch);
	*old = cl->un.leaf.q;
	cl->un.leaf.q = new;
	if (*old != NULL) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without Oops).
	 */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	 * the tc subsys guarantee us that in htb_destroy it holds no class
	 * refs, so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct nlattr *tb[__TCA_HTB_MAX];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE] ? : &est.nla);
		if (err) {
			kfree(cl);
			goto failure;
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		 * so that can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	/* it used to be a nasty bug here, we have to check that node
	 * is really leaf before changing cl->un.leaf !
	 */
	if (!cl->level) {
		cl->quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->quantum < 1000) {
			pr_warning(
				"HTB: quantum of class %X is small. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			pr_warning(
				"HTB: quantum of class %X is big. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get a
	 * class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");