GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/sched/sch_red.c
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <[email protected]> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

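/*
 * Illustrative usage (not part of this file): the parameters above are
 * normally set from userspace with the tc(8) utility.  A sketch of a
 * configuration, with example values only, might look like:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn bandwidth 10Mbit probability 0.02
 *
 * tc translates these values into the tc_red_qopt parameters and the
 * STAB table parsed by red_change() below.
 */
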
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct red_parms	parms;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

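/*
 * Enqueue path: the average queue size is recomputed from the child
 * qdisc's backlog, then red_action() selects one of three outcomes:
 * pass the packet through unchanged, probabilistically mark it, or
 * hard-mark it.  With ECN enabled the packet is CE-marked where
 * possible; otherwise (or with the harddrop flag set) it is dropped.
 */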
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

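/*
 * Dequeue path: packets come from the child qdisc.  When the child
 * runs empty, an idle period is started so that the average queue
 * size estimate can be aged for the time the link was idle.
 */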
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

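/*
 * ->drop removes one packet to free queue space; the request is
 * delegated to the child qdisc and the dropped packet is accounted
 * as an "other" drop.
 */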
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

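/*
 * Configuration via netlink: TCA_RED_PARMS carries a struct tc_red_qopt
 * with the queue limit, thresholds and EWMA/probability constants, and
 * TCA_RED_STAB carries the lookup table used to age the average queue
 * size after an idle period.  A bfifo child sized to the configured
 * limit is (re)created to hold the actual backlog.
 */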
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

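/*
 * RED behaves as a classful qdisc with exactly one class: the child
 * qdisc (a bfifo by default) that holds the backlog.  The class
 * operations below simply expose and graft that single child.
 */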
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.get		= red_get,
	.put		= red_put,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");