/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129:	- Bug fix with grio mode
 *			- a better single AvgQ mode with Grio (WRED)
 *			- A finer grained VQ dequeue based on a suggestion
 *			  from Ren Liu
 *			- More error checks
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
/* Parameters */
	u32		limit;		/* HARD maximal queue length */
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop parameters */
	char		Wlog;		/* log(W) */
	char		Plog;		/* random number bits */
	u32		Scell_max;
	u32		Rmask;
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];
	u8		prio;		/* the prio of this vq */

/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period */
};
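/*
 * A note on the "A scaled" fields: qave, qth_min and qth_max are kept
 * scaled up by a factor of 2^Wlog.  With W = 2^(-Wlog), the EWMA
 *
 *	avg := (1 - W) * avg + W * qlen
 *
 * reduces to shift-and-add arithmetic on the scaled value:
 *
 *	qave += qlen - (qave >> Wlog);	(qave == avg << Wlog)
 *
 * which is exactly what gred_enqueue() does below.
 */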
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data	*tab[MAX_DPs];
	unsigned long		flags;
	u32			DPs;
	u32			def;
	u8			initd;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
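/*
 * WRED mode is meaningful only within RIO mode: the callers below
 * enable it when gred_wred_mode_check() finds at least two VQs sharing
 * the same priority, in which case all VQs track a single shared
 * average queue length (kept in the default VQ).
 */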
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't need to run frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
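/*
 * A packet's VQ is selected by the low nibble of skb->tc_index,
 * typically set beforehand by the dsmark qdisc or the tcindex
 * classifier; out-of-range values fall back to the default DP.
 */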
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	psched_time_t now;
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qave = 0;
	int i = 0;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if (((skb->tc_index & 0xf) > (t->DPs - 1)) ||
	    !(q = t->tab[skb->tc_index & 0xf])) {
		printk("GRED: setting to default (%d)\n", t->def);
		if (!(q = t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping! "
				"(%d)\n", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & 0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n", skb->tc_index & 0xf, sch->handle,
		 q->backlog, sch->qstats.backlog);

	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i = 0; i < t->DPs; i++) {
			if (!t->tab[i] || i == q->DP)
				continue;

			if (t->tab[i]->prio < q->prio &&
			    PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart))
				qave += t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t)) {
		qave = 0;
		q->qave = t->tab[t->def]->qave;
		q->qidlestart = t->tab[t->def]->qidlestart;
	}
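	/*
	 * If the VQ has been idle, qidlestart is set and the branch below
	 * decays the average instead of growing it: Stab[] holds
	 * precomputed shift counts approximating
	 * avg *= (1 - W)^(idle / 2^Scell_log), so a long idle period
	 * collapses the average toward zero.
	 */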
	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		long us_idle;

		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle >> q->Scell_log) & 0xFF];
	} else {
		if (gred_wred_mode(t))
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		else
			q->qave += q->backlog - (q->qave >> q->Wlog);
	}

	if (gred_wred_mode(t))
		t->tab[t->def]->qave = q->qave;
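	/*
	 * Classic RED marking follows.  Between qth_min and qth_max a
	 * packet is dropped when
	 *
	 *	((avg - qth_min) >> Wlog) * qcount >= qR
	 *
	 * where qcount counts packets since the last early drop and qR is
	 * a cached random number over [0, Rmask]; this spreads the early
	 * drops out instead of clustering them.  At or above qth_max
	 * every packet is dropped.
	 */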
	if ((q->qave + qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}

		q->pdrop++;
drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}

	if ((q->qave + qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}

	if (++q->qcount) {
		if ((((qave + q->qave) - q->qth_min) >> q->Wlog) * q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random() & q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}

	q->qR = net_random() & q->Rmask;
	goto enqueue;
}
static int
gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	q = t->tab[skb->tc_index & 0xf];
	/* error checking here -- probably unnecessary */
	PSCHED_SET_PASTPERFECT(q->qidlestart);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}
		return skb;
	}

	if (gred_wred_mode(t)) {
		q = t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;

		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= len;
			q->other++;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q = t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	PSCHED_GET_TIME(q->qidlestart);
	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i = 0; i < t->DPs; i++) {
		q = t->tab[i];
		if (!q)
			continue;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		q->qave = 0;
		q->qcount = -1;
		q->backlog = 0;
		q->other = 0;
		q->forced = 0;
		q->pdrop = 0;
		q->early = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);

	if (!table->DPs || tb[TCA_GRED_PARMS-1] == NULL ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);

	if (ctl->DP >= table->DPs)
		return -EINVAL;

	if (table->tab[ctl->DP] == NULL) {
		table->tab[ctl->DP] = kmalloc(sizeof(struct gred_sched_data),
					      GFP_KERNEL);
		if (table->tab[ctl->DP] == NULL)
			return -ENOMEM;
		memset(table->tab[ctl->DP], 0, sizeof(struct gred_sched_data));
	}

	q = table->tab[ctl->DP];

	if (gred_rio_mode(table)) {
		if (ctl->prio <= 0) {
			if (table->def && table->tab[table->def]) {
				DPRINTK("\nGRED: DP %u does not have a prio, "
					"setting default to %d\n", ctl->DP,
					table->tab[table->def]->prio);
				q->prio = table->tab[table->def]->prio;
			} else {
				DPRINTK("\nGRED: DP %u does not have a prio, "
					"setting default to 8\n", ctl->DP);
				q->prio = 8;
			}
		} else {
			q->prio = ctl->prio;
		}
	} else {
		q->prio = 8;
	}
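	/*
	 * qth_min/qth_max arrive from userspace in queue-length units and
	 * are stored left-shifted by Wlog so that they compare directly
	 * against the scaled average qave (see the scaling note near the
	 * top of this file).
	 */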
	q->DP = ctl->DP;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255 << q->Scell_log);
	q->qth_min = ctl->qth_min << ctl->Wlog;
	q->qth_max = ctl->qth_max << ctl->Wlog;
	q->qave = 0;
	q->backlog = 0;
	q->qcount = -1;
	q->other = 0;
	q->forced = 0;
	q->pdrop = 0;
	q->early = 0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	if (!table->initd) {
		table->initd = 1;
		/*
		 * the first entry also goes into the default until
		 * over-written
		 */
		if (table->tab[table->def] == NULL) {
			table->tab[table->def] =
				kmalloc(sizeof(struct gred_sched_data),
					GFP_KERNEL);
			if (table->tab[table->def] == NULL)
				return -ENOMEM;

			memset(table->tab[table->def], 0,
			       sizeof(struct gred_sched_data));
		}

		q = table->tab[table->def];
		q->DP = table->def;
		q->Wlog = ctl->Wlog;
		q->Plog = ctl->Plog;
		q->limit = ctl->limit;
		q->Scell_log = ctl->Scell_log;
		q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
		q->Scell_max = (255 << q->Scell_log);
		q->qth_min = ctl->qth_min << ctl->Wlog;
		q->qth_max = ctl->qth_max << ctl->Wlog;

		if (gred_rio_mode(table))
			q->prio = table->tab[ctl->DP]->prio;
		else
			q->prio = 8;

		q->qcount = -1;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
	}

	return 0;
}
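/*
 * Qdisc creation only carries the table-wide setup (DPs, def_DP, grio);
 * per-VQ parameters are applied later through gred_change(), which is
 * why gred_init() below rejects PARMS/STAB attributes.
 */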
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->qth_min >> q->Wlog;
		opt.qth_max	= q->qth_max >> q->Wlog;
		opt.Wlog	= q->Wlog;
		opt.Plog	= q->Plog;
		opt.Scell_log	= q->Scell_log;
		opt.other	= q->other;
		opt.early	= q->early;
		opt.forced	= q->forced;
		opt.pdrop	= q->pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (q->qave) {
			if (gred_wred_mode(table)) {
				q->qidlestart = table->tab[table->def]->qidlestart;
				q->qave = table->tab[table->def]->qave;
			}
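			/*
			 * Mirror the idle-decay done at enqueue time so
			 * the average reported to userspace accounts for
			 * the time this VQ has been sitting idle.
			 */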
			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
				long idle;
				unsigned long qave;
				psched_time_t now;

				PSCHED_GET_TIME(now);
				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart,
							 q->Scell_max);
				qave = q->qave >> q->Stab[(idle >> q->Scell_log) & 0xFF];
				opt.qave = qave >> q->Wlog;
			} else {
				opt.qave = q->qave >> q->Wlog;
			}
		}

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");