// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
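/* MAX_DPs comes from the tc UAPI headers (currently 16). It is a power
 * of two, so GRED_VQ_MASK can extract the virtual queue number from the
 * low bits of skb->tc_index with a plain AND.
 */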
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars	 vars;
	struct red_stats stats;
};
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars	wred_set;
	struct tc_gred_qopt_offload *opt;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
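/* The virtual queue (DP) of a packet travels in the low bits of
 * skb->tc_index, normally set by a classifier or action earlier in the
 * datapath; packets carrying an out-of-range index fall back to the
 * default DP in gred_enqueue().
 */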
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}
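/* In WRED mode all virtual queues share one average queue length: the
 * table-wide wred_set is loaded into the per-VQ state before the RED
 * computation and stored back afterwards, so every VQ sees and updates
 * the same qavg.
 */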
static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}
static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless global are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);
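	/* red_action() classifies the packet against the averaged queue
	 * length: RED_DONT_MARK queues it normally, RED_PROB_MARK is a
	 * probabilistic hit between qth_min and qth_max, and RED_HARD_MARK
	 * means the average is past qth_max. Marked packets are ECN-marked
	 * when the VQ allows it, and dropped otherwise.
	 */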
	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
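/* Starting a RED idle period on the dequeue that empties the (virtual)
 * queue lets red_calc_qavg() decay the average while the queue sits
 * idle; in WRED mode that state is tracked on the shared wred_set
 * rather than per VQ.
 */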
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload *opt = table->opt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = sch->handle;
	opt->parent = sch->parent;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt->set.grio_on = gred_rio_mode(table);
		opt->set.wred_on = gred_wred_mode(table);
		opt->set.dp_cnt = table->DPs;
		opt->set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt->set.tab[i].present = true;
			opt->set.tab[i].limit = q->limit;
			opt->set.tab[i].prio = q->prio;
			/* qth_min/qth_max are stored pre-shifted by Wlog;
			 * shift back before handing them to the driver.
			 */
			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt->set.tab[i].is_ecn = gred_use_ecn(q);
			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt->set.tab[i].probability = q->parms.max_P;
			opt->set.tab[i].backlog = &q->backlog;
		}

		opt->set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns failure, adjust the stats - in case
	 * offload ended but the driver still wants to adjust the values.
	 */
	sch_tree_lock(sch);
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);
	sch_tree_unlock(sch);

	kfree(hw_stats);
	return ret;
}
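/* gred_offload_dump_stats() accumulates ("+=") rather than overwrites,
 * so the driver is expected to report deltas since the previous
 * TC_GRED_STATS call; absolute counters would be double-counted here.
 */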
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}
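/* gred_vq_apply() deliberately ignores the nla_parse return value:
 * every entry has already been checked by gred_vq_validate() during the
 * validation pass, so the attributes are known to be well-formed by the
 * time they are applied under the qdisc tree lock.
 */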
static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}
static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
	}

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++)
		gred_destroy_vq(table->tab[i]);

	gred_offload(sch, TC_GRED_DESTROY);
	kfree(table->opt);
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");