// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithms behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */

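/* For illustration only: a user-space configuration along these lines
 * (device name and values are arbitrary, see tc-red(8)) exercises the
 * parameters above, with "limit" chosen well above "max" so that bursts
 * are absorbed by the queue rather than dropped early:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn probability 0.02 bandwidth 10Mbit
 */
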
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

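/* Enqueue path: update the EWMA queue average, ask the RED state machine
 * for a verdict (no action, probabilistic mark, or hard mark) and either
 * ECN-mark, drop, or queue the packet to the child (bfifo) qdisc.
 */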
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

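/* Dequeue from the child qdisc; when the child runs empty, note the start
 * of an idle period so the average queue length decays correctly.
 */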
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

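/* Push the current RED configuration to the device (or tear it down) via
 * ndo_setup_tc(TC_SETUP_QDISC_RED), when the netdev supports offloading.
 */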
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

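/* Common back end for init and change: validate the netlink attributes,
 * optionally build a new bfifo child sized to "limit", then apply the
 * parameters and flags under the qdisc tree lock and refresh the offload.
 */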
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

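/* Periodic timer used in adaptative mode: re-tune max_P roughly every
 * half second based on how the average queue length tracks the thresholds.
 */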
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

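/* Dump the current configuration back to user space over netlink; the
 * thresholds are scaled back from their internal fixed-point form by Wlog.
 */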
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

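/* Fill tc_red_xstats for "tc -s qdisc": when the qdisc is offloaded, first
 * refresh the software counters from the driver via TC_RED_XSTATS.
 */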
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop)
		tc_qdisc_stats_dump(sch, 1, walker);
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");