// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c  Packet scheduler API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <trace/events/qdisc.h>
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the parts of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything right now.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP        - this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN          - this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
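
/* For orientation, a minimal qdisc built on the hooks described above.
 * This is an illustrative sketch only, not code from this file; the
 * "example_fifo" names are hypothetical (compare net/sched/sch_fifo.c
 * for a real minimal qdisc):
 *
 *      static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *                                 struct sk_buff **to_free)
 *      {
 *              if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
 *                      return qdisc_enqueue_tail(skb, sch);    // 0 on success
 *              return qdisc_drop(skb, sch, to_free);           // NET_XMIT_DROP
 *      }
 *
 *      static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *      {
 *              return qdisc_dequeue_head(sch);  // NULL: nothing to send now
 *      }
 *
 *      static struct Qdisc_ops example_fifo_ops __read_mostly = {
 *              .id             = "example_fifo",
 *              .enqueue        = example_enqueue,
 *              .dequeue        = example_dequeue,
 *              .peek           = qdisc_peek_head,      // ---peek
 *              .reset          = qdisc_reset_queue,    // ---reset
 *              .owner          = THIS_MODULE,
 *      };
 */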
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);

/************************************************
 *      Queueing disciplines manipulation.      *
 ************************************************/

/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int rc = -EEXIST;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (!strcmp(qops->id, q->id))
                        goto out;

        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
                if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
                else
                        goto out_einval;
        }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;

        if (qops->cl_ops) {
                const struct Qdisc_class_ops *cops = qops->cl_ops;

                if (!(cops->find && cops->walk && cops->leaf))
                        goto out_einval;

                if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
                        goto out_einval;
        }

        qops->next = NULL;
        *qp = qops;
        rc = 0;
out:
        write_unlock(&qdisc_mod_lock);
        return rc;

out_einval:
        rc = -EINVAL;
        goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
        struct Qdisc_ops *q, **qp;
        int err = -ENOENT;

        write_lock(&qdisc_mod_lock);
        for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
                if (q == qops)
                        break;
        if (q) {
                *qp = q->next;
                q->next = NULL;
                err = 0;
        }
        write_unlock(&qdisc_mod_lock);
        return err;
}
EXPORT_SYMBOL(unregister_qdisc);
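
/* Sketch of how a scheduler module typically uses the pair above in its
 * init/exit paths; "example_fifo_ops" is the hypothetical ops table from
 * the sketch earlier, not something defined in this file:
 *
 *      static int __init example_module_init(void)
 *      {
 *              return register_qdisc(&example_fifo_ops);  // -EEXIST if id clashes
 *      }
 *
 *      static void __exit example_module_exit(void)
 *      {
 *              unregister_qdisc(&example_fifo_ops);
 *      }
 *      module_init(example_module_init);
 *      module_exit(example_module_exit);
 *      MODULE_LICENSE("GPL");
 */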
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
        read_lock(&qdisc_mod_lock);
        strlcpy(name, default_qdisc_ops->id, len);
        read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
        struct Qdisc_ops *q = NULL;

        for (q = qdisc_base; q; q = q->next) {
                if (!strcmp(name, q->id)) {
                        if (!try_module_get(q->owner))
                                q = NULL;
                        break;
                }
        }

        return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
        const struct Qdisc_ops *ops;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        write_lock(&qdisc_mod_lock);
        ops = qdisc_lookup_default(name);
        if (!ops) {
                /* Not found, drop lock and try to load module */
                write_unlock(&qdisc_mod_lock);
                request_module("sch_%s", name);
                write_lock(&qdisc_mod_lock);

                ops = qdisc_lookup_default(name);
        }

        if (ops) {
                /* Set new default */
                module_put(default_qdisc_ops->owner);
                default_qdisc_ops = ops;
        }
        write_unlock(&qdisc_mod_lock);

        return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
        return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdiscs attached to the device
 * (root qdisc, all its children, children of children, etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
        struct Qdisc *q;

        if (!qdisc_dev(root))
                return (root->handle == handle ? root : NULL);

        if (!(root->flags & TCQ_F_BUILTIN) &&
            root->handle == handle)
                return root;

        hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
                                   lockdep_rtnl_is_held()) {
                if (q->handle == handle)
                        return q;
        }
        return NULL;
}
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                ASSERT_RTNL();
                hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
                if (invisible)
                        q->flags |= TCQ_F_INVISIBLE;
        }
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
                ASSERT_RTNL();
                hash_del_rcu(&q->hash);
        }
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
        struct Qdisc *q;

        if (!handle)
                return NULL;
        q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
        if (q)
                goto out;

        if (dev_ingress_queue(dev))
                q = qdisc_match_from_root(
                        dev_ingress_queue(dev)->qdisc_sleeping,
                        handle);
out:
        return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
        struct netdev_queue *nq;
        struct Qdisc *q;

        if (!handle)
                return NULL;
        q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
        if (q)
                goto out;

        nq = dev_ingress_queue_rcu(dev);
        if (nq)
                q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
        return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
        unsigned long cl;
        const struct Qdisc_class_ops *cops = p->ops->cl_ops;

        if (cops == NULL)
                return NULL;
        cl = cops->find(p, classid);

        if (cl == 0)
                return NULL;
        return cops->leaf(p, cl);
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
        struct Qdisc_ops *q = NULL;

        if (kind) {
                read_lock(&qdisc_mod_lock);
                for (q = qdisc_base; q; q = q->next) {
                        if (nla_strcmp(kind, q->id) == 0) {
                                if (!try_module_get(q->owner))
                                        q = NULL;
                                break;
                        }
                }
                read_unlock(&qdisc_mod_lock);
        }
        return q;
}
/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM, table entries are aligned to 48-byte cells, so
 * some table entries will contain the same value. The mpu (min packet
 * unit) is also encoded into the old rate table, so starting from the
 * mpu we find the low and high table entries that map this cell. If
 * these entries contain the same value, then the rate table has been
 * modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, computing the table entry one
 * below it, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
        int low       = roundup(r->mpu, 48);
        int high      = roundup(low+1, 48);
        int cell_low  = low >> r->cell_log;
        int cell_high = (high >> r->cell_log) - 1;

        /* rtab is too inaccurate at rates > 100Mbit/s */
        if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
                pr_debug("TC linklayer: Giving up ATM detection\n");
                return TC_LINKLAYER_ETHERNET;
        }

        if ((cell_high > cell_low) && (cell_high < 256)
            && (rtab[cell_low] == rtab[cell_high])) {
                pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
                         cell_low, cell_high, rtab[cell_high]);
                return TC_LINKLAYER_ATM;
        }
        return TC_LINKLAYER_ETHERNET;
}
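
/* Worked example of the detection above, with illustrative numbers:
 * for mpu = 0 and cell_log = 3, low = roundup(0, 48) = 0 and
 * high = roundup(1, 48) = 48, giving cell_low = 0 and
 * cell_high = (48 >> 3) - 1 = 5. An ATM-aligned table charges the same
 * cost for every size that fits in one 48-byte cell, so
 * rtab[0] == rtab[5] and we return TC_LINKLAYER_ATM; an Ethernet table
 * grows with every 2^cell_log byte step, so the two entries differ.
 */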
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                                        struct nlattr *tab,
                                        struct netlink_ext_ack *extack)
{
        struct qdisc_rate_table *rtab;

        if (tab == NULL || r->rate == 0 ||
            r->cell_log == 0 || r->cell_log >= 32 ||
            nla_len(tab) != TC_RTAB_SIZE) {
                NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
        }

        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
                    !memcmp(&rtab->data, nla_data(tab), 1024)) {
                        rtab->refcnt++;
                        return rtab;
                }
        }

        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
                rtab->refcnt = 1;
                memcpy(rtab->data, nla_data(tab), 1024);
                if (r->linklayer == TC_LINKLAYER_UNAWARE)
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
        } else {
                NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
        }
        return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
        struct qdisc_rate_table *rtab, **rtabp;

        if (!tab || --tab->refcnt)
                return;

        for (rtabp = &qdisc_rtab_list;
             (rtab = *rtabp) != NULL;
             rtabp = &rtab->next) {
                if (rtab == tab) {
                        *rtabp = rtab->next;
                        kfree(rtab);
                        return;
                }
        }
}
EXPORT_SYMBOL(qdisc_put_rtab);
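
/* Sketch of the usual rtab lifecycle inside a qdisc; the TCA_EXAMPLE_RTAB
 * attribute name is hypothetical (see sch_cbq for a real user):
 *
 *      struct qdisc_rate_table *rtab;
 *
 *      rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_EXAMPLE_RTAB], extack);
 *      if (!rtab)                              // in ->init()/->change()
 *              return -EINVAL;
 *      ...
 *      len_to_time = qdisc_l2t(rtab, qdisc_pkt_len(skb));
 *      ...
 *      qdisc_put_rtab(rtab);                   // in ->destroy()
 */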
static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
        [TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
                                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
        struct tc_sizespec *s;
        unsigned int tsize = 0;
        u16 *tab = NULL;
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
                                          extack);
        if (err < 0)
                return ERR_PTR(err);
        if (!tb[TCA_STAB_BASE]) {
                NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
                return ERR_PTR(-EINVAL);
        }

        s = nla_data(tb[TCA_STAB_BASE]);

        if (s->tsize > 0) {
                if (!tb[TCA_STAB_DATA]) {
                        NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
                        return ERR_PTR(-EINVAL);
                }
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
        }

        if (tsize != s->tsize || (!tab && tsize > 0)) {
                NL_SET_ERR_MSG(extack, "Invalid size of size table");
                return ERR_PTR(-EINVAL);
        }

        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
                        continue;
                if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                        continue;
                stab->refcnt++;
                return stab;
        }

        if (s->size_log > STAB_SIZE_LOG_MAX ||
            s->cell_log > STAB_SIZE_LOG_MAX) {
                NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
                return ERR_PTR(-EINVAL);
        }

        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);

        stab->refcnt = 1;
        stab->szopts = *s;
        if (tsize > 0)
                memcpy(stab->data, tab, tsize * sizeof(u16));

        list_add_tail(&stab->list, &qdisc_stab_list);

        return stab;
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
        if (!tab)
                return;

        if (--tab->refcnt == 0) {
                list_del(&tab->list);
                kfree_rcu(tab, rcu);
        }
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_STAB);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        return skb->len;

nla_put_failure:
        return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab)
{
        int pkt_len, slot;

        pkt_len = skb->len + stab->szopts.overhead;
        if (unlikely(!stab->szopts.tsize))
                goto out;

        slot = pkt_len + stab->szopts.cell_align;
        if (unlikely(slot < 0))
                slot = 0;

        slot >>= stab->szopts.cell_log;
        if (likely(slot < stab->szopts.tsize))
                pkt_len = stab->data[slot];
        else
                pkt_len = stab->data[stab->szopts.tsize - 1] *
                                (slot / stab->szopts.tsize) +
                                stab->data[slot % stab->szopts.tsize];

        pkt_len <<= stab->szopts.size_log;
out:
        if (unlikely(pkt_len < 1))
                pkt_len = 1;
        qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
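
/* Worked example of the mapping above, with illustrative parameters:
 * overhead = 24, cell_align = -1, cell_log = 6, size_log = 6. A 1500
 * byte packet gives slot = (1500 + 24 - 1) >> 6 = 23, so
 * pkt_len = stab->data[23] << 6. Slots beyond tsize wrap through the
 * div/mod arms, which lets a short table describe arbitrarily large
 * packets.
 */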
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
        if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
                pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
                        txt, qdisc->ops->id, qdisc->handle >> 16);
                qdisc->flags |= TCQ_F_WARN_NONWC;
        }
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);

        rcu_read_lock();
        __netif_schedule(qdisc_root(wd->qdisc));
        rcu_read_unlock();

        return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
                                 clockid_t clockid)
{
        hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
        wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
        qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
                                      u64 delta_ns)
{
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;

        if (hrtimer_is_queued(&wd->timer)) {
                /* If timer is already set in [expires, expires + delta_ns],
                 * do not reprogram it.
                 */
                if (wd->last_expires - expires <= delta_ns)
                        return;
        }

        wd->last_expires = expires;
        hrtimer_start_range_ns(&wd->timer,
                               ns_to_ktime(expires),
                               delta_ns,
                               HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
        hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
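
/* Sketch of the watchdog lifecycle in a shaping qdisc (hypothetical
 * field names; sch_tbf is a real user of this pattern):
 *
 *      qdisc_watchdog_init(&q->watchdog, sch);         // in ->init()
 *
 *      // in ->dequeue(), when the head packet must not go out yet:
 *      qdisc_watchdog_schedule_ns(&q->watchdog, t_next_send);
 *      return NULL;    // queue not empty, just unwilling to send now
 *
 *      qdisc_watchdog_cancel(&q->watchdog);    // in ->reset()/->destroy()
 */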
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
        struct hlist_head *h;
        unsigned int i;

        h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
        if (h != NULL) {
                for (i = 0; i < n; i++)
                        INIT_HLIST_HEAD(&h[i]);
        }
        return h;
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;
        unsigned int i, h;

        /* Rehash when load factor exceeds 0.75 */
        if (clhash->hashelems * 4 <= clhash->hashsize * 3)
                return;
        nsize = clhash->hashsize * 2;
        nmask = nsize - 1;
        nhash = qdisc_class_hash_alloc(nsize);
        if (nhash == NULL)
                return;

        ohash = clhash->hash;
        osize = clhash->hashsize;

        sch_tree_lock(sch);
        for (i = 0; i < osize; i++) {
                hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
                }
        }
        clhash->hash = nhash;
        clhash->hashsize = nsize;
        clhash->hashmask = nmask;
        sch_tree_unlock(sch);

        kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
        unsigned int size = 4;

        clhash->hash = qdisc_class_hash_alloc(size);
        if (!clhash->hash)
                return -ENOMEM;
        clhash->hashsize  = size;
        clhash->hashmask  = size - 1;
        clhash->hashelems = 0;
        return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
        kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        unsigned int h;

        INIT_HLIST_NODE(&cl->hnode);
        h = qdisc_class_hash(cl->classid, clhash->hashmask);
        hlist_add_head(&cl->hnode, &clhash->hash[h]);
        clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
                             struct Qdisc_class_common *cl)
{
        hlist_del(&cl->hnode);
        clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
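
/* Sketch of how a classful qdisc drives these helpers; a hypothetical
 * class struct embedding struct Qdisc_class_common as "cl->common" is
 * assumed (sch_htb and sch_hfsc follow this pattern):
 *
 *      err = qdisc_class_hash_init(&q->clhash);        // in ->init()
 *
 *      cl->common.classid = classid;                   // new class
 *      sch_tree_lock(sch);
 *      qdisc_class_hash_insert(&q->clhash, &cl->common);
 *      sch_tree_unlock(sch);
 *      qdisc_class_hash_grow(sch, &q->clhash);         // amortized rehash
 *
 *      qdisc_class_hash_remove(&q->clhash, &cl->common);
 *      qdisc_class_hash_destroy(&q->clhash);           // in ->destroy()
 */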
/* Allocate an unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
        int i = 0x8000;
        static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

        do {
                autohandle += TC_H_MAKE(0x10000U, 0);
                if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
                        autohandle = TC_H_MAKE(0x80000000U, 0);
                if (!qdisc_lookup(dev, autohandle))
                        return autohandle;
                cond_resched();
        } while (--i > 0);

        return 0;
}
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
        bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
        bool notify;
        int drops;

        if (n == 0 && len == 0)
                return;
        drops = max_t(int, n, 0);
        rcu_read_lock();
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        break;

                if (sch->flags & TCQ_F_NOPARENT)
                        break;
                /* Notify parent qdisc only if child qdisc becomes empty.
                 *
                 * If child was empty even before update then backlog
                 * counter is screwed and we skip notification because
                 * parent class is already passive.
                 *
                 * If the original child was offloaded then it is allowed
                 * to be seen as empty, so the parent is notified anyway.
                 */
                notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
                                                       !qdisc_is_offloaded);
                /* TODO: perform the search on a per txq basis */
                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        WARN_ON_ONCE(parentid != TC_H_ROOT);
                        break;
                }
                cops = sch->ops->cl_ops;
                if (notify && cops->qlen_notify) {
                        cl = cops->find(sch, parentid);
                        cops->qlen_notify(sch, cl);
                }
                sch->q.qlen -= n;
                sch->qstats.backlog -= len;
                __qdisc_qstats_drop(sch, drops);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
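
/* Sketch of the usual call site: a qdisc that drops packets outside its
 * enqueue/dequeue path (e.g. from a timer or a ->change() handler)
 * reports the delta upward so the qlen/backlog counters of all
 * ancestors stay coherent:
 *
 *      unsigned int prev_qlen = sch->q.qlen;
 *      unsigned int prev_backlog = sch->qstats.backlog;
 *
 *      ...drop some queued packets...
 *      qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
 *                                prev_backlog - sch->qstats.backlog);
 */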
int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
                              void *type_data)
{
        struct net_device *dev = qdisc_dev(sch);
        int err;

        sch->flags &= ~TCQ_F_OFFLOADED;
        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return 0;

        err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
        if (err == -EOPNOTSUPP)
                return 0;

        if (!err)
                sch->flags |= TCQ_F_OFFLOADED;

        return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
                                struct Qdisc *new, struct Qdisc *old,
                                enum tc_setup_type type, void *type_data,
                                struct netlink_ext_ack *extack)
{
        bool any_qdisc_is_offloaded;
        int err;

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return;

        err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

        /* Don't report error if the graft is part of destroy operation. */
        if (!err || !new || new == &noop_qdisc)
                return;

        /* Don't report error if the parent, the old child and the new
         * one are not offloaded.
         */
        any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
        any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
        any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

        if (any_qdisc_is_offloaded)
                NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
static void qdisc_offload_graft_root(struct net_device *dev,
                                     struct Qdisc *new, struct Qdisc *old,
                                     struct netlink_ext_ack *extack)
{
        struct tc_root_qopt_offload graft_offload = {
                .command        = TC_ROOT_GRAFT,
                .handle         = new ? new->handle : 0,
                .ingress        = (new && new->flags & TCQ_F_INGRESS) ||
                                  (old && old->flags & TCQ_F_INGRESS),
        };

        qdisc_offload_graft_helper(dev, NULL, new, old,
                                   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 portid, u32 seq, u16 flags, int event)
{
        struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
        struct gnet_stats_queue __percpu *cpu_qstats = NULL;
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        struct qdisc_size_table *stab;
        u32 block_index;
        __u32 qlen;

        cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = refcount_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (q->ops->ingress_block_get) {
                block_index = q->ops->ingress_block_get(q);
                if (block_index &&
                    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
                        goto nla_put_failure;
        }
        if (q->ops->egress_block_get) {
                block_index = q->ops->egress_block_get(q);
                if (block_index &&
                    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
                        goto nla_put_failure;
        }
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
        if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
                goto nla_put_failure;
        qlen = qdisc_qlen_sum(q);

        stab = rtnl_dereference(q->stab);
        if (stab && qdisc_dump_stab(skb, stab) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
                goto nla_put_failure;

        if (qdisc_is_percpu_stats(q)) {
                cpu_bstats = q->cpu_bstats;
                cpu_qstats = q->cpu_qstats;
        }

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
                                  &d, cpu_bstats, &q->bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
            gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
        if (q->flags & TCQ_F_BUILTIN)
                return true;
        if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
                return true;

        return false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct nlmsghdr *n, u32 clid,
                        struct Qdisc *old, struct Qdisc *new)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (old && !tc_qdisc_dump_ignore(old, false)) {
                if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new, false)) {
                if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }

        if (skb->len)
                return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                      n->nlmsg_flags & NLM_F_ECHO);

err_out:
        kfree_skb(skb);
        return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
                               struct nlmsghdr *n, u32 clid,
                               struct Qdisc *old, struct Qdisc *new)
{
        if (new || old)
                qdisc_notify(net, skb, n, clid, old, new);

        if (old)
                qdisc_put(old);
}
static void qdisc_clear_nolock(struct Qdisc *sch)
{
        sch->flags &= ~TCQ_F_NOLOCK;
        if (!(sch->flags & TCQ_F_CPUSTATS))
                return;

        free_percpu(sch->cpu_bstats);
        free_percpu(sch->cpu_qstats);
        sch->cpu_bstats = NULL;
        sch->cpu_qstats = NULL;
        sch->flags &= ~TCQ_F_CPUSTATS;
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
                       struct Qdisc *new, struct Qdisc *old,
                       struct netlink_ext_ack *extack)
{
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);

        if (parent == NULL) {
                unsigned int i, num_q, ingress;

                ingress = 0;
                num_q = dev->num_tx_queues;
                if ((q && q->flags & TCQ_F_INGRESS) ||
                    (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
                        if (!dev_ingress_queue(dev)) {
                                NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
                                return -ENOENT;
                        }
                }

                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);

                qdisc_offload_graft_root(dev, new, old, extack);

                if (new && new->ops->attach && !ingress)
                        goto skip;

                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);

                        if (!ingress)
                                dev_queue = netdev_get_tx_queue(dev, i);

                        old = dev_graft_qdisc(dev_queue, new);
                        if (new && i > 0)
                                qdisc_refcount_inc(new);

                        if (!ingress)
                                qdisc_put(old);
                }

skip:
                if (!ingress) {
                        old = rtnl_dereference(dev->qdisc);
                        if (new && !new->ops->attach)
                                qdisc_refcount_inc(new);
                        rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);

                        notify_and_destroy(net, skb, n, classid, old, new);

                        if (new && new->ops->attach)
                                new->ops->attach(new);
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }

                if (dev->flags & IFF_UP)
                        dev_activate(dev);
        } else {
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
                unsigned long cl;
                int err;

                /* Only support running class lockless if parent is lockless */
                if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
                        qdisc_clear_nolock(new);

                if (!cops || !cops->graft)
                        return -EOPNOTSUPP;

                cl = cops->find(parent, classid);
                if (!cl) {
                        NL_SET_ERR_MSG(extack, "Specified class not found");
                        return -ENOENT;
                }

                if (new && new->ops == &noqueue_qdisc_ops) {
                        NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
                        return -EINVAL;
                }

                err = cops->graft(parent, cl, new, &old, extack);
                if (err)
                        return err;
                notify_and_destroy(net, skb, n, classid, old, new);
        }
        return 0;
}
static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
                                   struct netlink_ext_ack *extack)
{
        u32 block_index;

        if (tca[TCA_INGRESS_BLOCK]) {
                block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
                if (!block_index) {
                        NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
                        return -EINVAL;
                }
                if (!sch->ops->ingress_block_set) {
                        NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
                        return -EOPNOTSUPP;
                }
                sch->ops->ingress_block_set(sch, block_index);
        }
        if (tca[TCA_EGRESS_BLOCK]) {
                block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
                if (!block_index) {
                        NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
                        return -EINVAL;
                }
                if (!sch->ops->egress_block_set) {
                        NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
                        return -EOPNOTSUPP;
                }
                sch->ops->egress_block_set(sch, block_index);
        }
        return 0;
}
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
                                  struct netdev_queue *dev_queue,
                                  struct Qdisc *p, u32 parent, u32 handle,
                                  struct nlattr **tca, int *errp,
                                  struct netlink_ext_ack *extack)
{
        int err;
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc *sch;
        struct Qdisc_ops *ops;
        struct qdisc_size_table *stab;

        ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
        if (ops == NULL && kind != NULL) {
                char name[IFNAMSIZ];
                if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
                        /* We dropped the RTNL semaphore in order to
                         * perform the module load. So, even if we
                         * succeeded in loading the module we have to
                         * tell the caller to replay the request. We
                         * indicate this using -EAGAIN.
                         * We replay the request because the device may
                         * go away in the mean time.
                         */
                        rtnl_unlock();
                        request_module("sch_%s", name);
                        rtnl_lock();
                        ops = qdisc_lookup_ops(kind);
                        if (ops != NULL) {
                                /* We will try again qdisc_lookup_ops,
                                 * so don't keep a reference.
                                 */
                                module_put(ops->owner);
                                err = -EAGAIN;
                                goto err_out;
                        }
                }
        }
#endif

        err = -ENOENT;
        if (!ops) {
                NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
                goto err_out;
        }

        sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
        }

        sch->parent = parent;

        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        if (handle == 0) {
                                NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
                                err = -ENOSPC;
                                goto err_out3;
                        }
                }
                if (!netif_is_multiqueue(dev))
                        sch->flags |= TCQ_F_ONETXQUEUE;
        }

        sch->handle = handle;

        /* This exists to keep backward compatibility with a userspace
         * loophole which allowed userspace to get IFF_NO_QUEUE
         * behaviour on older kernels by setting tx_queue_len=0 (prior
         * to qdisc init), and then forgetting to reinit tx_queue_len
         * before again attaching a qdisc.
         */
        if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
                dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
                netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
        }

        err = qdisc_block_indexes_set(sch, tca, extack);
        if (err)
                goto err_out3;

        if (ops->init) {
                err = ops->init(sch, tca[TCA_OPTIONS], extack);
                if (err != 0)
                        goto err_out5;
        }

        if (tca[TCA_STAB]) {
                stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab)) {
                        err = PTR_ERR(stab);
                        goto err_out4;
                }
                rcu_assign_pointer(sch->stab, stab);
        }
        if (tca[TCA_RATE]) {
                seqcount_t *running;

                err = -EOPNOTSUPP;
                if (sch->flags & TCQ_F_MQROOT) {
                        NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
                        goto err_out4;
                }

                if (sch->parent != TC_H_ROOT &&
                    !(sch->flags & TCQ_F_INGRESS) &&
                    (!p || !(p->flags & TCQ_F_MQROOT)))
                        running = qdisc_root_sleeping_running(sch);
                else
                        running = &sch->running;

                err = gen_new_estimator(&sch->bstats,
                                        sch->cpu_bstats,
                                        &sch->rate_est,
                                        NULL,
                                        running,
                                        tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
                        goto err_out4;
                }
        }

        qdisc_hash_add(sch, false);
        trace_qdisc_create(ops, dev, parent);

        return sch;

err_out5:
        /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
        if (ops->destroy)
                ops->destroy(sch);
err_out3:
        dev_put(dev);
        qdisc_free(sch);
err_out2:
        module_put(ops->owner);
err_out:
        *errp = err;
        return NULL;

err_out4:
        /*
         * Any broken qdiscs that would require a ops->reset() here?
         * The qdisc was never in action so it shouldn't be necessary.
         */
        qdisc_put_stab(rtnl_dereference(sch->stab));
        if (ops->destroy)
                ops->destroy(sch);
        goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
                        struct netlink_ext_ack *extack)
{
        struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;

        if (tca[TCA_OPTIONS]) {
                if (!sch->ops->change) {
                        NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
                        return -EINVAL;
                }
                if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
                        NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
                        return -EOPNOTSUPP;
                }
                err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
                if (err)
                        return err;
        }

        if (tca[TCA_STAB]) {
                stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab))
                        return PTR_ERR(stab);
        }

        ostab = rtnl_dereference(sch->stab);
        rcu_assign_pointer(sch->stab, stab);
        qdisc_put_stab(ostab);

        if (tca[TCA_RATE]) {
                /* NB: ignores errors from replace_estimator
                   because change can't be undone. */
                if (sch->flags & TCQ_F_MQROOT)
                        goto out;
                gen_replace_estimator(&sch->bstats,
                                      sch->cpu_bstats,
                                      &sch->rate_est,
                                      NULL,
                                      qdisc_root_sleeping_running(sch),
                                      tca[TCA_RATE]);
        }
out:
        return err;
}
struct check_loop_arg {
        struct qdisc_walker     w;
        struct Qdisc            *p;
        int                     depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
                         struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
        struct check_loop_arg   arg;

        if (q->ops->cl_ops == NULL)
                return 0;

        arg.w.stop = arg.w.skip = arg.w.count = 0;
        arg.w.fn = check_loop_fn;
        arg.depth = depth;
        arg.p = p;
        q->ops->cl_ops->walk(q, &arg.w);
        return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
        struct Qdisc *leaf;
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct check_loop_arg *arg = (struct check_loop_arg *)w;

        leaf = cops->leaf(q, cl);
        if (leaf) {
                if (leaf == arg->p || arg->depth > 7)
                        return -ELOOP;
                return check_loop(leaf, arg->p, arg->depth + 1);
        }
        return 0;
}
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
        [TCA_KIND]              = { .type = NLA_STRING },
        [TCA_RATE]              = { .type = NLA_BINARY,
                                    .len = sizeof(struct tc_estimator) },
        [TCA_STAB]              = { .type = NLA_NESTED },
        [TCA_DUMP_INVISIBLE]    = { .type = NLA_FLAG },
        [TCA_CHAIN]             = { .type = NLA_U32 },
        [TCA_INGRESS_BLOCK]     = { .type = NLA_U32 },
        [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
};
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        u32 clid;
        struct Qdisc *q = NULL;
        struct Qdisc *p = NULL;
        int err;

        if ((n->nlmsg_type != RTM_GETQDISC) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                     rtm_tca_policy, extack);
        if (err < 0)
                return err;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return -ENODEV;

        clid = tcm->tcm_parent;
        if (clid) {
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
                                if (!p) {
                                        NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
                                        return -ENOENT;
                                }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
                        }
                } else {
                        q = rtnl_dereference(dev->qdisc);
                }
                if (!q) {
                        NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
                        return -ENOENT;
                }

                if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
                        NL_SET_ERR_MSG(extack, "Invalid handle");
                        return -EINVAL;
                }
        } else {
                q = qdisc_lookup(dev, tcm->tcm_handle);
                if (!q) {
                        NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
                        return -ENOENT;
                }
        }

        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
                NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
        }

        if (n->nlmsg_type == RTM_DELQDISC) {
                if (!clid) {
                        NL_SET_ERR_MSG(extack, "Classid cannot be zero");
                        return -EINVAL;
                }
                if (q->handle == 0) {
                        NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
                        return -ENOENT;
                }
                err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
                if (err != 0)
                        return err;
        } else {
                qdisc_notify(net, skb, n, clid, NULL, q);
        }
        return 0;
}
1512 * Create/change qdisc.
1515 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1516 struct netlink_ext_ack *extack)
1518 struct net *net = sock_net(skb->sk);
1520 struct nlattr *tca[TCA_MAX + 1];
1521 struct net_device *dev;
1523 struct Qdisc *q, *p;
1526 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1530 /* Reinit, just in case something touches this. */
1531 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1532 rtm_tca_policy, extack);
1536 tcm = nlmsg_data(n);
1537 clid = tcm->tcm_parent;
1540 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1546 if (clid != TC_H_ROOT) {
1547 if (clid != TC_H_INGRESS) {
1548 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1550 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1553 q = qdisc_leaf(p, clid);
1554 } else if (dev_ingress_queue_create(dev)) {
1555 q = dev_ingress_queue(dev)->qdisc_sleeping;
1558 q = rtnl_dereference(dev->qdisc);
1561 /* It may be default qdisc, ignore it */
1562 if (q && q->handle == 0)
1565 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1566 if (tcm->tcm_handle) {
1567 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1568 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1571 if (TC_H_MIN(tcm->tcm_handle)) {
1572 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1575 q = qdisc_lookup(dev, tcm->tcm_handle);
1577 goto create_n_graft;
1578 if (n->nlmsg_flags & NLM_F_EXCL) {
1579 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1582 if (tca[TCA_KIND] &&
1583 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1584 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1588 (p && check_loop(q, p, 0))) {
1589 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1592 qdisc_refcount_inc(q);
1596 goto create_n_graft;
1598 /* This magic test requires explanation.
1600 * We know, that some child q is already
1601 * attached to this parent and have choice:
1602 * either to change it or to create/graft new one.
1604 * 1. We are allowed to create/graft only
1605 * if CREATE and REPLACE flags are set.
1607 * 2. If EXCL is set, requestor wanted to say,
1608 * that qdisc tcm_handle is not expected
1609 * to exist, so that we choose create/graft too.
1611 * 3. The last case is when no flags are set.
1612 * Alas, it is sort of hole in API, we
1613 * cannot decide what to do unambiguously.
1614 * For now we select create/graft, if
1615 * user gave KIND, which does not match existing.
1617 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1618 (n->nlmsg_flags & NLM_F_REPLACE) &&
1619 ((n->nlmsg_flags & NLM_F_EXCL) ||
1621 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1622 goto create_n_graft;
1626 if (!tcm->tcm_handle) {
1627 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1630 q = qdisc_lookup(dev, tcm->tcm_handle);
1633 /* Change qdisc parameters */
1635 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1638 if (n->nlmsg_flags & NLM_F_EXCL) {
1639 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1642 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1643 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1646 err = qdisc_change(q, tca, extack);
1648 qdisc_notify(net, skb, n, clid, NULL, q);
1652 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1653 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1656 if (clid == TC_H_INGRESS) {
1657 if (dev_ingress_queue(dev)) {
1658 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1659 tcm->tcm_parent, tcm->tcm_parent,
1662 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1666 struct netdev_queue *dev_queue;
1668 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1669 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1671 dev_queue = p->dev_queue;
1673 dev_queue = netdev_get_tx_queue(dev, 0);
1675 q = qdisc_create(dev, dev_queue, p,
1676 tcm->tcm_parent, tcm->tcm_handle,
1686 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
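
/* Worked examples of the flag handling above (illustrative, matching
 * what the iproute2 tc front end sends):
 *  - NLM_F_CREATE | NLM_F_EXCL ("tc qdisc add"): create; fails with
 *    EEXIST if something is already grafted at the parent.
 *  - NLM_F_CREATE | NLM_F_REPLACE ("tc qdisc replace"): create, or
 *    graft over whatever currently sits there.
 *  - no flags with a handle ("tc qdisc change"): modify the existing
 *    qdisc in place via ->change(), never create.
 */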
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                              struct netlink_callback *cb,
                              int *q_idx_p, int s_q_idx, bool recur,
                              bool dump_invisible)
{
        int ret = 0, q_idx = *q_idx_p;
        struct Qdisc *q;
        int b;

        if (!root)
                return 0;

        q = root;
        if (q_idx < s_q_idx) {
                q_idx++;
        } else {
                if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }

        /* If dumping singletons, there is no qdisc_dev(root) and the singleton
         * itself has already been dumped.
         *
         * If we've already dumped the top-level (ingress) qdisc above and the global
         * qdisc hashtable, we don't want to hit it again
         */
        if (!qdisc_dev(root) || !recur)
                goto out;

        hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
                if (q_idx < s_q_idx) {
                        q_idx++;
                        continue;
                }
                if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                  RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }

out:
        *q_idx_p = q_idx;
        return ret;
done:
        ret = -1;
        goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
        const struct nlmsghdr *nlh = cb->nlh;
        struct nlattr *tca[TCA_MAX + 1];
        int err;

        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];

        idx = 0;
        ASSERT_RTNL();

        err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
                                     rtm_tca_policy, cb->extack);
        if (err < 0)
                return err;

        for_each_netdev(net, dev) {
                struct netdev_queue *dev_queue;

                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;

                if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
                                       skb, cb, &q_idx, s_q_idx,
                                       true, tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;

                dev_queue = dev_ingress_queue(dev);
                if (dev_queue &&
                    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
                                       &q_idx, s_q_idx, false,
                                       tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;

cont:
                idx++;
        }

done:
        cb->args[0] = idx;
        cb->args[1] = q_idx;

        return skb->len;
}
/************************************************
 *      Traffic classes manipulation.           *
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
                          u32 portid, u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

        cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
                                         NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
                goto nla_put_failure;

        if (gnet_stats_finish_copy(&d) < 0)
                goto nla_put_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         struct nlmsghdr *n, struct Qdisc *q,
                         unsigned long cl, int event)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
}
static int tclass_del_notify(struct net *net,
                             const struct Qdisc_class_ops *cops,
                             struct sk_buff *oskb, struct nlmsghdr *n,
                             struct Qdisc *q, unsigned long cl,
                             struct netlink_ext_ack *extack)
{
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        struct sk_buff *skb;
        int err = 0;

        if (!cops->delete)
                return -EOPNOTSUPP;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
                           RTM_DELTCLASS) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        err = cops->delete(q, cl, extack);
        if (err) {
                kfree_skb(skb);
                return err;
        }

        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        return err;
}
#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
        struct tcf_walker w;
        unsigned long base;
        unsigned long cl;
        u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
        struct tcf_bind_args *a = (void *)arg;

        if (tp->ops->bind_class) {
                struct Qdisc *q = tcf_block_q(tp->chain->block);

                sch_tree_lock(q);
                tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
                sch_tree_unlock(q);
        }
        return 0;
}
struct tc_bind_class_args {
        struct qdisc_walker w;
        unsigned long new_cl;
        u32 portid;
        u32 clid;
};

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
                                struct qdisc_walker *w)
{
        struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct tcf_block *block;
        struct tcf_chain *chain;

        block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return 0;
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                struct tcf_proto *tp;

                for (tp = tcf_get_next_proto(chain, NULL);
                     tp; tp = tcf_get_next_proto(chain, tp)) {
                        struct tcf_bind_args arg = {};

                        arg.w.fn = tcf_node_bind;
                        arg.classid = a->clid;
                        arg.base = cl;
                        arg.cl = a->new_cl;
                        tp->ops->walk(tp, &arg.w, true);
                }
        }

        return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
                           unsigned long new_cl)
{
        const struct Qdisc_class_ops *cops = q->ops->cl_ops;
        struct tc_bind_class_args args = {};

        if (!cops->tcf_block)
                return;
        args.portid = portid;
        args.clid = clid;
        args.new_cl = new_cl;
        args.w.fn = tc_bind_class_walker;
        q->ops->cl_ops->walk(q, &args.w);
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
                           unsigned long new_cl)
{
}

#endif
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
                         struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q = NULL;
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;
        u32 portid;
        u32 clid;
        u32 qid;
        int err;

        if ((n->nlmsg_type != RTM_GETTCLASS) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                     rtm_tca_policy, extack);
        if (err < 0)
                return err;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return -ENODEV;

        /*
           parent == TC_H_UNSPEC - unspecified parent.
           parent == TC_H_ROOT   - class is root, which has no parent.
           parent == X:0         - parent is root class.
           parent == X:Y         - parent is a node in hierarchy.
           parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

           handle == 0:0         - generate handle from kernel pool.
           handle == 0:Y         - class is X:Y, where X:0 is qdisc.
           handle == X:Y         - clear.
           handle == X:0         - root class.
         */

        /* Step 1. Determine qdisc handle X:0 */

        portid = tcm->tcm_parent;
        clid = tcm->tcm_handle;
        qid = TC_H_MAJ(clid);

        if (portid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(portid);

                if (qid && qid1) {
                        /* If both majors are known, they must be identical. */
                        if (qid != qid1)
                                return -EINVAL;
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
                        qid = rtnl_dereference(dev->qdisc)->handle;

                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
                 *
                 * TC_H_MAJ(portid) still may be unspecified, complete it now.
                 */
                if (portid)
                        portid = TC_H_MAKE(qid, portid);
        } else {
                if (qid == 0)
                        qid = rtnl_dereference(dev->qdisc)->handle;
        }

        /* OK. Locate qdisc */
        q = qdisc_lookup(dev, qid);
        if (!q)
                return -ENOENT;

        /* And check that it supports classes */
        cops = q->ops->cl_ops;
        if (cops == NULL)
                return -EINVAL;

        /* Now try to get class */
        if (clid == 0) {
                if (portid == TC_H_ROOT)
                        clid = qid;
        } else
                clid = TC_H_MAKE(qid, clid);

        if (clid)
                cl = cops->find(q, clid);

        if (cl == 0) {
                err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTCLASS ||
                    !(n->nlmsg_flags & NLM_F_CREATE))
                        goto out;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTCLASS:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL)
                                goto out;
                        break;
                case RTM_DELTCLASS:
                        err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
                        /* Unbind the class from filters by rebinding them to 0 */
                        tc_bind_tclass(q, portid, clid, 0);
                        goto out;
                case RTM_GETTCLASS:
                        err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
                        goto out;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
                NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
                return -EOPNOTSUPP;
        }

        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
                err = cops->change(q, clid, portid, tca, &new_cl, extack);
        if (err == 0) {
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
                /* We just created a new class, need to do reverse binding. */
                if (cl != new_cl)
                        tc_bind_tclass(q, portid, clid, new_cl);
        }
out:
        return err;
}
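
/* Worked example of the handle fixups above (illustrative): for
 * "tc class add ... parent 1: classid 1:10", userspace sends
 * tcm_parent = 0x00010000 and tcm_handle = 0x00010010. TC_H_MAJ() of
 * either yields qid = 0x00010000, and TC_H_MAKE(qid, minor) is how a
 * bare minor such as 0x10 is completed into the full 1:10 identifier
 * before cops->find() is called.
 */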
struct qdisc_dump_args {
        struct qdisc_walker     w;
        struct sk_buff          *skb;
        struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
                            struct qdisc_walker *arg)
{
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

        return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                              RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
                                struct tcmsg *tcm, struct netlink_callback *cb,
                                int *t_p, int s_t)
{
        struct qdisc_dump_args arg;

        if (tc_qdisc_dump_ignore(q, false) ||
            *t_p < s_t || !q->ops->cl_ops ||
            (tcm->tcm_parent &&
             TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
                (*t_p)++;
                return 0;
        }
        if (*t_p > s_t)
                memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
        arg.w.fn = qdisc_class_dump;
        arg.skb = skb;
        arg.cb = cb;
        arg.w.stop  = 0;
        arg.w.skip = cb->args[1];
        arg.w.count = 0;
        q->ops->cl_ops->walk(q, &arg.w);
        cb->args[1] = arg.w.count;
        if (arg.w.stop)
                return -1;
        (*t_p)++;
        return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
                               struct tcmsg *tcm, struct netlink_callback *cb,
                               int *t_p, int s_t, bool recur)
{
        struct Qdisc *q;
        int b;

        if (!root)
                return 0;

        if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
                return -1;

        if (!qdisc_dev(root) || !recur)
                return 0;

        if (tcm->tcm_parent) {
                q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
                if (q && q != root &&
                    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
                        return -1;
                return 0;
        }
        hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
                if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
                        return -1;
        }

        return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct tcmsg *tcm = nlmsg_data(cb->nlh);
        struct net *net = sock_net(skb->sk);
        struct netdev_queue *dev_queue;
        struct net_device *dev;
        int t, s_t;

        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return 0;
        dev = dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return 0;

        s_t = cb->args[0];
        t = 0;

        if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
                                skb, tcm, cb, &t, s_t, true) < 0)
                goto done;

        dev_queue = dev_ingress_queue(dev);
        if (dev_queue &&
            tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
                                &t, s_t, false) < 0)
                goto done;

done:
        cb->args[0] = t;

        dev_put(dev);
        return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   1000000,
                   (u32)NSEC_PER_SEC / hrtimer_resolution);

        return 0;
}

static int __net_init psched_net_init(struct net *net)
{
        struct proc_dir_entry *e;

        e = proc_create_single("psched", 0, net->proc_net, psched_show);
        if (e == NULL)
                return -ENOMEM;

        return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
        remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
        return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
        .init = psched_net_init,
        .exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
        int err;

        err = register_pernet_subsys(&psched_net_ops);
        if (err) {
                pr_err("pktsched_init: cannot initialize per netns operations\n");
                return err;
        }

        register_qdisc(&pfifo_fast_ops);
        register_qdisc(&pfifo_qdisc_ops);
        register_qdisc(&bfifo_qdisc_ops);
        register_qdisc(&pfifo_head_drop_qdisc_ops);
        register_qdisc(&mq_qdisc_ops);
        register_qdisc(&noqueue_qdisc_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
                      0);
        rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
                      0);

        return 0;
}

subsys_initcall(pktsched_init);