// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

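/*
 * mq presents each TX queue of a multiqueue device as a class holding its
 * own child qdisc; mq itself never queues packets, which is why it defines
 * no enqueue/dequeue operations. Class minor IDs map to queue index + 1.
 *
 * A typical setup (device name and handles are illustrative):
 *
 *	tc qdisc add dev eth0 root handle 100: mq
 *	tc qdisc replace dev eth0 parent 100:1 pfifo
 */
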
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
        struct Qdisc            **qdiscs;
};

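/* Forward an MQ create/destroy command to the driver via ndo_setup_tc so
 * hardware offload can mirror the software qdisc; -EOPNOTSUPP simply means
 * the device has no such support.
 */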
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mq_qopt_offload opt = {
                .command = cmd,
                .handle = sch->handle,
        };

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;

        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

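/* Ask the driver to fold hardware-maintained counters into sch->bstats and
 * sch->qstats via the generic offload dump helper.
 */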
static int mq_offload_stats(struct Qdisc *sch)
{
        struct tc_mq_qopt_offload opt = {
                .command = TC_MQ_STATS,
                .handle = sch->handle,
                .stats = {
                        .bstats = &sch->bstats,
                        .qstats = &sch->qstats,
                },
        };

        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

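/* Tear down: notify the offload side, then drop the reference on any child
 * qdiscs still parked in priv->qdiscs (non-NULL only if mq_attach never ran).
 */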
static void mq_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        mq_offload(sch, TC_MQ_DESTROY);

        if (!priv->qdiscs)
                return;
        for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
                qdisc_put(priv->qdiscs[ntx]);
        kfree(priv->qdiscs);
}

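/* Allocate one default child qdisc per TX queue up front; grafting them onto
 * the queues is deferred to mq_attach(), which is not allowed to fail.
 */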
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        unsigned int ntx;

        /* mq can only be the root qdisc of a multiqueue device */
        if (sch->parent != TC_H_ROOT || !netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                dev_queue = netdev_get_tx_queue(dev, ntx);
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)),
                                          extack);
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        sch->flags |= TCQ_F_MQROOT;
        mq_offload(sch, TC_MQ_CREATE);
        return 0;
}

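/* Graft the pre-allocated children onto their TX queues and release the
 * temporary array; from here on the children are reachable only through
 * the per-queue qdisc pointers.
 */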
static void mq_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_put(old);
#ifdef CONFIG_NET_SCHED
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
#endif
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

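/* Root dump: aggregate byte/packet and queue stats from every child into
 * the root's counters, then let the offload side add its numbers.
 */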
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct Qdisc *qdisc;
        unsigned int ntx;

        sch->q.qlen = 0;
        gnet_stats_basic_sync_init(&sch->bstats);
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        /* MQ supports lockless qdiscs. However, statistics accounting needs
         * to account for all, none, or a mix of locked and unlocked child
         * qdiscs. Percpu stats are added to counters in-band and locking
         * qdisc totals are added at end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
                spin_lock_bh(qdisc_lock(qdisc));

                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
                                     &qdisc->bstats, false);
                gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
                                     &qdisc->qstats);
                sch->q.qlen += qdisc_qlen(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }

        return mq_offload_stats(sch);
}

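/* Map a class ID to its TX queue; class minor IDs are 1-based queue
 * indices, so 0 (TC_H_UNSPEC) can never match a queue.
 */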
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
                                            struct tcmsg *tcm)
{
        return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

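/* Replace the child qdisc of one TX queue. The device is quiesced around
 * the swap when it is up, and the driver is told about the new child so
 * any hardware state can follow.
 */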
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                    struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
        struct tc_mq_qopt_offload graft_offload;
        struct net_device *dev = qdisc_dev(sch);

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);
        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        if (dev->flags & IFF_UP)
                dev_activate(dev);

        graft_offload.handle = sch->handle;
        graft_offload.graft_params.queue = cl - 1;
        graft_offload.graft_params.child_handle = new ? new->handle : 0;
        graft_offload.command = TC_MQ_GRAFT;

        qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
                                   TC_SETUP_QDISC_MQ, &graft_offload, extack);
        return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        return rtnl_dereference(dev_queue->qdisc_sleeping);
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!mq_queue_get(sch, ntx))
                return 0;
        return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
                         struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
        return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                               struct gnet_dump *d)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        sch = rtnl_dereference(dev_queue->qdisc_sleeping);
        if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
}

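/* Iterate over all classes (one per TX queue) for 'tc class show' style
 * dumps, honouring the walker's skip/stop bookkeeping.
 */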
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx;

        if (arg->stop)
                return;
        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++)
                if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
                        break;
}

static const struct Qdisc_class_ops mq_class_ops = {
        .select_queue   = mq_select_queue,
        .graft          = mq_graft,
        .leaf           = mq_leaf,
        .find           = mq_find,
        .walk           = mq_walk,
        .dump           = mq_dump_class,
        .dump_stats     = mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
        .cl_ops         = &mq_class_ops,
        .id             = "mq",
        .priv_size      = sizeof(struct mq_sched),
        .init           = mq_init,
        .destroy        = mq_destroy,
        .attach         = mq_attach,
        .change_real_num_tx = mq_change_real_num_tx,
        .dump           = mq_dump,
        .owner          = THIS_MODULE,
};
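
/* mq is attached automatically as the default root qdisc for multiqueue
 * devices (see attach_default_qdiscs() in sch_generic.c), so there is no
 * module init here; mq_qdisc_ops is referenced directly by the core.
 */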