/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
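
/* mqprio is a root-only qdisc that maps skb priorities to traffic
 * classes and traffic classes to contiguous ranges of hardware tx
 * queues, attaching a pfifo_fast child qdisc to each tx queue.
 *
 * Illustrative userspace setup (the device name and queue layout
 * below are examples only, not taken from this file):
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *		map 0 0 0 0 1 1 2 2 0 0 0 0 0 0 0 0 \
 *		queues 4@0 4@4 4@8
 *
 * This would create 3 traffic classes over 12 queues: tc0 on queues
 * 0-3, tc1 on queues 4-7 and tc2 on queues 8-11, with the 16-entry
 * map assigning each skb priority to one of the traffic classes.
 */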

struct mqprio_sched {
	struct Qdisc	**qdiscs;
	int		hw_owned;
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* If hw owned, qcount and qoffset are taken from the LLD, so
	 * there is no reason to verify them here.
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
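
/* mqprio_init() validates the options via mqprio_parse_opt() above,
 * pre-allocates one pfifo_fast child per tx queue, and then programs
 * the tc-to-queue mapping either in software via netdev_set_tc_queue()
 * or in hardware by handing ownership to the driver's ndo_setup_tc().
 */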
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* Pre-allocate qdiscs; attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping, then run ndo_setup_tc; otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;

	return 0;

err:
	mqprio_destroy(sch);
	return err;
}
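
/* The children are created in mqprio_init() but only grafted onto the
 * tx queues here, once the device has been quiesced. dev_graft_qdisc()
 * returns the qdisc previously attached to the queue, which is then
 * destroyed.
 */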
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (qdisc)
			qdisc_destroy(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
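
/* Class ids are laid out as follows: classes 1..num_tc represent the
 * traffic classes, and classes num_tc+1..num_tc+num_tx_queues map
 * one-to-one onto the device tx queues.
 */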
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
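
/* The root qdisc never queues packets itself, so its counters are
 * rebuilt on each dump by summing the per-queue child counters, each
 * taken under that child's qdisc lock.
 */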
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.qlen += qdisc->qstats.qlen;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
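
/* For a traffic-class class, stats are aggregated over the class's
 * queue range; for a queue class, the leaf qdisc's own stats are
 * reported directly.
 */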
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			spin_lock_bh(qdisc_lock(qdisc));
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.qlen += qdisc->qstats.qlen;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, &qstats) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		sch->qstats.qlen = sch->q.qlen;
		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");