// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

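/*
 * mqprio is a root qdisc for multiqueue devices: it partitions the TX queues
 * into traffic classes, maps skb priorities onto those classes, and can
 * optionally offload the layout (and per-class rate limits) to the hardware
 * via ndo_setup_tc().
 *
 * Illustrative iproute2 invocation (exact syntax depends on the installed tc
 * version):
 *
 *   tc qdisc add dev eth0 root handle 100: mqprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 hw 0
 */
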
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

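/* Per-qdisc private state.  @qdiscs holds the pre-allocated child qdisc for
 * every TX queue between init and attach; @mode, @shaper, @flags and the
 * per-TC @min_rate/@max_rate describe the requested hardware offload;
 * @hw_offload records the offload level the driver actually accepted;
 * @fp holds the per-TC frame preemption state (express or preemptible).
 */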
struct mqprio_sched {
        struct Qdisc            **qdiscs;
        u16 mode;
        u16 shaper;
        int hw_offload;
        u32 flags;
        u64 min_rate[TC_QOPT_MAX_QUEUE];
        u64 max_rate[TC_QOPT_MAX_QUEUE];
        u32 fp[TC_QOPT_MAX_QUEUE];
};

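/* Build a tc_mqprio_qopt_offload from the validated options and hand it to
 * the driver through ndo_setup_tc(TC_SETUP_QDISC_MQPRIO).  On success, record
 * the offload level the driver accepted in priv->hw_offload.
 */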
static int mqprio_enable_offload(struct Qdisc *sch,
                                 const struct tc_mqprio_qopt *qopt,
                                 struct netlink_ext_ack *extack)
{
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mqprio_qopt_offload mqprio = {
                .qopt = *qopt,
                .extack = extack,
        };
        int err, i;

        switch (priv->mode) {
        case TC_MQPRIO_MODE_DCB:
                if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
                        return -EINVAL;
                break;
        case TC_MQPRIO_MODE_CHANNEL:
                mqprio.flags = priv->flags;
                if (priv->flags & TC_MQPRIO_F_MODE)
                        mqprio.mode = priv->mode;
                if (priv->flags & TC_MQPRIO_F_SHAPER)
                        mqprio.shaper = priv->shaper;
                if (priv->flags & TC_MQPRIO_F_MIN_RATE)
                        for (i = 0; i < mqprio.qopt.num_tc; i++)
                                mqprio.min_rate[i] = priv->min_rate[i];
                if (priv->flags & TC_MQPRIO_F_MAX_RATE)
                        for (i = 0; i < mqprio.qopt.num_tc; i++)
                                mqprio.max_rate[i] = priv->max_rate[i];
                break;
        default:
                return -EINVAL;
        }

        mqprio_fp_to_offload(priv->fp, &mqprio);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
                                            &mqprio);
        if (err)
                return err;

        priv->hw_offload = mqprio.qopt.hw;

        return 0;
}

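/* Tell the driver to tear down the offloaded configuration by passing a
 * zeroed tc_mqprio_qopt_offload (num_tc == 0).
 */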
static void mqprio_disable_offload(struct Qdisc *sch)
{
        struct tc_mqprio_qopt_offload mqprio = { { 0 } };
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);

        switch (priv->mode) {
        case TC_MQPRIO_MODE_DCB:
        case TC_MQPRIO_MODE_CHANNEL:
                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
                                              &mqprio);
                break;
        }
}

static void mqprio_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        if (priv->qdiscs) {
                for (ntx = 0;
                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
                     ntx++)
                        qdisc_put(priv->qdiscs[ntx]);
                kfree(priv->qdiscs);
        }

        if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
                mqprio_disable_offload(sch);
        else
                netdev_set_num_tc(dev, 0);
}

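/* Sanity-check the legacy struct tc_mqprio_qopt copied from user space
 * (number of TCs, priority map, queue counts and offsets) and make sure the
 * device can actually honour a hardware offload request.
 */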
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
                            const struct tc_mqprio_caps *caps,
                            struct netlink_ext_ack *extack)
{
        int err;

        /* Limit qopt->hw to maximum supported offload value.  Drivers have
         * the option of overriding this later if they don't support a given
         * offload type.
         */
        if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
                qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

        /* If hardware offload is requested, we will leave 3 options to the
         * device driver:
         * - populate the queue counts itself (and ignore what was requested)
         * - validate the provided queue counts by itself (and apply them)
         * - request queue count validation here (and apply them)
         */
        err = mqprio_validate_qopt(dev, qopt,
                                   !qopt->hw || caps->validate_queue_counts,
                                   false, extack);
        if (err)
                return err;

        /* If ndo_setup_tc is not present then hardware doesn't support offload
         * and we should return an error.
         */
        if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
                NL_SET_ERR_MSG(extack,
                               "Device does not support hardware offload");
                return -EINVAL;
        }

        return 0;
}

static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
        [TCA_MQPRIO_TC_ENTRY_INDEX]     = NLA_POLICY_MAX(NLA_U32,
                                                         TC_QOPT_MAX_QUEUE),
        [TCA_MQPRIO_TC_ENTRY_FP]        = NLA_POLICY_RANGE(NLA_U32,
                                                           TC_FP_EXPRESS,
                                                           TC_FP_PREEMPTIBLE),
};

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
        [TCA_MQPRIO_MODE]       = { .len = sizeof(u16) },
        [TCA_MQPRIO_SHAPER]     = { .len = sizeof(u16) },
        [TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
        [TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
        [TCA_MQPRIO_TC_ENTRY]   = { .type = NLA_NESTED },
};

static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
                                 struct nlattr *opt,
                                 unsigned long *seen_tcs,
                                 struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
        int err, tc;

        err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
                               mqprio_tc_entry_policy, extack);
        if (err < 0)
                return err;

        if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
                NL_SET_ERR_MSG(extack, "TC entry index missing");
                return -EINVAL;
        }

        tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
        if (*seen_tcs & BIT(tc)) {
                NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
                                    "Duplicate tc entry");
                return -EINVAL;
        }

        *seen_tcs |= BIT(tc);

        if (tb[TCA_MQPRIO_TC_ENTRY_FP])
                fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);

        return 0;
}

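/* Walk all TCA_MQPRIO_TC_ENTRY nests in TCA_OPTIONS and collect the per-TC
 * frame preemption settings.  Marking any TC preemptible is only allowed when
 * the device supports the MAC Merge layer (ethtool_dev_mm_supported()).
 */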
static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
                                   int nlattr_opt_len,
                                   struct netlink_ext_ack *extack)
{
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        bool have_preemption = false;
        unsigned long seen_tcs = 0;
        u32 fp[TC_QOPT_MAX_QUEUE];
        struct nlattr *n;
        int tc, rem;
        int err = 0;

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
                fp[tc] = priv->fp[tc];

        nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
                if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
                        continue;

                err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
                if (err)
                        goto out;
        }

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
                priv->fp[tc] = fp[tc];
                if (fp[tc] == TC_FP_PREEMPTIBLE)
                        have_preemption = true;
        }

        if (have_preemption && !ethtool_dev_mm_supported(dev)) {
                NL_SET_ERR_MSG(extack, "Device does not support preemption");
                return -EOPNOTSUPP;
        }
out:
        return err;
}

/* Parse the other netlink attributes that represent the payload of
 * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
 */
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
                               struct nlattr *opt,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
        int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
        struct nlattr *attr;
        int i, rem, err;

        if (nlattr_opt_len >= nla_attr_size(0)) {
                err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
                                           nlattr_opt_len, mqprio_policy,
                                           NULL);
                if (err < 0)
                        return err;
        }

        if (!qopt->hw) {
                NL_SET_ERR_MSG(extack,
                               "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
                return -EINVAL;
        }

        if (tb[TCA_MQPRIO_MODE]) {
                priv->flags |= TC_MQPRIO_F_MODE;
                priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
        }

        if (tb[TCA_MQPRIO_SHAPER]) {
                priv->flags |= TC_MQPRIO_F_SHAPER;
                priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
        }

        if (tb[TCA_MQPRIO_MIN_RATE64]) {
                if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
                        NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
                                            "min_rate accepted only when shaper is in bw_rlimit mode");
                        return -EINVAL;
                }
                i = 0;
                nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
                                    rem) {
                        if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
                                NL_SET_ERR_MSG_ATTR(extack, attr,
                                                    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
                                return -EINVAL;
                        }

                        if (nla_len(attr) != sizeof(u64)) {
                                NL_SET_ERR_MSG_ATTR(extack, attr,
                                                    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
                                return -EINVAL;
                        }

                        if (i >= qopt->num_tc)
                                break;
                        priv->min_rate[i] = nla_get_u64(attr);
                        i++;
                }
                priv->flags |= TC_MQPRIO_F_MIN_RATE;
        }

        if (tb[TCA_MQPRIO_MAX_RATE64]) {
                if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
                        NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
                                            "max_rate accepted only when shaper is in bw_rlimit mode");
                        return -EINVAL;
                }
                i = 0;
                nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
                                    rem) {
                        if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
                                NL_SET_ERR_MSG_ATTR(extack, attr,
                                                    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
                                return -EINVAL;
                        }

                        if (nla_len(attr) != sizeof(u64)) {
                                NL_SET_ERR_MSG_ATTR(extack, attr,
                                                    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
                                return -EINVAL;
                        }

                        if (i >= qopt->num_tc)
                                break;
                        priv->max_rate[i] = nla_get_u64(attr);
                        i++;
                }
                priv->flags |= TC_MQPRIO_F_MAX_RATE;
        }

        if (tb[TCA_MQPRIO_TC_ENTRY]) {
                err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
                                              extack);
                if (err)
                        return err;
        }

        return 0;
}

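/* Qdisc init: validate the configuration, pre-allocate one child qdisc per
 * TX queue (attached later in mqprio_attach()), then either offload the TC
 * layout to the device or program it with netdev_set_num_tc() and
 * netdev_set_tc_queue().  The priority-to-TC map is always applied in
 * software via netdev_set_prio_tc_map().
 */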
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        int i, err = -EOPNOTSUPP;
        struct tc_mqprio_qopt *qopt = NULL;
        struct tc_mqprio_caps caps;
        int len, tc;

        BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
        BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* make certain we can allocate enough classids to handle all queues */
        if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
                return -ENOMEM;

        if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
                priv->fp[tc] = TC_FP_EXPRESS;

        qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
                                 &caps, sizeof(caps));

        qopt = nla_data(opt);
        if (mqprio_parse_opt(dev, qopt, &caps, extack))
                return -EINVAL;

        len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
        if (len > 0) {
                err = mqprio_parse_nlattr(sch, qopt, opt, extack);
                if (err)
                        return err;
        }

        /* pre-allocate qdisc, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)), extack);
                if (!qdisc)
                        return -ENOMEM;

                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        /* If the mqprio options indicate that hardware should own the queue
         * mapping, then run ndo_setup_tc; otherwise use the supplied and
         * verified mapping.
         */
        if (qopt->hw) {
                err = mqprio_enable_offload(sch, qopt, extack);
                if (err)
                        return err;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            qopt->count[i], qopt->offset[i]);
        }

        /* Always use supplied priority mappings */
        for (i = 0; i < TC_BITMASK + 1; i++)
                netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

        sch->flags |= TCQ_F_MQROOT;
        return 0;
}

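/* Graft the child qdiscs allocated in mqprio_init() onto their TX queues.
 * Once grafted they are owned by the queues, so the temporary array can be
 * freed.
 */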
static void mqprio_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_put(old);
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

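/* Replace the child qdisc of a single TX queue.  The device is briefly
 * deactivated around the swap if it is currently up.
 */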
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                        struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);

        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}

static int dump_rates(struct mqprio_sched *priv,
                      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
        struct nlattr *nest;
        int i;

        if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
                nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
                if (!nest)
                        goto nla_put_failure;

                for (i = 0; i < opt->num_tc; i++) {
                        if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
                                    sizeof(priv->min_rate[i]),
                                    &priv->min_rate[i]))
                                goto nla_put_failure;
                }
                nla_nest_end(skb, nest);
        }

        if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
                nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
                if (!nest)
                        goto nla_put_failure;

                for (i = 0; i < opt->num_tc; i++) {
                        if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
                                    sizeof(priv->max_rate[i]),
                                    &priv->max_rate[i]))
                                goto nla_put_failure;
                }
                nla_nest_end(skb, nest);
        }
        return 0;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
                                  struct sk_buff *skb)
{
        struct nlattr *n;
        int tc;

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
                n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
                if (!n)
                        return -EMSGSIZE;

                if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
                        goto nla_put_failure;

                if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
                        goto nla_put_failure;

                nla_nest_end(skb, n);
        }

        return 0;

nla_put_failure:
        nla_nest_cancel(skb, n);
        return -EMSGSIZE;
}

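/* Dump the qdisc configuration: aggregate statistics from every child qdisc
 * under its own lock, reconstruct the legacy struct tc_mqprio_qopt, and
 * append the extended netlink attributes (mode, shaper, rates, TC entries).
 */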
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
        struct tc_mqprio_qopt opt = { 0 };
        struct Qdisc *qdisc;
        unsigned int ntx;

        sch->q.qlen = 0;
        gnet_stats_basic_sync_init(&sch->bstats);
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        /* MQ supports lockless qdiscs. However, statistics accounting needs
         * to account for all, none, or a mix of locked and unlocked child
         * qdiscs. Percpu stats are added to counters in-band and locking
         * qdisc totals are added at the end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
                spin_lock_bh(qdisc_lock(qdisc));

                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
                                     &qdisc->bstats, false);
                gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
                                     &qdisc->qstats);
                sch->q.qlen += qdisc_qlen(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }

        mqprio_qopt_reconstruct(dev, &opt);
        opt.hw = priv->hw_offload;

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_MODE) &&
            nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
            nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
                goto nla_put_failure;

        if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
             priv->flags & TC_MQPRIO_F_MAX_RATE) &&
            (dump_rates(priv, &opt, skb) != 0))
                goto nla_put_failure;

        if (mqprio_dump_tc_entries(priv, skb))
                goto nla_put_failure;

        return nla_nest_end(skb, nla);
nla_put_failure:
        nlmsg_trim(skb, nla);
        return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return rtnl_dereference(dev_queue->qdisc_sleeping);
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx = TC_H_MIN(classid);

        /* There are essentially two regions here that have valid classid
         * values. The first region will have a classid value of 1 through
         * num_tx_queues. All of these are backed by actual Qdiscs.
         */
        if (ntx < TC_H_MIN_PRIORITY)
                return (ntx <= dev->num_tx_queues) ? ntx : 0;

        /* The second region represents the hardware traffic classes. These
         * are represented by classid values of TC_H_MIN_PRIORITY through
         * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
         */
        return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
                         struct sk_buff *skb, struct tcmsg *tcm)
{
        if (cl < TC_H_MIN_PRIORITY) {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
                struct net_device *dev = qdisc_dev(sch);
                int tc = netdev_txq_to_tc(dev, cl - 1);

                tcm->tcm_parent = (tc < 0) ? 0 :
                        TC_H_MAKE(TC_H_MAJ(sch->handle),
                                  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
                tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
        } else {
                tcm->tcm_parent = TC_H_ROOT;
                tcm->tcm_info = 0;
        }
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        if (cl >= TC_H_MIN_PRIORITY) {
                int i;
                __u32 qlen;
                struct gnet_stats_queue qstats = {0};
                struct gnet_stats_basic_sync bstats;
                struct net_device *dev = qdisc_dev(sch);
                struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

                gnet_stats_basic_sync_init(&bstats);
                /* Drop the lock here; it will be reclaimed before touching
                 * statistics.  This is required because the d->lock we hold
                 * here is the lock on dev_queue->qdisc_sleeping, which is
                 * also acquired below.
                 */
                if (d->lock)
                        spin_unlock_bh(d->lock);

                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);
                        struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

                        spin_lock_bh(qdisc_lock(qdisc));

                        gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
                                             &qdisc->bstats, false);
                        gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
                                             &qdisc->qstats);
                        sch->q.qlen += qdisc_qlen(qdisc);

                        spin_unlock_bh(qdisc_lock(qdisc));
                }
                qlen = qdisc_qlen(sch) + qstats.qlen;

                /* Reclaim root sleeping lock before completing stats */
                if (d->lock)
                        spin_lock_bh(d->lock);
                if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
                    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                        return -1;
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                sch = rtnl_dereference(dev_queue->qdisc_sleeping);
                if (gnet_stats_copy_basic(d, sch->cpu_bstats,
                                          &sch->bstats, true) < 0 ||
                    qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        /* Walk hierarchy with a virtual class per tc */
        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
                if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
                        return;
        }

        /* Pad the values and skip over unused traffic classes */
        if (ntx < TC_MAX_QUEUE) {
                arg->count = TC_MAX_QUEUE;
                ntx = TC_MAX_QUEUE;
        }

        /* Reset offset, sort out remaining per-queue qdiscs */
        for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
                                                struct tcmsg *tcm)
{
        return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
        .graft          = mqprio_graft,
        .leaf           = mqprio_leaf,
        .find           = mqprio_find,
        .walk           = mqprio_walk,
        .dump           = mqprio_dump_class,
        .dump_stats     = mqprio_dump_class_stats,
        .select_queue   = mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
        .cl_ops         = &mqprio_class_ops,
        .id             = "mqprio",
        .priv_size      = sizeof(struct mqprio_sched),
        .init           = mqprio_init,
        .destroy        = mqprio_destroy,
        .attach         = mqprio_attach,
        .change_real_num_tx = mq_change_real_num_tx,
        .dump           = mqprio_dump,
        .owner          = THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
        return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
        unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");