// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
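
/*
 * Rough usage sketch (hypothetical device name; see tc-gred(8) for the
 * authoritative syntax): the table of virtual queues is set up first,
 * then each DP is configured individually, e.g.
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *	tc qdisc change dev eth0 root gred DP 0 limit 60KB min 15KB \
 *		max 45KB burst 20 avpkt 1000 bandwidth 10Mbit prio 1
 */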

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
	struct tc_gred_qopt_offload *opt;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

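/*
 * WRED mode is in effect when two or more VQs share the same priority:
 * all VQs then feed one shared average queue estimate (wred_set) instead
 * of each keeping its own. Returns 1 if a duplicate prio exists, i.e. if
 * the callers should enable WRED mode.
 */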
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but this should not need to run frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

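/*
 * In WRED mode the shared average is computed over the whole qdisc, so
 * the qdisc-wide backlog is used; otherwise each VQ is measured against
 * its own byte backlog.
 */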
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

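/*
 * The VQ for a packet is selected by the low bits of skb->tc_index,
 * which a classifier (or an action) is expected to have set. As a
 * sketch of the encoding, assuming the usual MAX_DPs of 16 so that
 * GRED_VQ_MASK is 0xf: a tc_index of 0x12 selects DP 2.
 */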
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

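/*
 * In WRED mode a single qavg/qidlestart pair (table->wred_set) is shared
 * by all VQs; these helpers copy the shared state into and out of the
 * per-VQ red_vars around each RED computation.
 */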
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Per-VQ flags can only have been set if the global flags are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows flows not mapped to any DP to be
			 * left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? Could be controversial, but it is
		 * needed for requeueing.
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* In RIO mode, sum the qavgs of all VQs with a higher priority
	 * (prio < ours) to get the effective average for this VQ.
	 */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

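	/*
	 * Update the average queue length. red_calc_qavg() keeps qavg
	 * scaled by 2^Wlog; conceptually (a sketch, see include/net/red.h
	 * for the fixed-point details):
	 *
	 *	avg := avg + W * (backlog - avg), with W = 2^(-Wlog)
	 */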
	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

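	/*
	 * Classic RED decision on the (possibly summed) average: below
	 * qth_min nothing is marked, between qth_min and qth_max packets
	 * are marked/dropped with a probability growing towards max_P,
	 * and above qth_max every packet is marked/dropped.
	 */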
	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

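			/*
			 * Once the relevant queue has fully drained,
			 * record the start of an idle period so the
			 * average keeps decaying while the queue sits
			 * empty (see red_start_of_idle_period()).
			 */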
			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload *opt = table->opt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = sch->handle;
	opt->parent = sch->parent;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt->set.grio_on = gred_rio_mode(table);
		opt->set.wred_on = gred_wred_mode(table);
		opt->set.dp_cnt = table->DPs;
		opt->set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt->set.tab[i].present = true;
			opt->set.tab[i].limit = q->limit;
			opt->set.tab[i].prio = q->prio;
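			/*
			 * red_set_parms() stores the thresholds scaled
			 * by 2^Wlog; shift them back so the driver sees
			 * the user-specified byte values.
			 */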
			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt->set.tab[i].is_ecn = gred_use_ecn(q);
			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt->set.tab[i].probability = q->parms.max_P;
			opt->set.tab[i].backlog = &q->backlog;
		}
		opt->set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}

static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Adjust the stats even if the driver returned an error, in case
	 * the offload has ended but the driver still wants to adjust the
	 * values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code,
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found, so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

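/*
 * (Re)configure a single VQ. @prealloc was allocated by the caller with
 * GFP_KERNEL before taking the qdisc tree lock (no sleeping is allowed
 * here) and is consumed only if the VQ does not exist yet.
 */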
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else {
			prio = ctl->prio;
		}
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

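	/*
	 * Default byte limit: roughly tx_queue_len worth of full-MTU
	 * packets, so the qdisc behaves sanely before the user sets an
	 * explicit limit.
	 */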
	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
	}

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* Hack: fix at some point with a proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

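		/*
		 * Recompute the average so it decays if the queue has
		 * been idle, then unscale (qavg is kept scaled by
		 * 2^Wlog) before reporting it to userspace.
		 */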
		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++)
		gred_destroy_vq(table->tab[i]);

	gred_offload(sch, TC_GRED_DESTROY);
	kfree(table->opt);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");