// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

/*
 * 1. For now we assume that route tags are < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
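
/*
 * Filter handles pack the whole match into one u32 (see
 * route4_set_parms() below): bits 0..7 carry the TO realm (bit 15 is
 * set instead when no TO is given), and bits 16..31 carry the FROM
 * realm, the iif id with 0x8000 or'ed in, or 0xFFFF for "from ANY".
 * For example, "to 3 from 10" yields handle 0x000a0003.
 *
 * Illustrative iproute2 usage (assuming realm 10 was assigned with
 * "ip route ... realm 10"):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 100 \
 *		route from 10 classid 1:10
 */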
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

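/*
 * Negative-lookup sentinel: route4_classify() caches this in the
 * fastmap after a miss, so repeated lookups for the same (id, iif)
 * pair can fail without walking the hash tables again.
 */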
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

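/*
 * Expanded inside the lookup loops of route4_classify(): "continue"
 * resumes the enclosing bucket scan after a negative action verdict,
 * while a match returns from the classifier with the result set.
 */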
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

TC_INDIRECT_SCOPE int route4_classify(struct sk_buff *skb,
				      const struct tcf_proto *tp,
				      struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

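	/* Slow path: scan the specific TO bucket first, then restart on
	 * bucket 256, which holds the "to ANY" (wildcard) filters.
	 */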
	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
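
/*
 * Worked examples (hypothetical values): from_hash(0xFFFF) = 32 (the
 * wildcard bucket), from_hash(0x8003) = 19 (IIF bucket 16 + 3),
 * from_hash(0x0003) = 3 (FROM bucket), and from_hash(0x0100) = 256,
 * which callers such as route4_get() treat as out of range.
 */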

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might reference
			 * this filter; it was unlinked above, so it cannot
			 * re-enter the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    u32 flags, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	if (!nhandle) {
		NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
		return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (!handle) {
		NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
		return -EINVAL;
	}

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
					  route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, flags, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

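	/* When replacing, unlink the old filter only after the new one is
	 * visible, so concurrent readers always find a valid entry.
	 */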
	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (!tc_cls_stats_dump(tp, arg, f))
						return;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");