// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
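
/* A reader's sketch of the bit layout the code below relies on (derived
 * from the hash helpers, not normative): the route realm word
 * (dst->tclassid) carries the destination realm in its low 16 bits and
 * the source realm in its high 16 bits.  A filter handle packs "to" into
 * bits 0-15 (bit 15 set = wildcard "to") and "from"/"fromdev" into bits
 * 16-31 (bit 31 set = interface match, 0xFFFF = wildcard "from").
 */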

struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket __rcu *table[256 + 1];
	struct rcu_head		rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu *ht[16 + 16 + 1];
	struct rcu_head		rcu;
};

struct route4_filter {
	struct route4_filter __rcu *next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
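
/* ROUTE4_FAILURE is a sentinel stored in the fastmap to remember that a
 * previous lookup for this (id, iif) pair matched nothing, so the slow
 * path can be skipped for repeated misses as well as repeated hits.
 */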

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}
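
/* The fastmap is in effect a 16-entry direct-mapped cache keyed on the
 * low four bits of the realm id; the spinlock only guarantees that
 * readers see a consistent (id, iif, filter) triple, not that an entry
 * survives until the next lookup.
 */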

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
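
/* Illustrative example (assumed realm numbers, not from the source): a
 * packet whose route carries tclassid 0x000A0003, i.e. "from realm 10 to
 * realm 3", probes bucket route4_hash_to(id) = 3 and then chain
 * route4_hash_from(id) = 10 within it, before falling back to the IIF
 * and wildcard chains.
 */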

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
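
/* This is a macro rather than a helper so that "return" and "continue"
 * act directly on route4_classify() and its candidate-chain loops: a
 * negative action result disables fastmap caching and tries the next
 * filter, anything else ends classification.
 */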

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}
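
/* Example userspace usage (illustrative, assumes realms are configured):
 *
 *   ip route add 192.168.1.0/24 dev eth1 realm 2
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route to 2 classid 1:10
 *
 * Packets routed towards realm 2 are then classified into class 1:10.
 */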

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;	/* out of bounds */
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
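
/* Note the asymmetry: route4_hash_*() above digest the packet's realm id
 * on the fast path, while to_hash()/from_hash() digest a 32-bit filter
 * handle for the RTNL-side tables; malformed handles yield 256 from
 * from_hash() or values above 256 from to_hash(), which the callers'
 * bound checks reject.
 */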

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}
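
/* Filter teardown is deferred: tcf_queue_work() runs the free after an
 * RCU grace period, so a concurrent route4_classify() walking the old
 * chain never touches freed memory; taking rtnl_lock in the worker
 * serializes teardown of the attached actions with the control path.
 */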

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might reference
			 * the filter; since we just unlinked it, it cannot
			 * get back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}
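
/* *last tells the caller whether the whole tcf_proto is now empty; the
 * scan of all 257 buckets runs under RTNL, so no separate filter count
 * is maintained.
 */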

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    u32 flags, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	if (!nhandle) {
		NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
		return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
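
/* Worked example of the handle construction above (assumed realms, for
 * illustration): "route to 2 from 10" yields nhandle = 2 | (10 << 16) =
 * 0x000a0002, while plain "route to 2" yields 0xffff0002, the wildcard
 * "from" encoding.
 */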

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (!handle) {
		NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
		return -EINVAL;
	}

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
					  route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, flags, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
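
/* Replacement is make-before-break: the new filter is linked into its
 * chain before the old one ("fold") is unlinked, so RCU readers always
 * see either the old or the new filter, never a gap; the fastmap reset
 * afterwards flushes any cached pointer to the replaced filter.
 */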

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");