/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
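
/* For illustration only (not part of the original header): a filter in
 * this scheme is typically installed from user space with tc(8), e.g.
 *
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip \
 *		u32 match ip dst 10.0.0.0/8 flowid 1:1
 *
 * which compiles down to a single 32bit value/mask match on the IPv4
 * destination word and a classid to return on a hit.
 */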

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
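
/*
 * One tc_u_common is shared by all u32 filters attached to a qdisc; it
 * anchors the list of hash tables (tc_u_hnode), each of which holds
 * chains of key nodes (tc_u_knode) carrying the 32bit value/mask
 * selectors.
 */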

struct tc_u_knode {
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};
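
/*
 * Fold the hashed bits of a key down to a bucket index: mask out the
 * interesting bits, convert to host order and right-align them.  With
 * hmask = htonl(0x0000ff00), for example, fshift is 8 and a key whose
 * masked byte is 0x33 folds to bucket 0x33.
 */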
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
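
/*
 * Walk the hash-table hierarchy for one packet.  Matching descends from
 * the root table, pushing the current node and offset on a small stack
 * (bounded by TC_U32_MAXDEPTH) whenever a node links to a lower table,
 * and popping on mismatch so matching can resume at the link point.
 */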
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
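
/*
 * Handles encode a 12-bit hash-table id in the top bits, an 8-bit bucket
 * and a 12-bit node id (see TC_U32_HTID/TC_U32_HASH/TC_U32_NODE).
 * Generated table ids live in the 0x800-0xFFF range, leaving the rest
 * for user-chosen ids.
 */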
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
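
/* Pick a node id for a new key: one past the highest id already in the
 * target bucket, starting no lower than 0x800 and saturating at 0xFFF.
 */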
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned int i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(net, tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
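
/*
 * Create or update a filter.  Three cases are handled in order: changing
 * the parameters of an existing key node, creating a new hash table when
 * TCA_U32_DIVISOR is given, and otherwise creating a new key node inside
 * the table selected by TCA_U32_HASH (or the root table).
 */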
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(net, tp, base, n->ht_up, n, tb,
				     tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
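
/*
 * Iterate over every hash table and key node of this filter, honouring
 * the walker's skip/count bookkeeping and stopping early if the callback
 * asks for it.
 */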
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev) &&
		    nla_put_string(skb, TCA_U32_INDEV, n->indev))
			goto nla_put_failure;
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");