/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
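
/*
 * A quick illustration of how the hash-table scheme described above is
 * driven from user space (example commands only, not taken from any real
 * setup; the kernel interface itself is the TCA_U32_* attributes below).
 * A second-level 256-bucket table is created, and the root table links
 * to it, hashing on the last byte of the IP source address:
 *
 *	tc filter add dev eth0 parent 1:0 prio 5 handle 2: \
 *		protocol ip u32 divisor 256
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip u32 \
 *		match ip src 10.0.0.0/8 \
 *		hashkey mask 0x000000ff at 12 link 2:
 *
 * Offset 12 is where the source address starts in the IP header, so each
 * matching packet is looked up in one of the 256 buckets of table 2.
 */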
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};
struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};
struct tc_u_common
{
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};
static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = ntohl(key & sel->hmask)>>fshift;

	return h;
}
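
/*
 * Worked example (illustrative numbers, not from any real configuration):
 * with a host-order hash mask of 0x00ff0000, u32_change() below computes
 * fshift = ffs(0x00ff0000) - 1 = 16.  For a 32-bit word whose host-order
 * value is 0x0a0b0c0d,
 *
 *	h = (0x0a0b0c0d & 0x00ff0000) >> 16 = 0x0b
 *
 * The caller then folds h into the table with "sel = ht->divisor & h";
 * since the stored divisor is the user divisor minus one and is applied
 * as a bit mask, power-of-two divisors give an even bucket spread.
 */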
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, _data;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &_data);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, _data;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &_data);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, _data;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &_data);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	if (net_ratelimit())
		printk(KERN_WARNING "cls_u32: dead loop\n");
	return -1;
}
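
/*
 * How a user-space match ends up in the key loop above (illustrative
 * encoding, not lifted from a live capture): "match ip protocol 6 0xff"
 * selects the IP protocol field, byte 9 of the IP header.  Because keys
 * compare aligned 32-bit words, it is encoded as the word at offset 8:
 *
 *	key->off  = 8
 *	key->val  = htonl(0x00060000)	(6 = IPPROTO_TCP)
 *	key->mask = htonl(0x00ff0000)
 *
 * The loop accepts the packet when ((*data ^ key->val) & key->mask)
 * is zero, i.e. when the masked bits match exactly.
 */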
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}
static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	/* hgenerator is only touched under the rtnl lock, so a plain
	 * increment is safe here.
	 */
	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
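
/*
 * A note on the handle layout assumed throughout this file (the macros
 * are defined in linux/pkt_cls.h): a 32-bit handle splits into a 12-bit
 * hash table id, an 8-bit bucket and a 12-bit node id,
 *
 *	TC_U32_HTID(h)	= h & 0xFFF00000
 *	TC_U32_HASH(h)	= (h >> 12) & 0xFF
 *	TC_U32_NODE(h)	= h & 0xFFF
 *
 * so gen_new_htid() above, by or-ing in 0x800 and shifting by 20, hands
 * out table ids in the 0x800..0xFFF range while leaving the bucket and
 * node fields free for the key nodes hashed into that table.
 */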
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
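
/*
 * For orientation, a sketch of how iproute2 options map onto the netlink
 * attributes parsed above (command shown for illustration only):
 *
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip u32 \
 *		ht 2: match ip dport 80 0xffff flowid 1:10
 *
 *	ht 2:		-> TCA_U32_HASH    (htid selecting the target table)
 *	match ...	-> TCA_U32_SEL     (struct tc_u32_sel plus its keys)
 *	flowid 1:10	-> TCA_U32_CLASSID
 *
 * while "divisor N" alone takes the tb[TCA_U32_DIVISOR] branch and
 * creates a new hash table instead of a key node.
 */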
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
	} else {
		NLA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
		}
		if (n->res.classid)
			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
		if (n->ht_down)
			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		NLA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");