// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
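/* For orientation (added note, not part of the original file): from
 * userspace this classifier is typically driven via iproute2, e.g.
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.0/8 flowid 1:1
 *
 * which compiles down to a tc_u32_sel with a single 32bit key/mask
 * pair matched against the IPv4 destination address.
 */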
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};
struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};
struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
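/* Worked example (illustrative, not from the original file): with
 * hmask = htonl(0x00ff0000), the node's fshift is computed elsewhere as
 * ffs(ntohl(hmask)) - 1 = 16, so for key = htonl(0x0a0b0c0d):
 *
 *	ntohl(key & hmask) >> 16 == 0x0b
 *
 * The caller then ANDs this with ht->divisor (stored as a power of two
 * minus one) to pick the destination bucket.
 */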
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif
		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif
		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {
				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
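/* Illustrative sketch (added note, not from the original file): a knode
 * matching IPv4 destination 10.0.0.0/8 carries a single key with
 *
 *	key->off  = 16;			   offset of dst addr in IP header
 *	key->val  = htonl(0x0a000000);
 *	key->mask = htonl(0xff000000);
 *
 * so the key loop above fetches the 32bit word at network offset + 16
 * and rejects the node unless ((*data ^ val) & mask) == 0.
 */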
static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}
static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;

	return (id | 0x800U) << 20;
}
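/* Example (illustrative, not from the original file): the first cyclic
 * idr id is 1, so the first auto-generated table handle is
 * (1 | 0x800) << 20 == 0x80100000; the root table keeps the fixed
 * handle 0x80000000. The htid always occupies the top 12 bits of the
 * 32bit handle (htid:bucket:node == 12:8:12).
 */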
static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}
static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}
static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	/* root_ht must be destroyed when tcf_proto is destroyed */
	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}
static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}
/* u32_delete_key_rcu should be called when free'ing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that free's the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}
static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}
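/* Example (illustrative, not from the original file): for a table with
 * htid 0x80100000 the preferred node ids are 0x800..0xFFF, i.e. handles
 * 0x80100800..0x80100FFF; only when that range is exhausted does the
 * allocator fall back to the low ids starting at htid + 1.
 */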
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};
static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			      struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID])
		tcf_unbind_filter(tp, &n->res);
}
static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			    unsigned long base, struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}
}
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err, ifindex = -1;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_INDEV]) {
		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ifindex < 0)
			return -EINVAL;
	}

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}

	if (ifindex >= 0)
		n->ifindex = ifindex;

	return 0;
}
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not the
	 * case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them in tact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

	return new;
}
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, u32 flags,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, userflags = 0;
	size_t sel_size;
	int err;

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(userflags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}
	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ userflags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
				    flags, new->flags, extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		u32_bind_filter(tp, new, base, tb);

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_unbind_filter(tp, new, tb);

			if (tb[TCA_U32_LINK]) {
				struct tc_u_hnode *ht_old;

				ht_old = rtnl_dereference(n->ht_down);
				if (ht_old)
					ht_old->refcnt++;
			}
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}
	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}
	/* At this point, we need to derive the new handle that will be used to
	 * uniquely map the identity of this table match entry. The
	 * identity of the entry that we need to construct is 32 bits made of:
	 * htid(12b):bucketid(8b):node/entryid(12b)
	 *
	 * At this point _we have the table(ht)_ in which we will insert this
	 * entry. We carry the table's id in variable "htid".
	 * Note that earlier code picked the ht selection either by a) the user
	 * providing the htid specified via TCA_U32_HASH attribute or b) when
	 * no such attribute is passed, in which case the root ht at ID
	 * 0x[800][00][000] is the default. Rule: the root table has a single
	 * bucket with ID 0. If OTOH the user passed us the htid, they may also
	 * pass a bucketid of choice. 0 is fine. For example a user htid of
	 * 0x[600][01][000] indicates hash bucketid 1. Rule: the entry/node ID
	 * _cannot_ be passed via the htid, so even if it was non-zero it will
	 * be ignored.
	 *
	 * We may also have a handle, if the user passed one. The handle also
	 * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
	 * Rule: the bucketid on the handle is ignored even if one was passed;
	 * rather the value on "htid" is always assumed to be the bucketid.
	 */
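	/* For reference (added note, not part of the original file): the
	 * decomposition above corresponds to the uapi macros in
	 * include/uapi/linux/pkt_cls.h:
	 *
	 *	TC_U32_HTID(h)	((h) & 0xFFF00000)	top 12 bits
	 *	TC_U32_HASH(h)	(((h) >> 12) & 0xFF)	middle 8 bits
	 *	TC_U32_NODE(h)	((h) & 0xFFF)		low 12 bits
	 *	TC_U32_KEY(h)	((h) & 0xFFFFF)		bucket + node
	 *
	 * e.g. handle 0x80100801 names table 0x80100000, bucket 0,
	 * node id 0x801.
	 */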
	err = 0;
	if (handle) {
		/* Rule: The htid from handle and tableid from htid must match */
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		/* Ok, so far we have a valid htid(12b):bucketid(8b) but we
		 * need to finalize the table entry identification with the last
		 * part - the node/entryid(12b). Rule: Nodeid _cannot be 0_ for
		 * entries. Rule: nodeid of 0 is reserved only for tables (see
		 * earlier code which processes the TC_U32_DIVISOR attribute).
		 * Rule: The nodeid can only be derived from the handle (and not
		 * from htid).
		 * Rule: if the handle specified zero for the node id, example
		 * 0x60000000, then pick a new nodeid from the pool of IDs
		 * this hash table has been allocating from.
		 * If OTOH it is specified (i.e. for example the user passed a
		 * handle such as 0x60000123), then we use it to generate our
		 * final handle which uniquely identifies the match entry.
		 */
		if (!TC_U32_NODE(handle)) {
			handle = gen_new_kid(ht, htid);
		} else {
			handle = htid | TC_U32_NODE(handle);
			err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
					    handle, GFP_KERNEL);
			if (err)
				return err;
		}
	} else {
		/* The user did not give us a handle; lets just generate one
		 * from the table's pool of nodeids.
		 */
		handle = gen_new_kid(ht, htid);
	}
	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif
	unsafe_memcpy(&n->sel, s, sel_size,
		      /* A composite flex-array structure destination,
		       * which was correctly sized with struct_size(),
		       * bounds-checked against nla_len(), and allocated
		       * above. */);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = userflags;
	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);

	u32_bind_filter(tp, n, base, tb);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errunbind;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}
errunbind:
	u32_unbind_filter(tp, n, tb);

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);

erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		if (!tc_cls_stats_dump(tp, arg, ht))
			return;

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (!tc_cls_stats_dump(tp, arg, n))
					return;
			}
		}
	}
}
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}
static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}
static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	tc_cls_bind_class(classid, cl, q, &n->res, base);
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.reoffload	=	u32_reoffload,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");