// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
	return nft_set_elem_expired(&rbe->ext) ||
	       nft_set_elem_is_dead(&rbe->ext);
}

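/* Lockless lookup walk: the tree is traversed under RCU against a seqcount
 * snapshot; the walk bails out if a concurrent writer bumped the seqcount,
 * and the caller falls back to the locked slow path.
 */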
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_rbtree_elem_expired(rbe))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_elem_expired(interval) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

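/* Packet path lookup: try the lockless walk first and only take the read
 * lock if a concurrent update invalidated the seqcount snapshot.
 */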
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

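/* Control plane variant of the lookup walk: returns the element itself and
 * matches on the NFT_SET_ELEM_INTERVAL_END flag requested by the caller.
 */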
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

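/* Deactivate the element's data and unlink it from the tree; helper for
 * the synchronous garbage collection performed during insertion.
 */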
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
				 struct nft_rbtree *priv,
				 struct nft_rbtree_elem *rbe)
{
	struct nft_set_elem elem = {
		.priv	= rbe,
	};

	nft_setelem_data_deactivate(net, set, &elem);
	rb_erase(&rbe->node, &priv->root);
}

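/* Synchronously collect an expired element found while checking for
 * overlaps on insertion, together with the end element it is paired with.
 */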
static int nft_rbtree_gc_elem(const struct nft_set *__set,
			      struct nft_rbtree *priv,
			      struct nft_rbtree_elem *rbe,
			      u8 genmask)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct net *net = read_pnet(&set->net);
	struct nft_rbtree_elem *rbe_prev;
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
	if (!gc)
		return -ENOMEM;

	/* search for end interval coming before this element.
	 * end intervals don't carry a timeout extension, they
	 * are coupled with the interval start element.
	 */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev) &&
		    nft_set_elem_active(&rbe_prev->ext, genmask))
			break;

		prev = rb_prev(prev);
	}

	if (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		nft_rbtree_gc_remove(net, set, priv, rbe_prev);

		/* There is always room in this trans gc for this element,
		 * memory allocation never actually happens, hence, the warning
		 * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
		 * this is synchronous gc which never fails.
		 */
		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
		if (WARN_ON_ONCE(!gc))
			return -ENOMEM;

		nft_trans_gc_elem_add(gc, rbe_prev);
	}

	nft_rbtree_gc_remove(net, set, priv, rbe);
	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
	if (WARN_ON_ONCE(!gc))
		return -ENOMEM;

	nft_trans_gc_elem_add(gc, rbe);

	nft_trans_gc_queue_sync_done(gc);

	return 0;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closest to where the new element is to be inserted:
	 * update the first element for the node list path.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}

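/* Insert a new element while checking for overlaps with existing intervals:
 * full overlaps are reported as -EEXIST, partial overlaps as -ENOTEMPTY.
 */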
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *next, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	int d, err;

	/* Descend the tree to search for an existing element greater than the
	 * key value to insert that is greater than the new element. This is the
	 * first element to walk the ordered elements to find possible overlap.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlap by going through the list of valid tree nodes.
	 * Values stored in the tree are in reversed order, starting from
	 * highest to lowest value.
	 */
	for (node = first; node != NULL; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports. */
		if (nft_set_elem_expired(&rbe->ext)) {
			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
			if (err < 0)
				return err;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to key value. */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* this is a closer more or equal element, update it. */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to key value, make sure flags are
			 * the same, an existing more or equal start element
			 * must not be replaced by more or equal end element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			break;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*ext = &rbe_ge->ext;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*ext = &rbe_le->ext;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start element since they
	 *   are constant, skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being a end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

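/* Writer side entry point: serialize against other writers with priv->lock
 * and bump the seqcount so lockless readers can detect the update.
 */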
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	nft_set_elem_change_active(net, set, &rbe->ext);

	return true;
}

static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

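/* Iterate over all elements under the read lock, honouring the skip count
 * and generation mask, and invoke the iterator callback on each one.
 */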
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

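/* Asynchronous garbage collection worker: expired start/end element pairs
 * are marked dead and queued to a GC transaction for later removal.
 */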
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
	struct nftables_pernet *nft_net;
	struct nft_rbtree *priv;
	struct nft_trans_gc *gc;
	struct rb_node *node;
	struct nft_set *set;
	unsigned int gc_seq;
	struct net *net;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set = nft_set_container_of(priv);
	net = read_pnet(&set->net);
	nft_net = nft_pernet(net);
	gc_seq = READ_ONCE(nft_net->gc_seq);

	if (nft_set_gc_is_pending(set))
		goto done;

	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
	if (!gc)
		goto done;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {

		/* Ruleset has been updated, try later. */
		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
			nft_trans_gc_destroy(gc);
			gc = NULL;
			goto try_later;
		}

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_set_elem_is_dead(&rbe->ext))
			goto dead_elem;

		/* elements are reversed in the rbtree for historical reasons,
		 * from highest to lowest value, that is why end element is
		 * always visited before the start element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;

		nft_set_elem_dead(&rbe->ext);

		if (!rbe_end)
			continue;

		nft_set_elem_dead(&rbe_end->ext);

		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
		if (!gc)
			goto try_later;

		nft_trans_gc_elem_add(gc, rbe_end);
		rbe_end = NULL;
dead_elem:
		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
		if (!gc)
			goto try_later;

		nft_trans_gc_elem_add(gc, rbe);
	}

	gc = nft_trans_gc_catchall(gc, gc_seq);

try_later:
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	if (gc)
		nft_trans_gc_queue_async_done(gc);
done:
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

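/* Tear down the set: stop the GC worker, wait for pending RCU callbacks and
 * release all remaining elements.
 */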
static void nft_rbtree_destroy(const struct nft_ctx *ctx,
			       const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nf_tables_set_elem_destroy(ctx, set, rbe);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};