// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
 * action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
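/* Illustrative sketch (not part of the original source, assumes the
 * little-endian field layout above): composing and decomposing a cookie
 * round-trips through the union.
 *
 *	union tcf_exts_miss_cookie mc = { .act_index = 1, };
 *
 *	mc.miss_cookie_base = 0x2a;
 *	// mc.miss_cookie now carries both 32-bit halves in a single u64
 *	WARN_ON(mc.act_index != 1 || mc.miss_cookie_base != 0x2a);
 */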
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
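/* Usage sketch (illustrative): a feature that needs tc to restore chain state
 * through the skb extension brackets its lifetime with this static key, so
 * the fast path stays free of the check otherwise.
 *
 *	tc_skb_ext_tc_enable();		// e.g. when the first such rule is installed
 *	...
 *	tc_skb_ext_tc_disable();	// when the last such rule is removed
 */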
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}
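/* Note: destroy_obj_hashfn() and tcf_proto_cmp() must stay in sync - both key
 * on the (chain index, prio, protocol) triple that identifies a tcf_proto
 * within a block.
 */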
static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}
/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
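/* Usage sketch (illustrative, names hypothetical): a classifier module
 * registers its ops on init and unregisters them on exit.
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		// .classify, .init, .destroy, ... as required by the ops
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */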
static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
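/* Usage sketch (illustrative, names hypothetical): classifiers free filters
 * through this helper so the free runs after an RCU grace period, on the
 * shared tc_filter_wq.
 *
 *	static void cls_foo_free_work(struct work_struct *work)
 *	{
 *		struct cls_foo_filter *f = container_of(to_rcu_work(work),
 *							struct cls_foo_filter,
 *							rwork);
 *		kfree(f);
 *	}
 *
 *	// on delete: tcf_queue_work(&f->rwork, cls_foo_free_work);
 */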
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
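/* Worked example (illustrative): priorities live in the upper 16 bits, so if
 * the current head has prio 0x00020000 (user prio 2), then
 * tp->prio - 1 == 0x0001ffff and TC_H_MAJ() yields 0x00010000 (user prio 1).
 * With no existing filter, the kernel-managed default 0xC0000000 (user prio
 * 49152) is returned.
 */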
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	return true;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}
#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}
static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}
static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
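/* Lifecycle note (illustrative): a chain created via tcf_chain_get_by_act()
 * holds only action references, so tcf_chain_held_by_acts_only() is true and
 * the chain stays hidden from the user; the first non-action reference taken
 * through __tcf_chain_get() is what triggers the RTM_NEWCHAIN notification.
 */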
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	unsigned int refcnt;
	bool free_block = false;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);
static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}
static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
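/* Usage sketch: the iterator returns a held chain and drops the previous
 * reference, so a full traversal is simply:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// use chain; the next call releases this reference
 *	}
 */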
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}
/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains the reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}
/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
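/* Usage sketch (illustrative, names hypothetical): a classful qdisc obtains
 * its block in ->init() and releases it in ->destroy().
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	// and in ->destroy(): tcf_block_put(q->block);
 */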
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */

void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}
static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts))
				return TC_ACT_SHOT;

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts))
				return TC_ACT_SHOT;

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n))
		return TC_ACT_SHOT;

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
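/* Worked example (illustrative): a "goto chain 7" action returns
 * TC_ACT_GOTO_CHAIN | 7, so TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN) matches
 * and (err & TC_ACT_EXT_VAL_MASK) recovers the target chain index 7, which
 * is what __tcf_classify() records in *last_executed_chain before restarting
 * from the new chain's head via res->goto_tp.
 */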
int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n)
					return TC_ACT_SHOT;

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
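/* Worked example (illustrative): with filters at prio 1 (protocol ip) and
 * prio 3 (protocol all), looking up prio 3/protocol ip fails with -EINVAL
 * (protocol mismatch), looking up prio 2 returns NULL with chain_info
 * positioned between the two entries, and prio_allocate combined with an
 * already-used prio also yields -EINVAL.
 */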
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
			       event, false, true, extack);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static bool is_qdisc_ingress(__u32 classid)
{
	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	if (is_qdisc_ingress(parent))
		flags |= TCA_ACT_FLAGS_AT_INGRESS;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held, extack);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
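/* Request example (illustrative): "tc filter add dev eth0 ingress prio 1
 * protocol ip flower ..." arrives here as RTM_NEWTFILTER with NLM_F_CREATE
 * set, TCA_KIND "flower", tcm_info carrying prio 1 and ETH_P_IP, and an
 * ingress parent; omitting "prio" sets prio_allocate so tcf_auto_prio()
 * picks the priority instead.
 */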
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held, extack);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);
		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
2508 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2509 struct netlink_ext_ack *extack)
2511 struct net *net = sock_net(skb->sk);
2512 struct nlattr *tca[TCA_MAX + 1];
2513 char name[IFNAMSIZ];
2519 struct Qdisc *q = NULL;
2520 struct tcf_chain_info chain_info;
2521 struct tcf_chain *chain = NULL;
2522 struct tcf_block *block = NULL;
2523 struct tcf_proto *tp = NULL;
2524 unsigned long cl = 0;
2527 bool rtnl_held = false;
2529 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2530 rtm_tca_policy, extack);
2535 protocol = TC_H_MIN(t->tcm_info);
2536 prio = TC_H_MAJ(t->tcm_info);
2537 parent = t->tcm_parent;
2540 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2544 /* Find head of filter chain. */
2546 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2550 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2551 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2555 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
2556 * unlocked, classifier type is not specified, classifier is not
2559 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2560 !tcf_proto_is_unlocked(name)) {
2565 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2569 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2571 if (IS_ERR(block)) {
2572 err = PTR_ERR(block);
2576 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2577 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2578 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2582 chain = tcf_chain_get(block, chain_index, false);
2584 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2589 mutex_lock(&chain->filter_chain_lock);
2590 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2592 mutex_unlock(&chain->filter_chain_lock);
2593 if (!tp || IS_ERR(tp)) {
2594 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2595 err = tp ? PTR_ERR(tp) : -ENOENT;
2597 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2598 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2603 fh = tp->ops->get(tp, t->tcm_handle);
2605 if (!fh) {
2606 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2607 err = -ENOENT;
2608 } else {
2609 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2610 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2611 if (err < 0)
2612 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2613 }
2615 tfilter_put(tp, fh);
2616 errout:
2617 if (chain) {
2618 if (tp && !IS_ERR(tp))
2619 tcf_proto_put(tp, rtnl_held, NULL);
2620 tcf_chain_put(chain);
2621 }
2622 tcf_block_release(q, block, rtnl_held);
2624 if (rtnl_held)
2625 rtnl_unlock();
2627 return err;
2628 }
2630 struct tcf_dump_args {
2631 struct tcf_walker w;
2632 struct sk_buff *skb;
2633 struct netlink_callback *cb;
2634 struct tcf_block *block;
2635 struct Qdisc *q;
2636 u32 parent;
2637 bool terse_dump;
2638 };
2640 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2642 struct tcf_dump_args *a = (void *)arg;
2643 struct net *net = sock_net(a->skb->sk);
2645 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2646 n, NETLINK_CB(a->cb->skb).portid,
2647 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2648 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2649 }
2651 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2652 struct sk_buff *skb, struct netlink_callback *cb,
2653 long index_start, long *p_index, bool terse)
2655 struct net *net = sock_net(skb->sk);
2656 struct tcf_block *block = chain->block;
2657 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2658 struct tcf_proto *tp, *tp_prev;
2659 struct tcf_dump_args arg;
2661 for (tp = __tcf_get_next_proto(chain, NULL);
2662 tp;
2663 tp_prev = tp,
2664 tp = __tcf_get_next_proto(chain, tp),
2665 tcf_proto_put(tp_prev, true, NULL),
2666 (*p_index)++) {
2667 if (*p_index < index_start)
2668 continue;
2669 if (TC_H_MAJ(tcm->tcm_info) &&
2670 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2671 continue;
2672 if (TC_H_MIN(tcm->tcm_info) &&
2673 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2674 continue;
2675 if (*p_index > index_start)
2676 memset(&cb->args[1], 0,
2677 sizeof(cb->args) - sizeof(cb->args[0]));
2678 if (cb->args[1] == 0) {
2679 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2680 NETLINK_CB(cb->skb).portid,
2681 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2682 RTM_NEWTFILTER, false, true, NULL) <= 0)
2683 goto errout;
2684 cb->args[1] = 1;
2685 }
2686 if (!tp->ops->walk)
2687 continue;
2688 arg.w.fn = tcf_node_dump;
2689 arg.skb = skb;
2690 arg.cb = cb;
2691 arg.block = block;
2692 arg.q = q;
2693 arg.parent = parent;
2694 arg.w.stop = 0;
2695 arg.w.skip = cb->args[1] - 1;
2696 arg.w.count = 0;
2697 arg.w.cookie = cb->args[2];
2698 arg.terse_dump = terse;
2699 tp->ops->walk(tp, &arg.w, true);
2700 cb->args[2] = arg.w.cookie;
2701 cb->args[1] = arg.w.count + 1;
2702 if (arg.w.stop)
2703 goto errout;
2704 }
2705 return true;
2707 errout:
2708 tcf_proto_put(tp, true, NULL);
2709 return false;
2710 }
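/* Editor's illustrative sketch (not part of the original file, compiled out):
 * the walker contract that tcf_chain_dump() and tcf_node_dump() rely on. A
 * classifier's ->walk() is expected to honor arg->skip/arg->count and to set
 * arg->stop when the callback aborts. "example_head"/"example_filter" and
 * their list are hypothetical names, not a real classifier.
 */
#if 0
static void example_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct example_head *head = rtnl_dereference(tp->root);
	struct example_filter *f;

	list_for_each_entry(f, &head->filters, link) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
#endif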
2712 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2713 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2714 };
2716 /* called with RTNL */
2717 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2719 struct tcf_chain *chain, *chain_prev;
2720 struct net *net = sock_net(skb->sk);
2721 struct nlattr *tca[TCA_MAX + 1];
2722 struct Qdisc *q = NULL;
2723 struct tcf_block *block;
2724 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2725 bool terse_dump = false;
2726 long index_start;
2727 long index;
2728 u32 parent;
2729 int err;
2731 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2732 return skb->len;
2734 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2735 tcf_tfilter_dump_policy, cb->extack);
2736 if (err)
2737 return err;
2739 if (tca[TCA_DUMP_FLAGS]) {
2740 struct nla_bitfield32 flags =
2741 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2743 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2744 }
2746 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2747 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2748 if (!block)
2749 goto out;
2750 /* If we work with block index, q is NULL and parent value
2751 * will never be used in the following code. The check
2752 * in tcf_fill_node prevents it. However, compiler does not
2753 * see that far, so set parent to zero to silence the warning
2754 * about parent being uninitialized.
2755 */
2756 parent = 0;
2757 } else {
2758 const struct Qdisc_class_ops *cops;
2759 struct net_device *dev;
2760 unsigned long cl = 0;
2762 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2763 if (!dev)
2764 return skb->len;
2766 parent = tcm->tcm_parent;
2767 if (!parent)
2768 q = rtnl_dereference(dev->qdisc);
2769 else
2770 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2771 if (!q)
2772 goto out;
2773 cops = q->ops->cl_ops;
2774 if (!cops)
2775 goto out;
2776 if (!cops->tcf_block)
2777 goto out;
2778 if (TC_H_MIN(tcm->tcm_parent)) {
2779 cl = cops->find(q, tcm->tcm_parent);
2780 if (!cl)
2781 goto out;
2782 }
2783 block = cops->tcf_block(q, cl, NULL);
2784 if (!block)
2785 goto out;
2786 parent = block->classid;
2787 if (tcf_block_shared(block))
2788 q = NULL;
2789 }
2791 index_start = cb->args[0];
2792 index = 0;
2794 for (chain = __tcf_get_next_chain(block, NULL);
2795 chain;
2796 chain_prev = chain,
2797 chain = __tcf_get_next_chain(block, chain),
2798 tcf_chain_put(chain_prev)) {
2799 if (tca[TCA_CHAIN] &&
2800 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2801 continue;
2802 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2803 index_start, &index, terse_dump)) {
2804 tcf_chain_put(chain);
2805 err = -EMSGSIZE;
2806 break;
2807 }
2808 }
2810 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2811 tcf_block_refcnt_put(block, true);
2812 cb->args[0] = index;
2814 out:
2815 /* If we did no progress, the error (EMSGSIZE) is real */
2816 if (skb->len == 0 && err)
2817 return err;
2818 return skb->len;
2819 }
2821 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2822 void *tmplt_priv, u32 chain_index,
2823 struct net *net, struct sk_buff *skb,
2824 struct tcf_block *block,
2825 u32 portid, u32 seq, u16 flags, int event,
2826 struct netlink_ext_ack *extack)
2828 unsigned char *b = skb_tail_pointer(skb);
2829 const struct tcf_proto_ops *ops;
2830 struct nlmsghdr *nlh;
2831 struct tcmsg *tcm;
2832 void *priv;
2834 ops = tmplt_ops;
2835 priv = tmplt_priv;
2837 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2838 if (!nlh)
2839 goto out_nlmsg_trim;
2840 tcm = nlmsg_data(nlh);
2841 tcm->tcm_family = AF_UNSPEC;
2842 tcm->tcm__pad1 = 0;
2843 tcm->tcm__pad2 = 0;
2844 tcm->tcm_handle = 0;
2845 if (block->q) {
2846 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2847 tcm->tcm_parent = block->q->handle;
2848 } else {
2849 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2850 tcm->tcm_block_index = block->index;
2851 }
2853 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2854 goto nla_put_failure;
2856 if (ops) {
2857 if (nla_put_string(skb, TCA_KIND, ops->kind))
2858 goto nla_put_failure;
2859 if (ops->tmplt_dump(skb, net, priv) < 0)
2860 goto nla_put_failure;
2861 }
2863 if (extack && extack->_msg &&
2864 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2865 goto out_nlmsg_trim;
2867 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2868 return skb->len;
2870 out_nlmsg_trim:
2871 nla_put_failure:
2872 nlmsg_trim(skb, b);
2873 return -EMSGSIZE;
2874 }
2877 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2878 u32 seq, u16 flags, int event, bool unicast,
2879 struct netlink_ext_ack *extack)
2881 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2882 struct tcf_block *block = chain->block;
2883 struct net *net = block->net;
2884 struct sk_buff *skb;
2885 int err;
2887 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2888 if (!skb)
2889 return -ENOBUFS;
2891 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2892 chain->index, net, skb, block, portid,
2893 seq, flags, event, extack) <= 0) {
2894 kfree_skb(skb);
2895 return -EINVAL;
2896 }
2898 if (unicast)
2899 err = rtnl_unicast(skb, net, portid);
2900 else
2901 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2902 flags & NLM_F_ECHO);
2903 if (err > 0)
2904 err = 0;
2905 return err;
2906 }
2907 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2908 void *tmplt_priv, u32 chain_index,
2909 struct tcf_block *block, struct sk_buff *oskb,
2910 u32 seq, u16 flags, bool unicast)
2912 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2913 struct net *net = block->net;
2914 struct sk_buff *skb;
2916 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2917 if (!skb)
2918 return -ENOBUFS;
2920 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2921 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2922 kfree_skb(skb);
2923 return -EINVAL;
2924 }
2926 if (unicast)
2927 return rtnl_unicast(skb, net, portid);
2929 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2930 }
2932 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2933 struct nlattr **tca,
2934 struct netlink_ext_ack *extack)
2936 const struct tcf_proto_ops *ops;
2937 char name[IFNAMSIZ];
2938 void *tmplt_priv;
2940 /* If kind is not set, user did not specify template. */
2941 if (!tca[TCA_KIND])
2942 return 0;
2944 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2945 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2946 return -EINVAL;
2947 }
2949 ops = tcf_proto_lookup_ops(name, true, extack);
2950 if (IS_ERR(ops))
2951 return PTR_ERR(ops);
2952 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2953 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2954 module_put(ops->owner);
2955 return -EOPNOTSUPP;
2956 }
2957 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2958 if (IS_ERR(tmplt_priv)) {
2959 module_put(ops->owner);
2960 return PTR_ERR(tmplt_priv);
2961 }
2962 chain->tmplt_ops = ops;
2963 chain->tmplt_priv = tmplt_priv;
2964 return 0;
2965 }
2967 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2968 void *tmplt_priv)
2969 {
2970 /* If template ops are not set, there is no work to do. */
2971 if (!tmplt_ops)
2972 return;
2974 tmplt_ops->tmplt_destroy(tmplt_priv);
2975 module_put(tmplt_ops->owner);
2976 }
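/* Editor's illustrative sketch (not part of the original file, compiled out):
 * the three callbacks tc_chain_tmplt_add() checks for. A classifier that
 * wants chain-template support wires them up in its tcf_proto_ops; the
 * "example_*" names and the private struct are hypothetical.
 */
#if 0
struct example_tmplt {
	u32 flags;
};

static void *example_tmplt_create(struct net *net, struct tcf_chain *chain,
				  struct nlattr **tca,
				  struct netlink_ext_ack *extack)
{
	struct example_tmplt *tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);

	return tmplt ?: ERR_PTR(-ENOMEM);
}

static void example_tmplt_destroy(void *tmplt_priv)
{
	kfree(tmplt_priv);
}

static int example_tmplt_dump(struct sk_buff *skb, struct net *net,
			      void *tmplt_priv)
{
	return 0;
}
#endif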
2978 /* Add/delete/get a chain */
2980 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2981 struct netlink_ext_ack *extack)
2983 struct net *net = sock_net(skb->sk);
2984 struct nlattr *tca[TCA_MAX + 1];
2985 struct tcmsg *t;
2986 u32 parent;
2987 u32 chain_index;
2988 struct Qdisc *q;
2989 struct tcf_chain *chain;
2990 struct tcf_block *block;
2991 unsigned long cl;
2992 int err;
2994 replay:
2995 q = NULL;
2996 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2997 rtm_tca_policy, extack);
2998 if (err < 0)
2999 return err;
3001 t = nlmsg_data(n);
3002 parent = t->tcm_parent;
3003 cl = 0;
3005 block = tcf_block_find(net, &q, &parent, &cl,
3006 t->tcm_ifindex, t->tcm_block_index, extack);
3007 if (IS_ERR(block))
3008 return PTR_ERR(block);
3010 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3011 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3012 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3013 err = -EINVAL;
3014 goto errout_block;
3015 }
3017 mutex_lock(&block->lock);
3018 chain = tcf_chain_lookup(block, chain_index);
3019 if (n->nlmsg_type == RTM_NEWCHAIN) {
3020 if (chain) {
3021 if (tcf_chain_held_by_acts_only(chain)) {
3022 /* The chain exists only because there is
3023 * some action referencing it.
3024 */
3025 tcf_chain_hold(chain);
3026 } else {
3027 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3028 err = -EEXIST;
3029 goto errout_block_locked;
3030 }
3031 } else {
3032 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3033 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3034 err = -EINVAL;
3035 goto errout_block_locked;
3036 }
3037 chain = tcf_chain_create(block, chain_index);
3038 if (!chain) {
3039 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3040 err = -ENOMEM;
3041 goto errout_block_locked;
3042 }
3043 }
3044 } else {
3045 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3046 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3047 err = -EINVAL;
3048 goto errout_block_locked;
3049 }
3050 tcf_chain_hold(chain);
3051 }
3053 if (n->nlmsg_type == RTM_NEWCHAIN) {
3054 /* Modifying chain requires holding parent block lock. In case
3055 * the chain was successfully added, take a reference to the
3056 * chain. This ensures that an empty chain does not disappear at
3057 * the end of this function.
3058 */
3059 tcf_chain_hold(chain);
3060 chain->explicitly_created = true;
3061 }
3062 mutex_unlock(&block->lock);
3064 switch (n->nlmsg_type) {
3065 case RTM_NEWCHAIN:
3066 err = tc_chain_tmplt_add(chain, net, tca, extack);
3067 if (err) {
3068 tcf_chain_put_explicitly_created(chain);
3069 goto errout;
3070 }
3072 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3073 RTM_NEWCHAIN, false, extack);
3074 break;
3075 case RTM_DELCHAIN:
3076 tfilter_notify_chain(net, skb, block, q, parent, n,
3077 chain, RTM_DELTFILTER, extack);
3078 /* Flush the chain first as the user requested chain removal. */
3079 tcf_chain_flush(chain, true);
3080 /* In case the chain was successfully deleted, put a reference
3081 * to the chain previously taken during addition.
3082 */
3083 tcf_chain_put_explicitly_created(chain);
3084 break;
3085 case RTM_GETCHAIN:
3086 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3087 n->nlmsg_flags, n->nlmsg_type, true, extack);
3088 if (err < 0)
3089 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3090 break;
3091 default:
3092 err = -EOPNOTSUPP;
3093 NL_SET_ERR_MSG(extack, "Unsupported message type");
3094 goto errout;
3095 }
3097 errout:
3098 tcf_chain_put(chain);
3099 errout_block:
3100 tcf_block_release(q, block, true);
3101 if (err == -EAGAIN)
3102 /* Replay the request. */
3103 goto replay;
3104 return err;
3106 errout_block_locked:
3107 mutex_unlock(&block->lock);
3108 goto errout_block;
3109 }
3111 /* called with RTNL */
3112 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3114 struct net *net = sock_net(skb->sk);
3115 struct nlattr *tca[TCA_MAX + 1];
3116 struct Qdisc *q = NULL;
3117 struct tcf_block *block;
3118 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3119 struct tcf_chain *chain;
3120 long index_start;
3121 long index;
3122 int err;
3124 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3125 return skb->len;
3127 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3128 rtm_tca_policy, cb->extack);
3129 if (err)
3130 return err;
3132 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3133 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3134 if (!block)
3135 goto out;
3136 } else {
3137 const struct Qdisc_class_ops *cops;
3138 struct net_device *dev;
3139 unsigned long cl = 0;
3141 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3142 if (!dev)
3143 return skb->len;
3145 if (!tcm->tcm_parent)
3146 q = rtnl_dereference(dev->qdisc);
3147 else
3148 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3149 if (!q)
3150 goto out;
3152 cops = q->ops->cl_ops;
3153 if (!cops)
3154 goto out;
3155 if (!cops->tcf_block)
3156 goto out;
3157 if (TC_H_MIN(tcm->tcm_parent)) {
3158 cl = cops->find(q, tcm->tcm_parent);
3159 if (!cl)
3160 goto out;
3161 }
3162 block = cops->tcf_block(q, cl, NULL);
3163 if (!block)
3164 goto out;
3165 if (tcf_block_shared(block))
3166 q = NULL;
3167 }
3169 index_start = cb->args[0];
3170 index = 0;
3172 mutex_lock(&block->lock);
3173 list_for_each_entry(chain, &block->chain_list, list) {
3174 if ((tca[TCA_CHAIN] &&
3175 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3176 continue;
3177 if (index < index_start) {
3178 index++;
3179 continue;
3180 }
3181 if (tcf_chain_held_by_acts_only(chain))
3182 continue;
3183 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3184 chain->index, net, skb, block,
3185 NETLINK_CB(cb->skb).portid,
3186 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3187 RTM_NEWCHAIN, NULL);
3188 if (err <= 0)
3189 break;
3190 index++;
3191 }
3192 mutex_unlock(&block->lock);
3194 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3195 tcf_block_refcnt_put(block, true);
3196 cb->args[0] = index;
3198 out:
3199 /* If we did no progress, the error (EMSGSIZE) is real */
3200 if (skb->len == 0 && err)
3201 return err;
3202 return skb->len;
3203 }
3205 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3206 int police, struct tcf_proto *tp, u32 handle,
3207 bool use_action_miss)
3208 {
3209 int err = 0;
3211 #ifdef CONFIG_NET_CLS_ACT
3212 exts->type = 0;
3213 exts->nr_actions = 0;
3214 exts->miss_cookie_node = NULL;
3215 /* Note: we do not own yet a reference on net.
3216 * This reference might be taken later from tcf_exts_get_net().
3217 */
3218 exts->net = net;
3219 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3220 GFP_KERNEL);
3221 if (!exts->actions)
3222 return -ENOMEM;
3223 #endif
3225 exts->action = action;
3226 exts->police = police;
3228 if (!use_action_miss)
3229 return 0;
3231 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3232 if (err)
3233 goto err_miss_alloc;
3235 return 0;
3237 err_miss_alloc:
3238 tcf_exts_destroy(exts);
3239 #ifdef CONFIG_NET_CLS_ACT
3240 exts->actions = NULL;
3241 #endif
3242 return err;
3243 }
3244 EXPORT_SYMBOL(tcf_exts_init_ex);
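/* Editor's illustrative sketch (not part of the original file, compiled out):
 * typical classifier-side use of tcf_exts_init_ex(). Passing
 * use_action_miss=true allocates the miss cookie base so hardware can report
 * partially executed action lists. "example_filter" and the TCA_EXAMPLE_*
 * attribute ids are hypothetical.
 */
#if 0
static int example_exts_init(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, u32 handle)
{
	return tcf_exts_init_ex(&f->exts, net, TCA_EXAMPLE_ACT,
				TCA_EXAMPLE_POLICE, tp, handle, true);
}
#endif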
3246 void tcf_exts_destroy(struct tcf_exts *exts)
3248 tcf_exts_miss_cookie_base_destroy(exts);
3250 #ifdef CONFIG_NET_CLS_ACT
3251 if (exts->actions) {
3252 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3253 kfree(exts->actions);
3254 }
3255 exts->nr_actions = 0;
3256 #endif
3257 }
3258 EXPORT_SYMBOL(tcf_exts_destroy);
3260 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3261 struct nlattr *rate_tlv, struct tcf_exts *exts,
3262 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3264 #ifdef CONFIG_NET_CLS_ACT
3265 {
3266 int init_res[TCA_ACT_MAX_PRIO] = {};
3267 struct tc_action *act;
3268 size_t attr_size = 0;
3270 if (exts->police && tb[exts->police]) {
3271 struct tc_action_ops *a_o;
3273 a_o = tc_action_load_ops(tb[exts->police], true,
3274 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3275 extack);
3276 if (IS_ERR(a_o))
3277 return PTR_ERR(a_o);
3278 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3279 act = tcf_action_init_1(net, tp, tb[exts->police],
3280 rate_tlv, a_o, init_res, flags,
3281 extack);
3282 module_put(a_o->owner);
3283 if (IS_ERR(act))
3284 return PTR_ERR(act);
3286 act->type = exts->type = TCA_OLD_COMPAT;
3287 exts->actions[0] = act;
3288 exts->nr_actions = 1;
3289 tcf_idr_insert_many(exts->actions);
3290 } else if (exts->action && tb[exts->action]) {
3291 int err;
3293 flags |= TCA_ACT_FLAGS_BIND;
3294 err = tcf_action_init(net, tp, tb[exts->action],
3295 rate_tlv, exts->actions, init_res,
3296 &attr_size, flags, fl_flags,
3297 extack);
3298 if (err < 0)
3299 return err;
3300 exts->nr_actions = err;
3301 }
3302 }
3303 #else
3304 if ((exts->action && tb[exts->action]) ||
3305 (exts->police && tb[exts->police])) {
3306 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3307 return -EOPNOTSUPP;
3308 }
3309 #endif
3311 return 0;
3312 }
3313 EXPORT_SYMBOL(tcf_exts_validate_ex);
3315 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3316 struct nlattr *rate_tlv, struct tcf_exts *exts,
3317 u32 flags, struct netlink_ext_ack *extack)
3319 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3320 flags, 0, extack);
3321 }
3322 EXPORT_SYMBOL(tcf_exts_validate);
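/* Editor's illustrative sketch (not part of the original file, compiled out):
 * the common validate-then-commit pattern built on the helper above, as seen
 * in classifier ->change() paths. All "example_*" names are hypothetical and
 * error handling is reduced to the essentials.
 */
#if 0
static int example_set_parms(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, struct nlattr **tb,
			     struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	/* parse classifier-specific attributes into *f here */
	return 0;
}
#endif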
3324 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3326 #ifdef CONFIG_NET_CLS_ACT
3327 struct tcf_exts old = *dst;
3329 *dst = *src;
3330 tcf_exts_destroy(&old);
3331 #endif
3332 }
3333 EXPORT_SYMBOL(tcf_exts_change);
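/* Usage note (editor's sketch, compiled out): tcf_exts_change() commits
 * validated extensions into a live filter and frees the previous actions via
 * the stack copy, so callers must not reference the old exts afterwards.
 * "f" and "new_exts" are hypothetical.
 */
#if 0
	/* new_exts was filled by tcf_exts_validate() above */
	tcf_exts_change(&f->exts, &new_exts);
#endif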
3335 #ifdef CONFIG_NET_CLS_ACT
3336 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3338 if (exts->nr_actions == 0)
3339 return NULL;
3340 else
3341 return exts->actions[0];
3342 }
3345 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3347 #ifdef CONFIG_NET_CLS_ACT
3348 struct nlattr *nest;
3350 if (exts->action && tcf_exts_has_actions(exts)) {
3351 /*
3352 * again for backward compatible mode - we want
3353 * to work with both old and new modes of entering
3354 * tc data even if iproute2 was newer - jhs
3355 */
3356 if (exts->type != TCA_OLD_COMPAT) {
3357 nest = nla_nest_start_noflag(skb, exts->action);
3358 if (nest == NULL)
3359 goto nla_put_failure;
3361 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3362 < 0)
3363 goto nla_put_failure;
3364 nla_nest_end(skb, nest);
3365 } else if (exts->police) {
3366 struct tc_action *act = tcf_exts_first_act(exts);
3367 nest = nla_nest_start_noflag(skb, exts->police);
3368 if (nest == NULL || !act)
3369 goto nla_put_failure;
3370 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3371 goto nla_put_failure;
3372 nla_nest_end(skb, nest);
3373 }
3374 }
3375 return 0;
3377 nla_put_failure:
3378 nla_nest_cancel(skb, nest);
3379 return -1;
3380 #else
3381 return 0;
3382 #endif
3383 }
3384 EXPORT_SYMBOL(tcf_exts_dump);
3386 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3388 #ifdef CONFIG_NET_CLS_ACT
3389 struct nlattr *nest;
3391 if (!exts->action || !tcf_exts_has_actions(exts))
3392 return 0;
3394 nest = nla_nest_start_noflag(skb, exts->action);
3395 if (!nest)
3396 goto nla_put_failure;
3398 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3399 goto nla_put_failure;
3400 nla_nest_end(skb, nest);
3401 return 0;
3403 nla_put_failure:
3404 nla_nest_cancel(skb, nest);
3405 return -1;
3406 #else
3407 return 0;
3408 #endif
3409 }
3410 EXPORT_SYMBOL(tcf_exts_terse_dump);
3412 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3414 #ifdef CONFIG_NET_CLS_ACT
3415 struct tc_action *a = tcf_exts_first_act(exts);
3416 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3417 return -1;
3418 #endif
3419 return 0;
3420 }
3421 EXPORT_SYMBOL(tcf_exts_dump_stats);
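/* Editor's illustrative sketch (not part of the original file, compiled out):
 * a classifier ->dump() emitting its actions and their stats with the two
 * helpers above; "example_filter" is hypothetical and classifier-specific
 * attributes are omitted.
 */
#if 0
static int example_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct example_filter *f = fh;

	if (!f)
		return skb->len;
	if (tcf_exts_dump(skb, &f->exts) < 0)
		return -1;
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		return -1;
	return skb->len;
}
#endif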
3423 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3425 if (*flags & TCA_CLS_FLAGS_IN_HW)
3426 return;
3427 *flags |= TCA_CLS_FLAGS_IN_HW;
3428 atomic_inc(&block->offloadcnt);
3431 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3433 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3434 return;
3435 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3436 atomic_dec(&block->offloadcnt);
3439 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3440 struct tcf_proto *tp, u32 *cnt,
3441 u32 *flags, u32 diff, bool add)
3443 lockdep_assert_held(&block->cb_lock);
3445 spin_lock(&tp->lock);
3446 if (add) {
3447 if (!*cnt)
3448 tcf_block_offload_inc(block, flags);
3449 *cnt += diff;
3450 } else {
3451 *cnt -= diff;
3452 if (!*cnt)
3453 tcf_block_offload_dec(block, flags);
3454 }
3455 spin_unlock(&tp->lock);
3456 }
3458 static void
3459 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3460 u32 *cnt, u32 *flags)
3462 lockdep_assert_held(&block->cb_lock);
3464 spin_lock(&tp->lock);
3465 tcf_block_offload_dec(block, flags);
3466 *cnt = 0;
3467 spin_unlock(&tp->lock);
3468 }
3470 static int
3471 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3472 void *type_data, bool err_stop)
3474 struct flow_block_cb *block_cb;
3475 int ok_count = 0;
3476 int err;
3478 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3479 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3480 if (err) {
3481 if (err_stop)
3482 return err;
3483 } else {
3484 ok_count++;
3485 }
3486 }
3487 return ok_count;
3488 }
3490 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3491 void *type_data, bool err_stop, bool rtnl_held)
3493 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3494 int ok_count;
3496 retry:
3497 if (take_rtnl)
3498 rtnl_lock();
3499 down_read(&block->cb_lock);
3500 /* Need to obtain rtnl lock if block is bound to devs that require it.
3501 * In block bind code cb_lock is obtained while holding rtnl, so we must
3502 * obtain the locks in same order here.
3503 */
3504 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3505 up_read(&block->cb_lock);
3506 take_rtnl = true;
3507 goto retry;
3508 }
3510 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3512 up_read(&block->cb_lock);
3513 if (take_rtnl)
3514 rtnl_unlock();
3515 return ok_count;
3516 }
3517 EXPORT_SYMBOL(tc_setup_cb_call);
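/* Editor's illustrative sketch (not part of the original file, compiled out):
 * the read-only variant above is what flower-style classifiers use to pull
 * hardware counters; err_stop is false because stats are best-effort. The
 * wrapping function and "example_filter" are hypothetical.
 */
#if 0
static void example_hw_update_stats(struct tcf_block *block,
				    struct example_filter *f, bool rtnl_held)
{
	struct flow_cls_offload cls = {};

	cls.command = FLOW_CLS_STATS;
	cls.cookie = (unsigned long)f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls, false, rtnl_held);
}
#endif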
3519 /* Non-destructive filter add. If filter that wasn't already in hardware is
3520 * successfully offloaded, increment block offloads counter. On failure,
3521 * previously offloaded filter is considered to be intact and offloads counter
3522 * is not decremented.
3523 */
3525 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3526 enum tc_setup_type type, void *type_data, bool err_stop,
3527 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3529 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3530 int ok_count;
3532 retry:
3533 if (take_rtnl)
3534 rtnl_lock();
3535 down_read(&block->cb_lock);
3536 /* Need to obtain rtnl lock if block is bound to devs that require it.
3537 * In block bind code cb_lock is obtained while holding rtnl, so we must
3538 * obtain the locks in same order here.
3539 */
3540 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3541 up_read(&block->cb_lock);
3542 take_rtnl = true;
3543 goto retry;
3544 }
3546 /* Make sure all netdevs sharing this block are offload-capable. */
3547 if (block->nooffloaddevcnt && err_stop) {
3548 ok_count = -EOPNOTSUPP;
3549 goto err_unlock;
3550 }
3552 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3553 if (ok_count < 0)
3554 goto err_unlock;
3556 if (tp->ops->hw_add)
3557 tp->ops->hw_add(tp, type_data);
3558 if (ok_count > 0)
3559 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3560 ok_count, true);
3561 err_unlock:
3562 up_read(&block->cb_lock);
3563 if (take_rtnl)
3564 rtnl_unlock();
3565 return min(ok_count, 0);
3566 }
3567 EXPORT_SYMBOL(tc_setup_cb_add);
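/* Editor's illustrative sketch (not part of the original file, compiled out):
 * driving tc_setup_cb_add() from a classifier and enforcing skip_sw
 * semantics with tc_skip_sw()/tc_in_hw() afterwards. The "example_filter"
 * fields are hypothetical.
 */
#if 0
static int example_hw_replace(struct tcf_proto *tp, struct example_filter *f,
			      bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	struct flow_cls_offload cls = {};
	int err;

	cls.command = FLOW_CLS_REPLACE;
	cls.cookie = (unsigned long)f;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls, skip_sw,
			      &f->flags, &f->in_hw_count, rtnl_held);
	if (err)
		return err;

	/* skip_sw filters must actually make it into hardware */
	if (skip_sw && !tc_in_hw(f->flags))
		return -EINVAL;
	return 0;
}
#endif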
3569 /* Destructive filter replace. If filter that wasn't already in hardware is
3570 * successfully offloaded, increment block offload counter. On failure,
3571 * previously offloaded filter is considered to be destroyed and offload counter
3572 * is decremented.
3573 */
3575 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3576 enum tc_setup_type type, void *type_data, bool err_stop,
3577 u32 *old_flags, unsigned int *old_in_hw_count,
3578 u32 *new_flags, unsigned int *new_in_hw_count,
3579 bool rtnl_held)
3580 {
3581 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3582 int ok_count;
3584 retry:
3585 if (take_rtnl)
3586 rtnl_lock();
3587 down_read(&block->cb_lock);
3588 /* Need to obtain rtnl lock if block is bound to devs that require it.
3589 * In block bind code cb_lock is obtained while holding rtnl, so we must
3590 * obtain the locks in same order here.
3591 */
3592 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3593 up_read(&block->cb_lock);
3594 take_rtnl = true;
3595 goto retry;
3596 }
3598 /* Make sure all netdevs sharing this block are offload-capable. */
3599 if (block->nooffloaddevcnt && err_stop) {
3600 ok_count = -EOPNOTSUPP;
3601 goto err_unlock;
3602 }
3604 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3605 if (tp->ops->hw_del)
3606 tp->ops->hw_del(tp, type_data);
3608 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3609 if (ok_count < 0)
3610 goto err_unlock;
3612 if (tp->ops->hw_add)
3613 tp->ops->hw_add(tp, type_data);
3614 if (ok_count > 0)
3615 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3616 new_flags, ok_count, true);
3617 err_unlock:
3618 up_read(&block->cb_lock);
3619 if (take_rtnl)
3620 rtnl_unlock();
3621 return min(ok_count, 0);
3622 }
3623 EXPORT_SYMBOL(tc_setup_cb_replace);
3625 /* Destroy filter and decrement block offload counter, if filter was previously
3626 * offloaded.
3627 */
3629 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3630 enum tc_setup_type type, void *type_data, bool err_stop,
3631 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3632 {
3633 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3634 int ok_count;
3636 retry:
3637 if (take_rtnl)
3638 rtnl_lock();
3639 down_read(&block->cb_lock);
3640 /* Need to obtain rtnl lock if block is bound to devs that require it.
3641 * In block bind code cb_lock is obtained while holding rtnl, so we must
3642 * obtain the locks in same order here.
3643 */
3644 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3645 up_read(&block->cb_lock);
3646 take_rtnl = true;
3647 goto retry;
3648 }
3650 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3652 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3653 if (tp->ops->hw_del)
3654 tp->ops->hw_del(tp, type_data);
3656 up_read(&block->cb_lock);
3657 if (take_rtnl)
3658 rtnl_unlock();
3659 return min(ok_count, 0);
3660 }
3661 EXPORT_SYMBOL(tc_setup_cb_destroy);
3663 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3664 bool add, flow_setup_cb_t *cb,
3665 enum tc_setup_type type, void *type_data,
3666 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3667 {
3668 int err = cb(type, type_data, cb_priv);
3670 if (err) {
3671 if (add && tc_skip_sw(*flags))
3672 return err;
3673 return 0;
3674 }
3675 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3676 add);
3677 return 0;
3678 }
3680 EXPORT_SYMBOL(tc_setup_cb_reoffload);
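/* Editor's illustrative sketch (not part of the original file, compiled out):
 * a classifier ->reoffload() replays every filter to one newly (un)bound
 * callback through the helper above. "example_head"/"example_filter" and
 * their list are hypothetical.
 */
#if 0
static int example_reoffload(struct tcf_proto *tp, bool add,
			     flow_setup_cb_t *cb, void *cb_priv,
			     struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct example_head *head = rtnl_dereference(tp->root);
	struct example_filter *f;
	int err;

	list_for_each_entry(f, &head->filters, link) {
		struct flow_cls_offload cls = {};

		cls.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls.cookie = (unsigned long)f;

		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls, cb_priv,
					    &f->flags, &f->in_hw_count);
		if (err)
			return err;
	}
	return 0;
}
#endif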
3682 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3683 const struct tc_action *act)
3684 {
3685 struct tc_cookie *user_cookie;
3686 int err = 0;
3688 rcu_read_lock();
3689 user_cookie = rcu_dereference(act->user_cookie);
3690 if (user_cookie) {
3691 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3692 user_cookie->len,
3693 GFP_ATOMIC);
3694 if (!entry->user_cookie)
3695 err = -ENOMEM;
3696 }
3697 rcu_read_unlock();
3698 return err;
3699 }
3701 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3703 flow_action_cookie_destroy(entry->user_cookie);
3706 void tc_cleanup_offload_action(struct flow_action *flow_action)
3707 {
3708 struct flow_action_entry *entry;
3709 int i;
3711 flow_action_for_each(i, entry, flow_action) {
3712 tcf_act_put_user_cookie(entry);
3713 if (entry->destructor)
3714 entry->destructor(entry->destructor_priv);
3715 }
3716 }
3717 EXPORT_SYMBOL(tc_cleanup_offload_action);
3719 static int tc_setup_offload_act(struct tc_action *act,
3720 struct flow_action_entry *entry,
3721 u32 *index_inc,
3722 struct netlink_ext_ack *extack)
3723 {
3724 #ifdef CONFIG_NET_CLS_ACT
3725 if (act->ops->offload_act_setup) {
3726 return act->ops->offload_act_setup(act, entry, index_inc, true,
3727 extack);
3728 } else {
3729 NL_SET_ERR_MSG(extack, "Action does not support offload");
3730 return -EOPNOTSUPP;
3731 }
3732 #else
3733 return 0;
3734 #endif
3735 }
3737 int tc_setup_action(struct flow_action *flow_action,
3738 struct tc_action *actions[],
3739 u32 miss_cookie_base,
3740 struct netlink_ext_ack *extack)
3742 int i, j, k, index, err = 0;
3743 struct tc_action *act;
3745 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3746 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3747 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3749 if (!actions)
3750 return 0;
3752 j = 0;
3753 tcf_act_for_each_action(i, act, actions) {
3754 struct flow_action_entry *entry;
3756 entry = &flow_action->entries[j];
3757 spin_lock_bh(&act->tcfa_lock);
3758 err = tcf_act_get_user_cookie(entry, act);
3759 if (err)
3760 goto err_out_locked;
3762 index = 0;
3763 err = tc_setup_offload_act(act, entry, &index, extack);
3764 if (err)
3765 goto err_out_locked;
3767 for (k = 0; k < index ; k++) {
3768 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3769 entry[k].hw_index = act->tcfa_index;
3770 entry[k].cookie = (unsigned long)act;
3771 entry[k].miss_cookie =
3772 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3773 }
3775 j += index;
3777 spin_unlock_bh(&act->tcfa_lock);
3778 }
3780 return 0;
3781 err_out:
3782 tc_cleanup_offload_action(flow_action);
3783 return err;
3785 err_out_locked:
3786 spin_unlock_bh(&act->tcfa_lock);
3787 goto err_out;
3788 }
3790 int tc_setup_offload_action(struct flow_action *flow_action,
3791 const struct tcf_exts *exts,
3792 struct netlink_ext_ack *extack)
3794 #ifdef CONFIG_NET_CLS_ACT
3795 u32 miss_cookie_base;
3797 if (!exts)
3798 return 0;
3800 miss_cookie_base = exts->miss_cookie_node ?
3801 exts->miss_cookie_node->miss_cookie_base : 0;
3802 return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3803 extack);
3804 #else
3805 return 0;
3806 #endif
3807 }
3808 EXPORT_SYMBOL(tc_setup_offload_action);
3810 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3812 unsigned int num_acts = 0;
3813 struct tc_action *act;
3814 int i;
3816 tcf_exts_for_each_action(i, act, exts) {
3817 if (is_tcf_pedit(act))
3818 num_acts += tcf_pedit_nkeys(act);
3819 else
3820 num_acts++;
3821 }
3822 return num_acts;
3823 }
3824 EXPORT_SYMBOL(tcf_exts_num_actions);
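/* Editor's illustrative sketch (not part of the original file, compiled out):
 * sizing a flow_rule from tcf_exts_num_actions() before translating the
 * actions with tc_setup_offload_action(); on teardown the per-entry cookies
 * go back through tc_cleanup_offload_action(). The wrapper is hypothetical.
 */
#if 0
static struct flow_rule *example_build_rule(struct tcf_exts *exts,
					    struct netlink_ext_ack *extack)
{
	struct flow_rule *rule;
	int err;

	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
	if (!rule)
		return ERR_PTR(-ENOMEM);

	err = tc_setup_offload_action(&rule->action, exts, extack);
	if (err) {
		kfree(rule);
		return ERR_PTR(err);
	}
	return rule;
}
#endif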
3826 #ifdef CONFIG_NET_CLS_ACT
3827 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3828 u32 *p_block_index,
3829 struct netlink_ext_ack *extack)
3830 {
3831 *p_block_index = nla_get_u32(block_index_attr);
3832 if (!*p_block_index) {
3833 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3834 return -EINVAL;
3835 }
3837 return 0;
3838 }
3840 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3841 enum flow_block_binder_type binder_type,
3842 struct nlattr *block_index_attr,
3843 struct netlink_ext_ack *extack)
3844 {
3845 u32 block_index;
3846 int err;
3848 if (!block_index_attr)
3849 return 0;
3851 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3852 if (err)
3853 return err;
3855 qe->info.binder_type = binder_type;
3856 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3857 qe->info.chain_head_change_priv = &qe->filter_chain;
3858 qe->info.block_index = block_index;
3860 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3861 }
3862 EXPORT_SYMBOL(tcf_qevent_init);
3864 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3866 if (qe->info.block_index)
3867 tcf_block_put_ext(qe->block, sch, &qe->info);
3868 }
3869 EXPORT_SYMBOL(tcf_qevent_destroy);
3871 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3872 struct netlink_ext_ack *extack)
3873 {
3874 u32 block_index;
3875 int err;
3877 if (!block_index_attr)
3878 return 0;
3880 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3881 if (err)
3882 return err;
3884 /* Bounce newly-configured block or change in block. */
3885 if (block_index != qe->info.block_index) {
3886 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3887 return -EINVAL;
3888 }
3890 return 0;
3891 }
3892 EXPORT_SYMBOL(tcf_qevent_validate_change);
3894 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3895 struct sk_buff **to_free, int *ret)
3897 struct tcf_result cl_res;
3898 struct tcf_proto *fl;
3900 if (!qe->info.block_index)
3901 return skb;
3903 fl = rcu_dereference_bh(qe->filter_chain);
3905 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3906 case TC_ACT_SHOT:
3907 qdisc_qstats_drop(sch);
3908 __qdisc_drop(skb, to_free);
3909 *ret = __NET_XMIT_BYPASS;
3910 return NULL;
3911 case TC_ACT_STOLEN:
3912 case TC_ACT_QUEUED:
3913 case TC_ACT_TRAP:
3914 __qdisc_drop(skb, to_free);
3915 *ret = __NET_XMIT_STOLEN;
3916 return NULL;
3917 case TC_ACT_REDIRECT:
3918 skb_do_redirect(skb);
3919 *ret = __NET_XMIT_STOLEN;
3920 return NULL;
3921 }
3923 return skb;
3924 }
3925 EXPORT_SYMBOL(tcf_qevent_handle);
3927 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3929 if (!qe->info.block_index)
3930 return 0;
3931 return nla_put_u32(skb, attr_name, qe->info.block_index);
3932 }
3933 EXPORT_SYMBOL(tcf_qevent_dump);
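/* Editor's illustrative sketch (not part of the original file, compiled out):
 * the qevent lifecycle from a qdisc's point of view, modeled on how RED
 * binds its early_drop block. "example_sched" and TCA_EXAMPLE_BLOCK are
 * hypothetical; the binder type comes from enum flow_block_binder_type.
 */
#if 0
static int example_qdisc_init(struct Qdisc *sch, struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct example_sched *q = qdisc_priv(sch);

	return tcf_qevent_init(&q->qe_drop, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			       tb[TCA_EXAMPLE_BLOCK], extack);
}

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	struct example_sched *q = qdisc_priv(sch);
	int ret;

	/* On a drop decision, give the qevent block a chance to act. */
	skb = tcf_qevent_handle(&q->qe_drop, sch, skb, to_free, &ret);
	if (!skb)
		return ret;

	return qdisc_drop(skb, sch, to_free);
}
#endif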
3936 static __net_init int tcf_net_init(struct net *net)
3938 struct tcf_net *tn = net_generic(net, tcf_net_id);
3940 spin_lock_init(&tn->idr_lock);
3941 idr_init(&tn->idr);
3942 return 0;
3943 }
3945 static void __net_exit tcf_net_exit(struct net *net)
3947 struct tcf_net *tn = net_generic(net, tcf_net_id);
3949 idr_destroy(&tn->idr);
3950 }
3952 static struct pernet_operations tcf_net_ops = {
3953 .init = tcf_net_init,
3954 .exit = tcf_net_exit,
3955 .id = &tcf_net_id,
3956 .size = sizeof(struct tcf_net),
3957 };
3959 static int __init tc_filter_init(void)
3960 {
3961 int err;
3963 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3964 if (!tc_filter_wq)
3965 return -ENOMEM;
3967 err = register_pernet_subsys(&tcf_net_ops);
3968 if (err)
3969 goto err_register_pernet_subsys;
3971 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
3973 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3974 RTNL_FLAG_DOIT_UNLOCKED);
3975 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3976 RTNL_FLAG_DOIT_UNLOCKED);
3977 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3978 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3979 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3980 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3981 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3982 tc_dump_chain, 0);
3984 return 0;
3986 err_register_pernet_subsys:
3987 destroy_workqueue(tc_filter_wq);
3988 return err;
3989 }
3991 subsys_initcall(tc_filter_init);