// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

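/*
 * Typical use (a sketch based on how forwarding actions such as act_mirred
 * use this helper): the action passes the normal transmit path as the
 * callback, so the fragmentation hook is only taken when the static key has
 * been enabled by sch_frag:
 *
 *	err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
 */
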
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);

static void tcf_free_cookie_rcu(struct rcu_head *p)
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
		call_rcu(&old->rcu, tcf_free_cookie_rcu);

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;

		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		NL_SET_ERR_MSG(extack, "invalid control action");

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
EXPORT_SYMBOL(tcf_action_set_ctrlact);

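/*
 * A sketch of how the two helpers above are normally paired inside an
 * action's ->init() callback (modelled on act_gact; local variable names are
 * illustrative):
 *
 *	struct tcf_chain *goto_ch = NULL;
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		goto release_idr;
 *	...
 *	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */
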
/* XXX: For standalone actions, we don't need an RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after an RCU grace period actions are already disconnected
 * from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
		tcf_chain_put_by_act(chain);

static void tcf_action_cleanup(struct tc_action *p)
	gen_kill_estimator(&p->tcfa_rate_est);

static int __tcf_action_put(struct tc_action *p, bool bind)
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		atomic_dec(&p->tcfa_bindcnt);

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
	/* Release with strict==1 and bind==0 is only called through the act API
	 * interface (classifiers always bind). The only case in which an action
	 * with a positive reference count and a zero bind count can exist is
	 * when it was also created through the act API (unbinding the last
	 * classifier will destroy the action if it was created by a
	 * classifier). So the only case in which the bind count can change
	 * after the initial check is when an unbound action is destroyed
	 * through the act API while a classifier concurrently binds to an
	 * action with the same id. This results either in creation of a new
	 * action (same behavior as before), or in reuse of the existing action
	 * if the concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)

	if (__tcf_action_put(p, bind))

int tcf_idr_release(struct tc_action *a, bool bind)
	const struct tc_action_ops *ops = a->ops;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
	struct tc_cookie *act_cookie;

	act_cookie = rcu_dereference(act->act_cookie);
		cookie_len = nla_total_size(act_cookie->len);

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */

static size_t tcf_action_full_attrs_size(size_t sz)
	return NLMSG_HDRLEN                     /* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)             /* TCA_ACT_TAB nested */

static size_t tcf_action_fill_size(const struct tc_action *act)
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;

tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	cookie = rcu_dereference(a->act_cookie);
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			goto nla_put_failure;

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct idr *idr = &idrinfo->action_idr;
	unsigned long id = 1;

	mutex_lock(&idrinfo->lock);

	idr_for_each_entry_ul(idr, p, tmp, id) {
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))

		nest = nla_nest_start_noflag(skb, n_i);
			goto nla_put_failure;
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
			nlmsg_trim(skb, nest);
		nla_nest_end(skb, nest);

		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
	cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)

	nla_nest_cancel(skb, nest);

static int tcf_idr_release_unsafe(struct tc_action *p)
	if (atomic_read(&p->tcfa_bindcnt) > 0)

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
	struct idr *idr = &idrinfo->action_idr;
	unsigned long id = 1;

	nest = nla_nest_start_noflag(skb, 0);
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
	mutex_unlock(&idrinfo->lock);
		NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		goto nla_put_failure;

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	WARN(1, "tcf_generic_walker: unknown command %d\n", type);
	NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
EXPORT_SYMBOL(tcf_generic_walker);

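/*
 * Individual actions usually implement their ->walk() callback as a thin
 * wrapper around tcf_generic_walker() (sketch, modelled on act_gact):
 *
 *	static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
 *				   struct netlink_callback *cb, int type,
 *				   const struct tc_action_ops *ops,
 *				   struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, gact_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */
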
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);
EXPORT_SYMBOL(tcf_idr_search);

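/*
 * Likewise, the per-action ->lookup() callbacks are usually one-liners around
 * tcf_idr_search() (sketch, modelled on act_gact):
 *
 *	static int tcf_gact_search(struct net *net, struct tc_action **a,
 *				   u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, gact_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */
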
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
		mutex_unlock(&idrinfo->lock);

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);

	mutex_unlock(&idrinfo->lock);

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	refcount_set(&p->tcfa_refcnt, 1);
		atomic_set(&p->tcfa_bindcnt, 1);

		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK;
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_lock, NULL, est);
	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	free_percpu(p->cpu_qstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_bstats);
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
	/* Set cpustats according to the action's flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is found,
 * increment its reference and bind counters and return 1. Otherwise insert a
 * temporary error pointer (to prevent concurrent users from inserting actions
 * with the same index) and return 0.
 */
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);

		p = idr_find(&idrinfo->action_idr, *index);
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);

			refcount_inc(&p->tcfa_refcnt);
				atomic_inc(&p->tcfa_bindcnt);

			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
	mutex_unlock(&idrinfo->lock);
EXPORT_SYMBOL(tcf_idr_check_alloc);

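/*
 * Rough shape of how an action's ->init() uses the idr helpers above
 * (a sketch only; parameter parsing and most error handling omitted):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {
 *		// index was free: reserve it and create a new action
 *		err = tcf_idr_create_from_flags(tn, index, est, a, ops,
 *						bind, flags);
 *		if (err)
 *			tcf_idr_cleanup(tn, index);
 *	} else if (err > 0) {
 *		// existing action found, its refcount already taken
 *		if (bind)
 *			return 0;
 *		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	}
 */
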
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
	struct idr *idr = &idrinfo->action_idr;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
	idr_destroy(&idrinfo->action_idr);
EXPORT_SYMBOL(tcf_idrinfo_destroy);

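/*
 * This is normally reached from an action module's pernet ->exit path (the
 * tc_action_net_exit() helper), so that any actions still present in the idr
 * are released together with the per-netns state.
 */
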
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
	struct tc_action_ops *a;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * net structure.
	 */
	ret = register_pernet_subsys(ops);

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
	struct tc_action_ops *a;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
			list_del(&act->head);
	write_unlock(&act_mod_lock);

	unregister_pernet_subsys(ops);
EXPORT_SYMBOL(tcf_unregister_action);

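/*
 * Action modules register their tc_action_ops together with the pernet ops
 * that set up the per-netns idr. A minimal sketch of a module's init/exit,
 * using the naming conventions of the in-tree actions:
 *
 *	static int __init gact_init_module(void)
 *	{
 *		return tcf_register_action(&act_gact_ops, &gact_net_ops);
 *	}
 *
 *	static void __exit gact_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_gact_ops, &gact_net_ops);
 *	}
 */
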
static struct tc_action_ops *tc_lookup_action_n(char *kind)
	struct tc_action_ops *a, *res = NULL;

		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
		read_unlock(&act_mod_lock);

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
	struct tc_action_ops *a, *res = NULL;

		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
		read_unlock(&act_mod_lock);

/* TCA_ACT_MAX_PRIO is 32; a jump index can count up to 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */

	if (skb_skip_tc_classify(skb))

	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
		ret = a->ops->act(skb, a, res);

		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */

				goto restart_act_graph;
			else /* faulty graph, stop pipeline */
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
			tcf_action_goto_chain_exec(a, res);

		if (ret != TC_ACT_PIPE)
EXPORT_SYMBOL(tcf_action_exec);

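/*
 * Classifiers normally reach tcf_action_exec() through tcf_exts_exec() in
 * include/net/pkt_cls.h, which hands over the filter's action array, roughly:
 *
 *	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
 */
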
int tcf_action_destroy(struct tc_action *actions[], int bind)
	const struct tc_action_ops *ops;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);

static int tcf_action_put(struct tc_action *p)
	return __tcf_action_put(p, false);

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (tcf_action_put(a))
			module_put(ops->owner);

tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
	return a->ops->dump(skb, a, bind, ref);

tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
	unsigned char *b = skb_tail_pointer(skb);

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
		nla_nest_end(skb, nest);
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
	int err = -EINVAL, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		nest = nla_nest_start_noflag(skb, i + 1);
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, they do not care about the
	 * stats type. Return "any" in that case, which means all supported
	 * types are set.
	 */
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND] = { .type = NLA_STRING },
	[TCA_ACT_INDEX] = { .type = NLA_U32 },
	[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
			     .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
	[TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),

void tcf_idr_insert_many(struct tc_action *actions[])
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace the ERR_PTR(-EBUSY) placeholder inserted by
		 * tcf_idr_check_alloc() if the action was just created;
		 * otherwise this is a no-op.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);

struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
					 struct netlink_ext_ack *extack)
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];

		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		kind = tb[TCA_ACT_KIND];
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);

	a_o = tc_lookup_action_n(act_name);
#ifdef CONFIG_MODULES
		request_module("act_%s", act_name);

		a_o = tc_lookup_action_n(act_name);
		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;

	/* backward compatibility for policer */
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	a->hw_stats = hw_stats;

		kfree(cookie->data);
	return ERR_PTR(err);

/* Returns the number of initialized actions or a negative error. */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size, u32 flags,
		    struct netlink_ext_ack *extack)
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
					 !(flags & TCA_ACT_FLAGS_NO_RTNL),

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);

	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
			module_put(ops[i]->owner);

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
EXPORT_SYMBOL(tcf_action_update_stats);

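/*
 * Helper for actions to fold byte/packet/drop counters into either the
 * per-CPU or the lock-protected counters. A typical caller is an action's
 * ->stats_update() offload callback (sketch; the argument list beyond the
 * action pointer mirrors that callback's own parameters):
 *
 *	tcf_action_update_stats(a, bytes, packets, drops, hw);
 */
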
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (p->type == TCA_OLD_COMPAT)
		err = gnet_stats_start_copy_compat(skb, 0,
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  p->tcfa_qstats.qlen) < 0)

	if (gnet_stats_finish_copy(&d) < 0)

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");

	return rtnl_unicast(skb, net, portid);

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);

	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");

	module_put(ops->owner);
	module_put(ops->owner);
	return ERR_PTR(err);

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);

	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
		nla_nest_cancel(skb, nest);
		goto out_module_put;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	module_put(ops->owner);

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently, so we must save their
		 * type and id to search again after the reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);

		/* now do the delete */
		ret = tcf_idr_delete_index(idrinfo, act_index);

tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);

tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);

	tcf_action_put_many(actions);

tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, extack);

	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
	tcf_action_put_many(actions);

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						  TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
		/* We are going to assume that all other flags imply
		 * create-only-if-it-doesn't-exist. Note that CREATE | EXCL
		 * implies that, but since we want to avoid ambiguity
		 * (e.g. when flags is zero) we just set this.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);

static struct nlattr *find_dump_kind(struct nlattr **nla)
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)

	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
	kind = tb2[TCA_ACT_KIND];

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);

	kind = find_dump_kind(tb);
		pr_info("tc_dump_action: action bad kind\n");

	a_o = tc_lookup_action(kind);

	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
		goto out_module_put;

	jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
		goto out_module_put;

		nla_nest_end(skb, nest);

		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);

	module_put(a_o->owner);

static int __init tc_action_init(void)
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,

subsys_initcall(tc_action_init);