1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include <linux/rtnetlink.h>
12 #include <net/geneve.h>
13 #include <net/vxlan.h>
14 #include <net/erspan.h>
15 #include <net/netlink.h>
16 #include <net/pkt_sched.h>
18 #include <net/pkt_cls.h>
20 #include <linux/tc_act/tc_tunnel_key.h>
21 #include <net/tc_act/tc_tunnel_key.h>
23 static struct tc_action_ops act_tunnel_key_ops;
25 static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
26 struct tcf_result *res)
28 struct tcf_tunnel_key *t = to_tunnel_key(a);
29 struct tcf_tunnel_key_params *params;
32 params = rcu_dereference_bh(t->params);
34 tcf_lastuse_update(&t->tcf_tm);
35 tcf_action_update_bstats(&t->common, skb);
36 action = READ_ONCE(t->tcf_action);
38 switch (params->tcft_action) {
39 case TCA_TUNNEL_KEY_ACT_RELEASE:
42 case TCA_TUNNEL_KEY_ACT_SET:
44 skb_dst_set(skb, dst_clone(¶ms->tcft_enc_metadata->dst));
47 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
/* Netlink policy for the TCA_TUNNEL_KEY_ENC_OPTS container: one nested
 * attribute per supported tunnel-option flavor (geneve/vxlan/erspan).
 * strict_start_type enables strict validation from the VXLAN attribute
 * onward, keeping older (pre-VXLAN) userspace binaries working.
 */
55 static const struct nla_policy
56 enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
57 [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
58 .strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
59 [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
60 [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
61 [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
/* Policy for one GENEVE option: 16-bit class, 8-bit type, and variable
 * binary data (length cap on the DATA attribute appears truncated here;
 * NOTE(review): upstream bounds it to 128 bytes — confirm).
 */
64 static const struct nla_policy
65 geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
66 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
67 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
68 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
/* Policy for VXLAN options: a single 32-bit Group Based Policy value. */
72 static const struct nla_policy
73 vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
74 [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
/* Policy for ERSPAN options: version selector plus the v1 index or the
 * v2 direction/hardware-id fields (which of them is required is enforced
 * in tunnel_key_copy_erspan_opt(), not here).
 */
77 static const struct nla_policy
78 erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
79 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
80 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
81 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
82 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
/* Parse one TCA_TUNNEL_KEY_ENC_OPTS_GENEVE nest and, when dst is
 * non-NULL, serialize it as a struct geneve_opt followed by its data.
 * Returns the encoded option length (used for sizing when dst == NULL)
 * or a negative errno with extack set.
 * NOTE(review): extraction dropped lines from this body (e.g. the
 * declaration of "data" and several returns) — read against upstream.
 */
86 tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
87 struct netlink_ext_ack *extack)
89 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
90 int err, data_len, opt_len;
93 err = nla_parse_nested_deprecated(tb,
94 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
95 nla, geneve_opt_policy, extack);
/* All three sub-attributes are mandatory for a well-formed option. */
99 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
100 !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
101 !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
102 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
106 data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
107 data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
/* GENEVE option data must be >= 4 bytes and a multiple of 4. */
109 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
113 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
117 opt_len = sizeof(struct geneve_opt) + data_len;
119 struct geneve_opt *opt = dst;
/* Caller precomputed sizes via a NULL-dst pass, so this should hold. */
121 WARN_ON(dst_len < opt_len);
124 nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
125 opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
126 opt->length = data_len / 4; /* length is in units of 4 bytes */
131 memcpy(opt + 1, data, data_len);
/* Parse one TCA_TUNNEL_KEY_ENC_OPTS_VXLAN nest; when dst is non-NULL,
 * fill a struct vxlan_metadata with the GBP value (masked to the valid
 * bits).  Returns sizeof(struct vxlan_metadata) or a negative errno.
 */
138 tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
139 struct netlink_ext_ack *extack)
141 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
144 err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
145 vxlan_opt_policy, extack);
149 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
150 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
155 struct vxlan_metadata *md = dst;
157 md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
/* Only the architected GBP bits are meaningful on the wire. */
158 md->gbp &= VXLAN_GBP_MASK;
161 return sizeof(struct vxlan_metadata);
/* Parse one TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN nest.  Version 1 requires
 * INDEX; version 2 requires DIR and HWID; any other version is rejected.
 * When dst is non-NULL the fields are copied into a struct
 * erspan_metadata.  Returns sizeof(struct erspan_metadata) or -errno.
 * NOTE(review): some body lines (ver checks/returns) were dropped by the
 * extraction — compare with upstream before relying on control flow.
 */
165 tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
166 struct netlink_ext_ack *extack)
168 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
172 err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
173 erspan_opt_policy, extack);
177 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
178 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
182 ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
184 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
185 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
188 } else if (ver == 2) {
189 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
190 !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
191 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
195 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect")
200 struct erspan_metadata *md = dst;
/* v1 carries a 32-bit session index ... */
204 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
205 md->u.index = nla_get_be32(nla);
/* ... v2 carries direction plus hardware id instead. */
207 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
208 md->u.md2.dir = nla_get_u8(nla);
209 nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
210 set_hwid(&md->u.md2, nla_get_u8(nla));
214 return sizeof(struct erspan_metadata);
/* Walk all option attributes inside TCA_TUNNEL_KEY_ENC_OPTS and copy
 * them into dst, delegating to the per-flavor copy helpers above.
 * Mixing option types is rejected; total length is capped at
 * IP_TUNNEL_OPTS_MAX.  Called once with dst == NULL to size the buffer
 * (see tunnel_key_get_opts_len()) and once to fill it.
 * Returns the total options length or a negative errno.
 */
217 static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
218 int dst_len, struct netlink_ext_ack *extack)
220 int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
221 const struct nlattr *attr, *head = nla_data(nla);
223 err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
224 enc_opts_policy, extack);
228 nla_for_each_attr(attr, head, len, rem) {
229 switch (nla_type(attr)) {
230 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
/* "type" remembers the first flavor seen; later mismatches fail. */
231 if (type && type != TUNNEL_GENEVE_OPT) {
232 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
235 opt_len = tunnel_key_copy_geneve_opt(attr, dst,
240 if (opts_len > IP_TUNNEL_OPTS_MAX) {
241 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
248 type = TUNNEL_GENEVE_OPT;
250 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
252 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
255 opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
260 type = TUNNEL_VXLAN_OPT;
262 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
264 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
267 opt_len = tunnel_key_copy_erspan_opt(attr, dst,
272 type = TUNNEL_ERSPAN_OPT;
/* An empty nest or trailing bytes after the last attribute is an error. */
278 NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
283 NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
/* Sizing pass: run the options copier with a NULL destination so it
 * only validates and totals the encoded length.
 */
290 static int tunnel_key_get_opts_len(struct nlattr *nla,
291 struct netlink_ext_ack *extack)
293 return tunnel_key_copy_opts(nla, NULL, 0, extack);
/* Copy parsed tunnel options into the ip_tunnel_info and set the
 * matching TUNNEL_*_OPT flag.  The flavor is taken from the first
 * attribute in the nest.  Each arm is compiled out (-EAFNOSUPPORT)
 * when CONFIG_INET is disabled.  Returns opts length or -errno.
 */
296 static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
297 int opts_len, struct netlink_ext_ack *extack)
299 info->options_len = opts_len;
300 switch (nla_type(nla_data(nla))) {
301 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
302 #if IS_ENABLED(CONFIG_INET)
303 info->key.tun_flags |= TUNNEL_GENEVE_OPT;
304 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
307 return -EAFNOSUPPORT;
309 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
310 #if IS_ENABLED(CONFIG_INET)
311 info->key.tun_flags |= TUNNEL_VXLAN_OPT;
312 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
315 return -EAFNOSUPPORT;
317 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
318 #if IS_ENABLED(CONFIG_INET)
319 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
320 return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
323 return -EAFNOSUPPORT;
326 NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
/* Top-level netlink policy for TCA_TUNNEL_KEY_* attributes: the fixed
 * parms struct, v4/v6 endpoint addresses, key id, UDP dst port,
 * checksum opt-out, nested encap options, and TOS/TTL.
 */
331 static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
332 [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
333 [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
334 [TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
335 [TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
336 [TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
337 [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
338 [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
339 [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
340 [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
341 [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
342 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
/* Drop the metadata dst reference held by SET-mode params before the
 * params struct itself is freed.
 * NOTE(review): the NULL-check and kfree_rcu of p appear to have been
 * dropped by the extraction — confirm against upstream.
 */
345 static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
349 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
350 dst_release(&p->tcft_enc_metadata->dst);
/* Control-path setup for the tunnel_key action: parse netlink config,
 * allocate/lookup the action via the IDR, build the encap metadata dst
 * for SET mode, and publish a new params struct under tcf_lock/RCU.
 * Returns ACT_P_CREATED, 0 (existing action updated/bound) or -errno.
 * NOTE(review): many lines (declarations, error labels, returns) were
 * dropped by the extraction; control flow below is incomplete as shown.
 */
355 static int tunnel_key_init(struct net *net, struct nlattr *nla,
356 struct nlattr *est, struct tc_action **a,
357 struct tcf_proto *tp, u32 act_flags,
358 struct netlink_ext_ack *extack)
360 struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
361 bool bind = act_flags & TCA_ACT_FLAGS_BIND;
362 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
363 struct tcf_tunnel_key_params *params_new;
364 struct metadata_dst *metadata = NULL;
365 struct tcf_chain *goto_ch = NULL;
366 struct tc_tunnel_key *parm;
367 struct tcf_tunnel_key *t;
379 NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
383 err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
384 tunnel_key_policy, extack);
386 NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
390 if (!tb[TCA_TUNNEL_KEY_PARMS]) {
391 NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
395 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
/* Reserve (or find) the action index before doing heavier setup. */
397 err = tcf_idr_check_alloc(tn, &index, a, bind);
404 switch (parm->t_action) {
405 case TCA_TUNNEL_KEY_ACT_RELEASE:
407 case TCA_TUNNEL_KEY_ACT_SET:
408 if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
411 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
412 key_id = key32_to_tunnel_id(key32);
/* Checksumming defaults on; NO_CSUM=1 turns it off. */
416 flags |= TUNNEL_CSUM;
417 if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
418 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
419 flags &= ~TUNNEL_CSUM;
421 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
422 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
/* Sizing pass for encap options; data is copied in later. */
424 if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
425 opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
434 if (tb[TCA_TUNNEL_KEY_ENC_TOS])
435 tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
437 if (tb[TCA_TUNNEL_KEY_ENC_TTL])
438 ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
/* Exactly one address family (v4 or v6 src+dst pair) must be given. */
440 if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
441 tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
445 saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
446 daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
448 metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
451 } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
452 tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
453 struct in6_addr saddr;
454 struct in6_addr daddr;
456 saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
457 daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
459 metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
463 NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
469 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
474 #ifdef CONFIG_DST_CACHE
475 ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
477 goto release_tun_meta;
/* Second pass over options: copy into the freshly sized tun_info. */
481 ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
482 &metadata->u.tun_info,
485 goto release_tun_meta;
488 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
491 NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
497 ret = tcf_idr_create_from_flags(tn, index, est, a,
498 &act_tunnel_key_ops, bind,
501 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
502 goto release_tun_meta;
506 } else if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
507 NL_SET_ERR_MSG(extack, "TC IDR already exists");
509 goto release_tun_meta;
512 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
516 goto release_tun_meta;
518 t = to_tunnel_key(*a);
520 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
521 if (unlikely(!params_new)) {
522 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
527 params_new->tcft_action = parm->t_action;
528 params_new->tcft_enc_metadata = metadata;
/* Swap in the new params under the lock; the old ones (now in
 * params_new) are released after the swap. */
530 spin_lock_bh(&t->tcf_lock);
531 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
532 params_new = rcu_replace_pointer(t->params, params_new,
533 lockdep_is_held(&t->tcf_lock));
534 spin_unlock_bh(&t->tcf_lock);
535 tunnel_key_release_params(params_new);
537 tcf_chain_put_by_act(goto_ch);
/* Error unwind: drop chain ref, metadata dst, then the IDR slot. */
543 tcf_chain_put_by_act(goto_ch);
547 dst_release(&metadata->dst);
551 tcf_idr_release(*a, bind);
553 tcf_idr_cleanup(tn, index);
/* Action cleanup callback: the action is already unpublished, so the
 * params pointer can be taken without RCU protection (hence the "1"
 * in rcu_dereference_protected) and released.
 */
557 static void tunnel_key_release(struct tc_action *a)
559 struct tcf_tunnel_key *t = to_tunnel_key(a);
560 struct tcf_tunnel_key_params *params;
562 params = rcu_dereference_protected(t->params, 1);
563 tunnel_key_release_params(params);
/* Dump all GENEVE options stored after the ip_tunnel_info back to
 * netlink as a TCA_TUNNEL_KEY_ENC_OPTS_GENEVE nest, one
 * class/type/data triple per option.  Cancels the nest on failure.
 */
566 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
567 const struct ip_tunnel_info *info)
569 int len = info->options_len;
/* Options live immediately after the ip_tunnel_info struct. */
570 u8 *src = (u8 *)(info + 1);
571 struct nlattr *start;
573 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
578 struct geneve_opt *opt = (struct geneve_opt *)src;
580 if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
582 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
584 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
585 opt->length * 4, opt + 1)) {
586 nla_nest_cancel(skb, start);
/* opt->length counts 4-byte words of payload (see the copy helper). */
590 len -= sizeof(struct geneve_opt) + opt->length * 4;
591 src += sizeof(struct geneve_opt) + opt->length * 4;
594 nla_nest_end(skb, start);
/* Dump the single VXLAN GBP option as a
 * TCA_TUNNEL_KEY_ENC_OPTS_VXLAN nest; cancels the nest on failure.
 */
598 static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
599 const struct ip_tunnel_info *info)
601 struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
602 struct nlattr *start;
604 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
608 if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
609 nla_nest_cancel(skb, start);
613 nla_nest_end(skb, start);
/* Dump ERSPAN metadata as a TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN nest:
 * always the version, plus index (v1) or dir/hwid (v2).  All failure
 * paths funnel to the nest-cancel at the bottom.
 */
617 static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
618 const struct ip_tunnel_info *info)
620 struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
621 struct nlattr *start;
623 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
627 if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
630 if (md->version == 1 &&
631 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
634 if (md->version == 2 &&
635 (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
637 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
638 get_hwid(&md->u.md2))))
641 nla_nest_end(skb, start);
/* Shared error path: undo the partially written nest. */
644 nla_nest_cancel(skb, start);
/* Dump the TCA_TUNNEL_KEY_ENC_OPTS container, dispatching to the
 * flavor-specific dumper selected by the TUNNEL_*_OPT flag.  A zero
 * options_len means nothing to dump (success).
 */
648 static int tunnel_key_opts_dump(struct sk_buff *skb,
649 const struct ip_tunnel_info *info)
651 struct nlattr *start;
654 if (!info->options_len)
657 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
661 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
662 err = tunnel_key_geneve_opts_dump(skb, info);
665 } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
666 err = tunnel_key_vxlan_opts_dump(skb, info);
669 } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
670 err = tunnel_key_erspan_opts_dump(skb, info);
675 nla_nest_cancel(skb, start);
679 nla_nest_end(skb, start);
/* Put the encap src/dst addresses for whichever family the tun_info
 * carries.  Returns 0 on success (falls through to an error return,
 * dropped by the extraction, when neither family matched or a put
 * failed).
 */
683 static int tunnel_key_dump_addresses(struct sk_buff *skb,
684 const struct ip_tunnel_info *info)
686 unsigned short family = ip_tunnel_info_af(info);
688 if (family == AF_INET) {
689 __be32 saddr = info->key.u.ipv4.src;
690 __be32 daddr = info->key.u.ipv4.dst;
692 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
693 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
697 if (family == AF_INET6) {
698 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
699 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
701 if (!nla_put_in6_addr(skb,
702 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
703 !nla_put_in6_addr(skb,
704 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
711 static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
714 unsigned char *b = skb_tail_pointer(skb);
715 struct tcf_tunnel_key *t = to_tunnel_key(a);
716 struct tcf_tunnel_key_params *params;
717 struct tc_tunnel_key opt = {
718 .index = t->tcf_index,
719 .refcnt = refcount_read(&t->tcf_refcnt) - ref,
720 .bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
724 spin_lock_bh(&t->tcf_lock);
725 params = rcu_dereference_protected(t->params,
726 lockdep_is_held(&t->tcf_lock));
727 opt.action = t->tcf_action;
728 opt.t_action = params->tcft_action;
730 if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
731 goto nla_put_failure;
733 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
734 struct ip_tunnel_info *info =
735 ¶ms->tcft_enc_metadata->u.tun_info;
736 struct ip_tunnel_key *key = &info->key;
737 __be32 key_id = tunnel_id_to_key32(key->tun_id);
739 if (((key->tun_flags & TUNNEL_KEY) &&
740 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
741 tunnel_key_dump_addresses(skb,
742 ¶ms->tcft_enc_metadata->u.tun_info) ||
744 nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
746 nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
747 !(key->tun_flags & TUNNEL_CSUM)) ||
748 tunnel_key_opts_dump(skb, info))
749 goto nla_put_failure;
751 if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
752 goto nla_put_failure;
754 if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
755 goto nla_put_failure;
758 tcf_tm_dump(&tm, &t->tcf_tm);
759 if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
760 &tm, TCA_TUNNEL_KEY_PAD))
761 goto nla_put_failure;
762 spin_unlock_bh(&t->tcf_lock);
767 spin_unlock_bh(&t->tcf_lock);
/* Destructor for a flow_action tunnel copy made by
 * tcf_tunnel_encap_get_tunnel().
 * NOTE(review): the freeing call appears to have been dropped by the
 * extraction — upstream kfree()s the copied tunnel here; confirm.
 */
772 static void tcf_tunnel_encap_put_tunnel(void *priv)
774 struct ip_tunnel_info *tunnel = priv;
/* Copy the action's tunnel info into the flow_action entry for hardware
 * offload and register the matching destructor so the copy is freed
 * with the entry.
 */
779 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
780 const struct tc_action *act)
782 entry->tunnel = tcf_tunnel_info_copy(act);
785 entry->destructor = tcf_tunnel_encap_put_tunnel;
786 entry->destructor_priv = entry->tunnel;
/* Translate this action for hardware offload.  The first branch fills a
 * flow_action_entry (binding path): ENCAP for SET mode (with a tunnel
 * copy), DECAP for RELEASE, otherwise unsupported.  The second branch
 * only reports the action id for a flow_offload_action query.
 */
790 static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
794 struct netlink_ext_ack *extack)
799 struct flow_action_entry *entry = entry_data;
801 if (is_tcf_tunnel_set(act)) {
802 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
803 err = tcf_tunnel_encap_get_tunnel(entry, act);
806 } else if (is_tcf_tunnel_release(act)) {
807 entry->id = FLOW_ACTION_TUNNEL_DECAP;
809 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
814 struct flow_offload_action *fl_action = entry_data;
816 if (is_tcf_tunnel_set(act))
817 fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
818 else if (is_tcf_tunnel_release(act))
819 fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
/* tc action ops table wiring the tunnel_key callbacks into the action
 * framework (datapath, dump, init, cleanup, offload setup).
 */
827 static struct tc_action_ops act_tunnel_key_ops = {
828 .kind = "tunnel_key",
829 .id = TCA_ID_TUNNEL_KEY,
830 .owner = THIS_MODULE,
831 .act = tunnel_key_act,
832 .dump = tunnel_key_dump,
833 .init = tunnel_key_init,
834 .cleanup = tunnel_key_release,
835 .offload_act_setup = tcf_tunnel_key_offload_act_setup,
836 .size = sizeof(struct tcf_tunnel_key),
/* Per-netns init: set up this namespace's tc_action_net state. */
839 static __net_init int tunnel_key_init_net(struct net *net)
841 struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
843 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
/* Batched per-netns teardown counterpart to tunnel_key_init_net(). */
846 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
848 tc_action_net_exit(net_list, act_tunnel_key_ops.net_id);
/* Per-netns registration: .id/.size let net_generic() find our state. */
851 static struct pernet_operations tunnel_key_net_ops = {
852 .init = tunnel_key_init_net,
853 .exit_batch = tunnel_key_exit_net,
854 .id = &act_tunnel_key_ops.net_id,
855 .size = sizeof(struct tc_action_net),
/* Module entry: register the action ops and per-netns hooks. */
858 static int __init tunnel_key_init_module(void)
860 return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
/* Module exit: unregister everything registered at init. */
863 static void __exit tunnel_key_cleanup_module(void)
865 tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
/* Standard module boilerplate: entry/exit hooks and metadata. */
868 module_init(tunnel_key_init_module);
869 module_exit(tunnel_key_cleanup_module);
871 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
872 MODULE_DESCRIPTION("ip tunnel manipulation actions");
873 MODULE_LICENSE("GPL v2");