/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>
17 enum nft_tunnel_keys key:8;
19 enum nft_tunnel_mode mode:8;
22 static void nft_tunnel_get_eval(const struct nft_expr *expr,
23 struct nft_regs *regs,
24 const struct nft_pktinfo *pkt)
26 const struct nft_tunnel *priv = nft_expr_priv(expr);
27 u32 *dest = ®s->data[priv->dreg];
28 struct ip_tunnel_info *tun_info;
30 tun_info = skb_tunnel_info(pkt->skb);
35 nft_reg_store8(dest, false);
38 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
39 (priv->mode == NFT_TUNNEL_MODE_RX &&
40 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
41 (priv->mode == NFT_TUNNEL_MODE_TX &&
42 (tun_info->mode & IP_TUNNEL_INFO_TX)))
43 nft_reg_store8(dest, true);
45 nft_reg_store8(dest, false);
49 regs->verdict.code = NFT_BREAK;
52 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
53 (priv->mode == NFT_TUNNEL_MODE_RX &&
54 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
55 (priv->mode == NFT_TUNNEL_MODE_TX &&
56 (tun_info->mode & IP_TUNNEL_INFO_TX)))
57 *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
59 regs->verdict.code = NFT_BREAK;
63 regs->verdict.code = NFT_BREAK;
67 static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
68 [NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
69 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
70 [NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
73 static int nft_tunnel_get_init(const struct nft_ctx *ctx,
74 const struct nft_expr *expr,
75 const struct nlattr * const tb[])
77 struct nft_tunnel *priv = nft_expr_priv(expr);
80 if (!tb[NFTA_TUNNEL_KEY] ||
81 !tb[NFTA_TUNNEL_DREG])
84 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
96 if (tb[NFTA_TUNNEL_MODE]) {
97 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
98 if (priv->mode > NFT_TUNNEL_MODE_MAX)
101 priv->mode = NFT_TUNNEL_MODE_NONE;
104 return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
105 NULL, NFT_DATA_VALUE, len);
108 static int nft_tunnel_get_dump(struct sk_buff *skb,
109 const struct nft_expr *expr)
111 const struct nft_tunnel *priv = nft_expr_priv(expr);
113 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
114 goto nla_put_failure;
115 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
116 goto nla_put_failure;
117 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
118 goto nla_put_failure;
125 static struct nft_expr_type nft_tunnel_type;
126 static const struct nft_expr_ops nft_tunnel_get_ops = {
127 .type = &nft_tunnel_type,
128 .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
129 .eval = nft_tunnel_get_eval,
130 .init = nft_tunnel_get_init,
131 .dump = nft_tunnel_get_dump,
134 static struct nft_expr_type nft_tunnel_type __read_mostly = {
136 .family = NFPROTO_NETDEV,
137 .ops = &nft_tunnel_get_ops,
138 .policy = nft_tunnel_policy,
139 .maxattr = NFTA_TUNNEL_MAX,
140 .owner = THIS_MODULE,
143 struct nft_tunnel_opts {
145 struct vxlan_metadata vxlan;
146 struct erspan_metadata erspan;
147 u8 data[IP_TUNNEL_OPTS_MAX];
153 struct nft_tunnel_obj {
154 struct metadata_dst *md;
155 struct nft_tunnel_opts opts;
158 static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
159 [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
160 [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
163 static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
164 const struct nlattr *attr,
165 struct ip_tunnel_info *info)
167 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
170 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
171 nft_tunnel_ip_policy, NULL);
175 if (!tb[NFTA_TUNNEL_KEY_IP_DST])
178 if (tb[NFTA_TUNNEL_KEY_IP_SRC])
179 info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
180 if (tb[NFTA_TUNNEL_KEY_IP_DST])
181 info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
186 static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
187 [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
188 [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
189 [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
192 static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
193 const struct nlattr *attr,
194 struct ip_tunnel_info *info)
196 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
199 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
200 nft_tunnel_ip6_policy, NULL);
204 if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
207 if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
208 memcpy(&info->key.u.ipv6.src,
209 nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
210 sizeof(struct in6_addr));
212 if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
213 memcpy(&info->key.u.ipv6.dst,
214 nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
215 sizeof(struct in6_addr));
217 if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
218 info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
220 info->mode |= IP_TUNNEL_INFO_IPV6;
225 static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
226 [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
229 static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
230 struct nft_tunnel_opts *opts)
232 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
235 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
236 nft_tunnel_opts_vxlan_policy, NULL);
240 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
243 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
245 opts->len = sizeof(struct vxlan_metadata);
246 opts->flags = TUNNEL_VXLAN_OPT;
251 static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
252 [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
253 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
254 [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
255 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
258 static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
259 struct nft_tunnel_opts *opts)
261 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
265 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
266 attr, nft_tunnel_opts_erspan_policy,
271 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
274 version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
277 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
280 opts->u.erspan.u.index =
281 nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
283 case ERSPAN_VERSION2:
284 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
285 !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
288 hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
289 dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
291 set_hwid(&opts->u.erspan.u.md2, hwid);
292 opts->u.erspan.u.md2.dir = dir;
297 opts->u.erspan.version = version;
299 opts->len = sizeof(struct erspan_metadata);
300 opts->flags = TUNNEL_ERSPAN_OPT;
305 static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
306 [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
307 [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
308 [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
311 static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
312 struct nft_tunnel_opts *opts)
314 struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
315 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
318 err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
319 nft_tunnel_opts_geneve_policy, NULL);
323 if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
324 !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
325 !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
328 attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
329 data_len = nla_len(attr);
333 opts->len += sizeof(*opt) + data_len;
334 if (opts->len > IP_TUNNEL_OPTS_MAX)
337 memcpy(opt->opt_data, nla_data(attr), data_len);
338 opt->length = data_len / 4;
339 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
340 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
341 opts->flags = TUNNEL_GENEVE_OPT;
346 static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
347 [NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
348 .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
349 [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
350 [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
351 [NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
354 static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
355 const struct nlattr *attr,
356 struct ip_tunnel_info *info,
357 struct nft_tunnel_opts *opts)
359 int err, rem, type = 0;
362 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
363 nft_tunnel_opts_policy, NULL);
367 nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
368 switch (nla_type(nla)) {
369 case NFTA_TUNNEL_KEY_OPTS_VXLAN:
372 err = nft_tunnel_obj_vxlan_init(nla, opts);
375 type = TUNNEL_VXLAN_OPT;
377 case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
380 err = nft_tunnel_obj_erspan_init(nla, opts);
383 type = TUNNEL_ERSPAN_OPT;
385 case NFTA_TUNNEL_KEY_OPTS_GENEVE:
386 if (type && type != TUNNEL_GENEVE_OPT)
388 err = nft_tunnel_obj_geneve_init(nla, opts);
391 type = TUNNEL_GENEVE_OPT;
401 static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
402 [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
403 [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
404 [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
405 [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
406 [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
407 [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
408 [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
409 [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
410 [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
413 static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
414 const struct nlattr * const tb[],
415 struct nft_object *obj)
417 struct nft_tunnel_obj *priv = nft_obj_data(obj);
418 struct ip_tunnel_info info;
419 struct metadata_dst *md;
422 if (!tb[NFTA_TUNNEL_KEY_ID])
425 memset(&info, 0, sizeof(info));
426 info.mode = IP_TUNNEL_INFO_TX;
427 info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
428 info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
430 if (tb[NFTA_TUNNEL_KEY_IP]) {
431 err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
434 } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
435 err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
442 if (tb[NFTA_TUNNEL_KEY_SPORT]) {
443 info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
445 if (tb[NFTA_TUNNEL_KEY_DPORT]) {
446 info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
449 if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
452 tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
453 if (tun_flags & ~NFT_TUNNEL_F_MASK)
456 if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
457 info.key.tun_flags &= ~TUNNEL_CSUM;
458 if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
459 info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
460 if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
461 info.key.tun_flags |= TUNNEL_SEQ;
463 if (tb[NFTA_TUNNEL_KEY_TOS])
464 info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
465 if (tb[NFTA_TUNNEL_KEY_TTL])
466 info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
468 info.key.ttl = U8_MAX;
470 if (tb[NFTA_TUNNEL_KEY_OPTS]) {
471 err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
477 md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
481 memcpy(&md->u.tun_info, &info, sizeof(info));
482 #ifdef CONFIG_DST_CACHE
483 err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
485 metadata_dst_free(md);
489 ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
496 static inline void nft_tunnel_obj_eval(struct nft_object *obj,
497 struct nft_regs *regs,
498 const struct nft_pktinfo *pkt)
500 struct nft_tunnel_obj *priv = nft_obj_data(obj);
501 struct sk_buff *skb = pkt->skb;
504 dst_hold((struct dst_entry *) priv->md);
505 skb_dst_set(skb, (struct dst_entry *) priv->md);
508 static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
512 if (info->mode & IP_TUNNEL_INFO_IPV6) {
513 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
517 if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
518 &info->key.u.ipv6.src) < 0 ||
519 nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
520 &info->key.u.ipv6.dst) < 0 ||
521 nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
523 nla_nest_cancel(skb, nest);
527 nla_nest_end(skb, nest);
529 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
533 if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
534 info->key.u.ipv4.src) < 0 ||
535 nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
536 info->key.u.ipv4.dst) < 0) {
537 nla_nest_cancel(skb, nest);
541 nla_nest_end(skb, nest);
547 static int nft_tunnel_opts_dump(struct sk_buff *skb,
548 struct nft_tunnel_obj *priv)
550 struct nft_tunnel_opts *opts = &priv->opts;
551 struct nlattr *nest, *inner;
553 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
557 if (opts->flags & TUNNEL_VXLAN_OPT) {
558 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
561 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
562 htonl(opts->u.vxlan.gbp)))
564 nla_nest_end(skb, inner);
565 } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
566 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
569 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
570 htonl(opts->u.erspan.version)))
572 switch (opts->u.erspan.version) {
574 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
575 opts->u.erspan.u.index))
578 case ERSPAN_VERSION2:
579 if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
580 get_hwid(&opts->u.erspan.u.md2)) ||
581 nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
582 opts->u.erspan.u.md2.dir))
586 nla_nest_end(skb, inner);
587 } else if (opts->flags & TUNNEL_GENEVE_OPT) {
588 struct geneve_opt *opt;
591 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
594 while (opts->len > offset) {
595 opt = (struct geneve_opt *)opts->u.data + offset;
596 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
598 nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
600 nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
601 opt->length * 4, opt->opt_data))
603 offset += sizeof(*opt) + opt->length * 4;
605 nla_nest_end(skb, inner);
607 nla_nest_end(skb, nest);
611 nla_nest_cancel(skb, inner);
613 nla_nest_cancel(skb, nest);
617 static int nft_tunnel_ports_dump(struct sk_buff *skb,
618 struct ip_tunnel_info *info)
620 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
621 nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
627 static int nft_tunnel_flags_dump(struct sk_buff *skb,
628 struct ip_tunnel_info *info)
632 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
633 flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
634 if (!(info->key.tun_flags & TUNNEL_CSUM))
635 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
636 if (info->key.tun_flags & TUNNEL_SEQ)
637 flags |= NFT_TUNNEL_F_SEQ_NUMBER;
639 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
645 static int nft_tunnel_obj_dump(struct sk_buff *skb,
646 struct nft_object *obj, bool reset)
648 struct nft_tunnel_obj *priv = nft_obj_data(obj);
649 struct ip_tunnel_info *info = &priv->md->u.tun_info;
651 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
652 tunnel_id_to_key32(info->key.tun_id)) ||
653 nft_tunnel_ip_dump(skb, info) < 0 ||
654 nft_tunnel_ports_dump(skb, info) < 0 ||
655 nft_tunnel_flags_dump(skb, info) < 0 ||
656 nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
657 nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
658 nft_tunnel_opts_dump(skb, priv) < 0)
659 goto nla_put_failure;
667 static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
668 struct nft_object *obj)
670 struct nft_tunnel_obj *priv = nft_obj_data(obj);
672 metadata_dst_free(priv->md);
675 static struct nft_object_type nft_tunnel_obj_type;
676 static const struct nft_object_ops nft_tunnel_obj_ops = {
677 .type = &nft_tunnel_obj_type,
678 .size = sizeof(struct nft_tunnel_obj),
679 .eval = nft_tunnel_obj_eval,
680 .init = nft_tunnel_obj_init,
681 .destroy = nft_tunnel_obj_destroy,
682 .dump = nft_tunnel_obj_dump,
685 static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
686 .type = NFT_OBJECT_TUNNEL,
687 .ops = &nft_tunnel_obj_ops,
688 .maxattr = NFTA_TUNNEL_KEY_MAX,
689 .policy = nft_tunnel_key_policy,
690 .owner = THIS_MODULE,
693 static int __init nft_tunnel_module_init(void)
697 err = nft_register_expr(&nft_tunnel_type);
701 err = nft_register_obj(&nft_tunnel_obj_type);
703 nft_unregister_expr(&nft_tunnel_type);
708 static void __exit nft_tunnel_module_exit(void)
710 nft_unregister_obj(&nft_tunnel_obj_type);
711 nft_unregister_expr(&nft_tunnel_type);
714 module_init(nft_tunnel_module_init);
715 module_exit(nft_tunnel_module_exit);
717 MODULE_LICENSE("GPL");
718 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
719 MODULE_ALIAS_NFT_EXPR("tunnel");
720 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
721 MODULE_DESCRIPTION("nftables tunnel expression support");