// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
9 #include <linux/kernel.h>
10 #include <linux/if_vlan.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 #include <net/netfilter/nf_tables.h>
18 #include <net/netfilter/nf_tables_offload.h>
19 /* For layer 4 checksum field offset. */
20 #include <linux/tcp.h>
21 #include <linux/udp.h>
22 #include <linux/icmpv6.h>
24 #include <linux/ipv6.h>
26 #include <net/sctp/checksum.h>
28 static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
29 struct vlan_ethhdr *veth)
31 if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
34 veth->h_vlan_proto = skb->vlan_proto;
35 veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
36 veth->h_vlan_encapsulated_proto = skb->protocol;
41 /* add vlan header into the user buffer for if tag was removed by offloads */
43 nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
45 int mac_off = skb_mac_header(skb) - skb->data;
46 u8 *vlanh, *dst_u8 = (u8 *) d;
47 struct vlan_ethhdr veth;
50 if ((skb->protocol == htons(ETH_P_8021AD) ||
51 skb->protocol == htons(ETH_P_8021Q)) &&
52 offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
53 vlan_hlen += VLAN_HLEN;
56 if (offset < VLAN_ETH_HLEN + vlan_hlen) {
60 skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
62 else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
65 if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
66 ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
68 memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
75 offset = ETH_HLEN + vlan_hlen;
77 offset -= VLAN_HLEN + vlan_hlen;
80 return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
83 static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
85 unsigned int thoff = nft_thoff(pkt);
87 if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
92 pkt->inneroff = thoff + sizeof(struct udphdr);
95 struct tcphdr *th, _tcph;
97 th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
101 pkt->inneroff = thoff + __tcp_hdrlen(th);
108 pkt->flags |= NFT_PKTINFO_INNER;
113 static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
115 if (!(pkt->flags & NFT_PKTINFO_INNER) &&
116 __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
119 return pkt->inneroff;
122 void nft_payload_eval(const struct nft_expr *expr,
123 struct nft_regs *regs,
124 const struct nft_pktinfo *pkt)
126 const struct nft_payload *priv = nft_expr_priv(expr);
127 const struct sk_buff *skb = pkt->skb;
128 u32 *dest = ®s->data[priv->dreg];
131 if (priv->len % NFT_REG32_SIZE)
132 dest[priv->len / NFT_REG32_SIZE] = 0;
134 switch (priv->base) {
135 case NFT_PAYLOAD_LL_HEADER:
136 if (!skb_mac_header_was_set(skb))
139 if (skb_vlan_tag_present(skb)) {
140 if (!nft_payload_copy_vlan(dest, skb,
141 priv->offset, priv->len))
145 offset = skb_mac_header(skb) - skb->data;
147 case NFT_PAYLOAD_NETWORK_HEADER:
148 offset = skb_network_offset(skb);
150 case NFT_PAYLOAD_TRANSPORT_HEADER:
151 if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
153 offset = nft_thoff(pkt);
155 case NFT_PAYLOAD_INNER_HEADER:
156 offset = nft_payload_inner_offset(pkt);
163 offset += priv->offset;
165 if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
169 regs->verdict.code = NFT_BREAK;
172 static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
173 [NFTA_PAYLOAD_SREG] = { .type = NLA_U32 },
174 [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
175 [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
176 [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 },
177 [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
178 [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
179 [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
180 [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
183 static int nft_payload_init(const struct nft_ctx *ctx,
184 const struct nft_expr *expr,
185 const struct nlattr * const tb[])
187 struct nft_payload *priv = nft_expr_priv(expr);
189 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
190 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
191 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
193 return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
194 &priv->dreg, NULL, NFT_DATA_VALUE,
198 static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
200 const struct nft_payload *priv = nft_expr_priv(expr);
202 if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
203 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
204 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
205 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
206 goto nla_put_failure;
213 static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
214 u32 priv_len, u32 field_len)
216 unsigned int remainder, delta, k;
217 struct nft_data mask = {};
218 __be32 remainder_mask;
220 if (priv_len == field_len) {
221 memset(®->mask, 0xff, priv_len);
223 } else if (priv_len > field_len) {
227 memset(&mask, 0xff, field_len);
228 remainder = priv_len % sizeof(u32);
230 k = priv_len / sizeof(u32);
231 delta = field_len - priv_len;
232 remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
233 mask.data[k] = (__force u32)remainder_mask;
236 memcpy(®->mask, &mask, field_len);
241 static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
242 struct nft_flow_rule *flow,
243 const struct nft_payload *priv)
245 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
247 switch (priv->offset) {
248 case offsetof(struct ethhdr, h_source):
249 if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
252 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
255 case offsetof(struct ethhdr, h_dest):
256 if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
259 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
262 case offsetof(struct ethhdr, h_proto):
263 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
266 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
267 n_proto, sizeof(__be16), reg);
268 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
270 case offsetof(struct vlan_ethhdr, h_vlan_TCI):
271 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
274 NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
275 vlan_tci, sizeof(__be16), reg,
276 NFT_OFFLOAD_F_NETWORK2HOST);
278 case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
279 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
282 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
283 vlan_tpid, sizeof(__be16), reg);
284 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
286 case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
287 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
290 NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
291 vlan_tci, sizeof(__be16), reg,
292 NFT_OFFLOAD_F_NETWORK2HOST);
294 case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
295 sizeof(struct vlan_hdr):
296 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
299 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
300 vlan_tpid, sizeof(__be16), reg);
301 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
310 static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
311 struct nft_flow_rule *flow,
312 const struct nft_payload *priv)
314 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
316 switch (priv->offset) {
317 case offsetof(struct iphdr, saddr):
318 if (!nft_payload_offload_mask(reg, priv->len,
319 sizeof(struct in_addr)))
322 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
323 sizeof(struct in_addr), reg);
324 nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
326 case offsetof(struct iphdr, daddr):
327 if (!nft_payload_offload_mask(reg, priv->len,
328 sizeof(struct in_addr)))
331 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
332 sizeof(struct in_addr), reg);
333 nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
335 case offsetof(struct iphdr, protocol):
336 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
339 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
341 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
350 static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
351 struct nft_flow_rule *flow,
352 const struct nft_payload *priv)
354 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
356 switch (priv->offset) {
357 case offsetof(struct ipv6hdr, saddr):
358 if (!nft_payload_offload_mask(reg, priv->len,
359 sizeof(struct in6_addr)))
362 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
363 sizeof(struct in6_addr), reg);
364 nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
366 case offsetof(struct ipv6hdr, daddr):
367 if (!nft_payload_offload_mask(reg, priv->len,
368 sizeof(struct in6_addr)))
371 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
372 sizeof(struct in6_addr), reg);
373 nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
375 case offsetof(struct ipv6hdr, nexthdr):
376 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
379 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
381 nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
390 static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
391 struct nft_flow_rule *flow,
392 const struct nft_payload *priv)
396 switch (ctx->dep.l3num) {
397 case htons(ETH_P_IP):
398 err = nft_payload_offload_ip(ctx, flow, priv);
400 case htons(ETH_P_IPV6):
401 err = nft_payload_offload_ip6(ctx, flow, priv);
410 static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
411 struct nft_flow_rule *flow,
412 const struct nft_payload *priv)
414 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
416 switch (priv->offset) {
417 case offsetof(struct tcphdr, source):
418 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
421 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
422 sizeof(__be16), reg);
424 case offsetof(struct tcphdr, dest):
425 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
428 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
429 sizeof(__be16), reg);
438 static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
439 struct nft_flow_rule *flow,
440 const struct nft_payload *priv)
442 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
444 switch (priv->offset) {
445 case offsetof(struct udphdr, source):
446 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
449 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
450 sizeof(__be16), reg);
452 case offsetof(struct udphdr, dest):
453 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
456 NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
457 sizeof(__be16), reg);
466 static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
467 struct nft_flow_rule *flow,
468 const struct nft_payload *priv)
472 switch (ctx->dep.protonum) {
474 err = nft_payload_offload_tcp(ctx, flow, priv);
477 err = nft_payload_offload_udp(ctx, flow, priv);
486 static int nft_payload_offload(struct nft_offload_ctx *ctx,
487 struct nft_flow_rule *flow,
488 const struct nft_expr *expr)
490 const struct nft_payload *priv = nft_expr_priv(expr);
493 switch (priv->base) {
494 case NFT_PAYLOAD_LL_HEADER:
495 err = nft_payload_offload_ll(ctx, flow, priv);
497 case NFT_PAYLOAD_NETWORK_HEADER:
498 err = nft_payload_offload_nh(ctx, flow, priv);
500 case NFT_PAYLOAD_TRANSPORT_HEADER:
501 err = nft_payload_offload_th(ctx, flow, priv);
510 static const struct nft_expr_ops nft_payload_ops = {
511 .type = &nft_payload_type,
512 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
513 .eval = nft_payload_eval,
514 .init = nft_payload_init,
515 .dump = nft_payload_dump,
516 .offload = nft_payload_offload,
519 const struct nft_expr_ops nft_payload_fast_ops = {
520 .type = &nft_payload_type,
521 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
522 .eval = nft_payload_eval,
523 .init = nft_payload_init,
524 .dump = nft_payload_dump,
525 .offload = nft_payload_offload,
528 static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
530 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
532 *sum = CSUM_MANGLED_0;
535 static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
537 struct udphdr *uh, _uh;
539 uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
543 return (__force bool)uh->check;
546 static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
548 unsigned int *l4csum_offset)
553 switch (pkt->tprot) {
555 *l4csum_offset = offsetof(struct tcphdr, check);
558 if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
561 case IPPROTO_UDPLITE:
562 *l4csum_offset = offsetof(struct udphdr, check);
565 *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
571 *l4csum_offset += nft_thoff(pkt);
575 static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
579 if (skb_ensure_writable(skb, offset + sizeof(*sh)))
582 sh = (struct sctphdr *)(skb->data + offset);
583 sh->checksum = sctp_compute_cksum(skb, offset);
584 skb->ip_summed = CHECKSUM_UNNECESSARY;
588 static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
590 __wsum fsum, __wsum tsum)
595 /* If we cannot determine layer 4 checksum offset or this packet doesn't
596 * require layer 4 checksum recalculation, skip this packet.
598 if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
601 if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
604 /* Checksum mangling for an arbitrary amount of bytes, based on
605 * inet_proto_csum_replace*() functions.
607 if (skb->ip_summed != CHECKSUM_PARTIAL) {
608 nft_csum_replace(&sum, fsum, tsum);
609 if (skb->ip_summed == CHECKSUM_COMPLETE) {
610 skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
614 sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
618 if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
619 skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
625 static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
626 __wsum fsum, __wsum tsum, int csum_offset)
630 if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
633 nft_csum_replace(&sum, fsum, tsum);
634 if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
635 skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
641 static void nft_payload_set_eval(const struct nft_expr *expr,
642 struct nft_regs *regs,
643 const struct nft_pktinfo *pkt)
645 const struct nft_payload_set *priv = nft_expr_priv(expr);
646 struct sk_buff *skb = pkt->skb;
647 const u32 *src = ®s->data[priv->sreg];
648 int offset, csum_offset;
651 switch (priv->base) {
652 case NFT_PAYLOAD_LL_HEADER:
653 if (!skb_mac_header_was_set(skb))
655 offset = skb_mac_header(skb) - skb->data;
657 case NFT_PAYLOAD_NETWORK_HEADER:
658 offset = skb_network_offset(skb);
660 case NFT_PAYLOAD_TRANSPORT_HEADER:
661 if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
663 offset = nft_thoff(pkt);
665 case NFT_PAYLOAD_INNER_HEADER:
666 offset = nft_payload_inner_offset(pkt);
674 csum_offset = offset + priv->csum_offset;
675 offset += priv->offset;
677 if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
678 ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
679 priv->base != NFT_PAYLOAD_INNER_HEADER) ||
680 skb->ip_summed != CHECKSUM_PARTIAL)) {
681 fsum = skb_checksum(skb, offset, priv->len, 0);
682 tsum = csum_partial(src, priv->len, 0);
684 if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
685 nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
688 if (priv->csum_flags &&
689 nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
693 if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
694 skb_store_bits(skb, offset, src, priv->len) < 0)
697 if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
698 pkt->tprot == IPPROTO_SCTP &&
699 skb->ip_summed != CHECKSUM_PARTIAL) {
700 if (pkt->fragoff == 0 &&
701 nft_payload_csum_sctp(skb, nft_thoff(pkt)))
707 regs->verdict.code = NFT_BREAK;
710 static int nft_payload_set_init(const struct nft_ctx *ctx,
711 const struct nft_expr *expr,
712 const struct nlattr * const tb[])
714 struct nft_payload_set *priv = nft_expr_priv(expr);
715 u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
718 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
719 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
720 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
722 if (tb[NFTA_PAYLOAD_CSUM_TYPE])
723 csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
724 if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
725 err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
730 priv->csum_offset = csum_offset;
732 if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
735 flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
736 if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
739 priv->csum_flags = flags;
743 case NFT_PAYLOAD_CSUM_NONE:
744 case NFT_PAYLOAD_CSUM_INET:
746 case NFT_PAYLOAD_CSUM_SCTP:
747 if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
750 if (priv->csum_offset != offsetof(struct sctphdr, checksum))
756 priv->csum_type = csum_type;
758 return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
762 static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
764 const struct nft_payload_set *priv = nft_expr_priv(expr);
766 if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
767 nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
768 nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
769 nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
770 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
771 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
772 htonl(priv->csum_offset)) ||
773 nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
774 goto nla_put_failure;
781 static const struct nft_expr_ops nft_payload_set_ops = {
782 .type = &nft_payload_type,
783 .size = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
784 .eval = nft_payload_set_eval,
785 .init = nft_payload_set_init,
786 .dump = nft_payload_set_dump,
789 static const struct nft_expr_ops *
790 nft_payload_select_ops(const struct nft_ctx *ctx,
791 const struct nlattr * const tb[])
793 enum nft_payload_bases base;
794 unsigned int offset, len;
797 if (tb[NFTA_PAYLOAD_BASE] == NULL ||
798 tb[NFTA_PAYLOAD_OFFSET] == NULL ||
799 tb[NFTA_PAYLOAD_LEN] == NULL)
800 return ERR_PTR(-EINVAL);
802 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
804 case NFT_PAYLOAD_LL_HEADER:
805 case NFT_PAYLOAD_NETWORK_HEADER:
806 case NFT_PAYLOAD_TRANSPORT_HEADER:
807 case NFT_PAYLOAD_INNER_HEADER:
810 return ERR_PTR(-EOPNOTSUPP);
813 if (tb[NFTA_PAYLOAD_SREG] != NULL) {
814 if (tb[NFTA_PAYLOAD_DREG] != NULL)
815 return ERR_PTR(-EINVAL);
816 return &nft_payload_set_ops;
819 if (tb[NFTA_PAYLOAD_DREG] == NULL)
820 return ERR_PTR(-EINVAL);
822 err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
826 err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
830 if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
831 base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
832 return &nft_payload_fast_ops;
834 return &nft_payload_ops;
837 struct nft_expr_type nft_payload_type __read_mostly = {
839 .select_ops = nft_payload_select_ops,
840 .policy = nft_payload_policy,
841 .maxattr = NFTA_PAYLOAD_MAX,
842 .owner = THIS_MODULE,