return -1;
}
-static void nft_masq_ipv4_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+static void nft_masq_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
- struct nft_masq *priv = nft_expr_priv(expr);
+ const struct nft_masq *priv = nft_expr_priv(expr);
struct nf_nat_range2 range;
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
+ range.min_proto.all = (__force __be16)
+ nft_reg_load16(&regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)
+ nft_reg_load16(&regs->data[priv->sreg_proto_max]);
+ }
+
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb,
+ nft_hook(pkt),
+ &range,
+ nft_out(pkt));
+ break;
+#ifdef CONFIG_NF_TABLES_IPV6
+ case NFPROTO_IPV6:
+ regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
+ nft_out(pkt));
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
- regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
- &range, nft_out(pkt));
}
static void
static const struct nft_expr_ops nft_masq_ipv4_ops = {
.type = &nft_masq_ipv4_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
- .eval = nft_masq_ipv4_eval,
+ .eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_ipv4_destroy,
.dump = nft_masq_dump,
};
#ifdef CONFIG_NF_TABLES_IPV6
-static void nft_masq_ipv6_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_masq *priv = nft_expr_priv(expr);
- struct nf_nat_range2 range;
-
- memset(&range, 0, sizeof(range));
- range.flags = priv->flags;
- if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- }
- regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
- nft_out(pkt));
-}
-
static void
nft_masq_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
static const struct nft_expr_ops nft_masq_ipv6_ops = {
.type = &nft_masq_ipv6_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
- .eval = nft_masq_ipv6_eval,
+ .eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_ipv6_destroy,
.dump = nft_masq_dump,
#endif
#ifdef CONFIG_NF_TABLES_INET
-static void nft_masq_inet_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- switch (nft_pf(pkt)) {
- case NFPROTO_IPV4:
- return nft_masq_ipv4_eval(expr, regs, pkt);
- case NFPROTO_IPV6:
- return nft_masq_ipv6_eval(expr, regs, pkt);
- }
-
- WARN_ON_ONCE(1);
-}
-
static void
nft_masq_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
static const struct nft_expr_ops nft_masq_inet_ops = {
.type = &nft_masq_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
- .eval = nft_masq_inet_eval,
+ .eval = nft_masq_eval,
.init = nft_masq_init,
.destroy = nft_masq_inet_destroy,
.dump = nft_masq_dump,