net/sched/act_nat.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>


static struct tc_action_ops act_nat_ops;

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
        [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};

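/* Parse the TCA_NAT_* netlink attributes and create, bind to, or replace a
 * NAT action instance in this netns's action table.
 */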
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, struct tcf_proto *tp,
                        u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_NAT_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
        u32 index;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_NAT_MAX, nla, nat_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (tb[TCA_NAT_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_NAT_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_nat_ops, bind, false, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (err > 0) {
                if (bind)
                        return 0;
                if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {
                return err;
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
        p = to_tcf_nat(*a);

        spin_lock_bh(&p->tcf_lock);
        p->old_addr = parm->old_addr;
        p->new_addr = parm->new_addr;
        p->mask = parm->mask;
        p->flags = parm->flags;

        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        spin_unlock_bh(&p->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

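/* Datapath handler: rewrite the source (egress) or destination (ingress)
 * IPv4 address of the packet according to old_addr/new_addr/mask.
 */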
static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        struct tcf_nat *p = to_tcf_nat(a);
        struct iphdr *iph;
        __be32 old_addr;
        __be32 new_addr;
        __be32 mask;
        __be32 addr;
        int egress;
        int action;
        int ihl;
        int noff;

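        /* Snapshot the translation parameters under the action lock so the
         * rest of the fast path can run without holding it.
         */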
        spin_lock(&p->tcf_lock);

        tcf_lastuse_update(&p->tcf_tm);
        old_addr = p->old_addr;
        new_addr = p->new_addr;
        mask = p->mask;
        egress = p->flags & TCA_NAT_FLAG_EGRESS;
        action = p->tcf_action;

        bstats_update(&p->tcf_bstats, skb);

        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        noff = skb_network_offset(skb);
        if (!pskb_may_pull(skb, sizeof(*iph) + noff))
                goto drop;

        iph = ip_hdr(skb);

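        /* TCA_NAT_FLAG_EGRESS selects the source address for rewriting;
         * without it the destination address is rewritten.
         */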
        if (egress)
                addr = iph->saddr;
        else
                addr = iph->daddr;

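        /* Only the bits covered by the mask are matched and replaced; the
         * bits outside the mask are preserved from the original address.
         */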
        if (!((old_addr ^ addr) & mask)) {
                if (skb_try_make_writable(skb, sizeof(*iph) + noff))
                        goto drop;

                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* Rewrite IP header */
                iph = ip_hdr(skb);
                if (egress)
                        iph->saddr = new_addr;
                else
                        iph->daddr = new_addr;

                csum_replace4(&iph->check, addr, new_addr);
        } else if ((iph->frag_off & htons(IP_OFFSET)) ||
                   iph->protocol != IPPROTO_ICMP) {
                goto out;
        }

        ihl = iph->ihl * 4;

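        /* Non-first fragments carry no transport header; for everything else
         * the transport checksum must be fixed up too, since the TCP and UDP
         * checksums cover a pseudo-header containing the rewritten address.
         */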
        /* It would be nice to share code with stateful NAT. */
        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_TCP:
        {
                struct tcphdr *tcph;

                if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
                    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
                        goto drop;

                tcph = (void *)(skb_network_header(skb) + ihl);
                inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
                                         true);
                break;
        }
        case IPPROTO_UDP:
        {
                struct udphdr *udph;

                if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
                    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
                        goto drop;

                udph = (void *)(skb_network_header(skb) + ihl);
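                /* A zero UDP checksum means "no checksum"; only update it if
                 * it is non-zero or will still be completed by hardware, and
                 * fold a recomputed zero to CSUM_MANGLED_0.
                 */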
                if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                        inet_proto_csum_replace4(&udph->check, skb, addr,
                                                 new_addr, true);
                        if (!udph->check)
                                udph->check = CSUM_MANGLED_0;
                }
                break;
        }
        case IPPROTO_ICMP:
        {
                struct icmphdr *icmph;

                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);

                if (!icmp_is_err(icmph->type))
                        break;

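                /* ICMP errors embed the IP header of the offending packet,
                 * which travelled in the opposite direction, so translate the
                 * opposite address field of that inner header.
                 */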
                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
                                        noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);
                if (egress)
                        addr = iph->daddr;
                else
                        addr = iph->saddr;

                if ((old_addr ^ addr) & mask)
                        break;

                if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
                                          sizeof(*iph) + noff))
                        goto drop;

                icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);

                new_addr &= mask;
                new_addr |= addr & ~mask;

                /* XXX Fix up the inner checksums. */
                if (egress)
                        iph->daddr = new_addr;
                else
                        iph->saddr = new_addr;

                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
                                         false);
                break;
        }
        default:
                break;
        }

out:
        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

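/* Dump the current NAT parameters and usage timestamps back to user space. */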
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
                        int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = to_tcf_nat(a);
        struct tc_nat opt = {
                .index    = p->tcf_index,
                .refcnt   = refcount_read(&p->tcf_refcnt) - ref,
                .bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&p->tcf_lock);
        opt.old_addr = p->old_addr;
        opt.new_addr = p->new_addr;
        opt.mask = p->mask;
        opt.flags = p->flags;
        opt.action = p->tcf_action;

        if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&p->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&p->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

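/* Callbacks registered with the tc action framework. */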
static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .id             =       TCA_ID_NAT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_nat_act,
        .dump           =       tcf_nat_dump,
        .init           =       tcf_nat_init,
        .size           =       sizeof(struct tcf_nat),
};

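/* Each network namespace gets its own table of NAT action instances. */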
static __net_init int nat_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id);

        return tc_action_net_init(net, tn, &act_nat_ops);
}

static void __net_exit nat_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, act_nat_ops.net_id);
}

static struct pernet_operations nat_net_ops = {
        .init = nat_init_net,
        .exit_batch = nat_exit_net,
        .id   = &act_nat_ops.net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
        return tcf_register_action(&act_nat_ops, &nat_net_ops);
}

static void __exit nat_cleanup_module(void)
{
        tcf_unregister_action(&act_nat_ops, &nat_net_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);