// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c          iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:   Jamal Hadi Salim (2002-13)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>

static struct tc_action_ops act_ipt_ops;
static struct tc_action_ops act_xt_ops;

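/* Resolve the requested iptables target by name/revision and validate the
 * user-supplied target data with xt_check_target(). On failure, drop the
 * module reference taken by the lookup.
 */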
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
                           char *table, unsigned int hook)
{
        struct xt_tgchk_param par;
        struct xt_target *target;
        struct ipt_entry e = {};
        int ret = 0;

        target = xt_request_find_target(AF_INET, t->u.user.name,
                                        t->u.user.revision);
        if (IS_ERR(target))
                return PTR_ERR(target);

        t->u.kernel.target = target;
        memset(&par, 0, sizeof(par));
        par.net       = net;
        par.table     = table;
        par.entryinfo = &e;
        par.target    = target;
        par.targinfo  = t->data;
        par.hook_mask = hook;
        par.family    = NFPROTO_IPV4;

        ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
        if (ret < 0) {
                module_put(t->u.kernel.target->me);
                return ret;
        }
        return 0;
}

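/* Invoke the target's destructor, if it has one, and drop the module
 * reference taken when the target was set up.
 */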
static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
        struct xt_tgdtor_param par = {
                .target   = t->u.kernel.target,
                .targinfo = t->data,
                .family   = NFPROTO_IPV4,
                .net      = net,
        };
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
        module_put(par.target->me);
}

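/* ->cleanup(): tear down the xtables target and free the table name and
 * target data attached to the action instance.
 */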
static void tcf_ipt_release(struct tc_action *a)
{
        struct tcf_ipt *ipt = to_ipt(a);

        if (ipt->tcfi_t) {
                ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
                kfree(ipt->tcfi_t);
        }
        kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
        [TCA_IPT_TABLE] = { .type = NLA_STRING, .len = IFNAMSIZ },
        [TCA_IPT_HOOK]  = { .type = NLA_U32 },
        [TCA_IPT_INDEX] = { .type = NLA_U32 },
        [TCA_IPT_TARG]  = { .len = sizeof(struct xt_entry_target) },
};

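/* Common init path for the "ipt" and "xt" actions: parse the netlink
 * attributes, create or look up the action instance by index, then set up
 * the xtables target. The table name defaults to "mangle" when none is
 * supplied. When an existing action is replaced, the old target is
 * destroyed under tcf_lock before the new one is installed.
 */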
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          const struct tc_action_ops *ops,
                          struct tcf_proto *tp, u32 flags)
{
        struct tc_action_net *tn = net_generic(net, id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_IPT_MAX + 1];
        struct tcf_ipt *ipt;
        struct xt_entry_target *td, *t;
        char *tname;
        bool exists = false;
        int ret = 0, err;
        u32 hook = 0;
        u32 index = 0;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (tb[TCA_IPT_INDEX] != NULL)
                index = nla_get_u32(tb[TCA_IPT_INDEX]);

        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }

        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
        if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, ops, bind,
                                     false, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else {
                if (bind) /* don't override defaults */
                        return 0;

                if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        }
        hook = nla_get_u32(tb[TCA_IPT_HOOK]);

        err = -ENOMEM;
        tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
        if (unlikely(!tname))
                goto err1;
        if (tb[TCA_IPT_TABLE] == NULL ||
            nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
                strcpy(tname, "mangle");

        t = kmemdup(td, td->u.target_size, GFP_KERNEL);
        if (unlikely(!t))
                goto err2;

        err = ipt_init_target(net, t, tname, hook);
        if (err < 0)
                goto err3;

        ipt = to_ipt(*a);

        spin_lock_bh(&ipt->tcf_lock);
        if (ret != ACT_P_CREATED) {
                ipt_destroy_target(ipt->tcfi_t, net);
                kfree(ipt->tcfi_tname);
                kfree(ipt->tcfi_t);
        }
        ipt->tcfi_tname = tname;
        ipt->tcfi_t     = t;
        ipt->tcfi_hook  = hook;
        spin_unlock_bh(&ipt->tcf_lock);
        return ret;

err3:
        kfree(t);
err2:
        kfree(tname);
err1:
        tcf_idr_release(*a, bind);
        return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        struct tcf_proto *tp,
                        u32 flags, struct netlink_ext_ack *extack)
{
        return __tcf_ipt_init(net, act_ipt_ops.net_id, nla, est,
                              a, &act_ipt_ops, tp, flags);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a,
                       struct tcf_proto *tp,
                       u32 flags, struct netlink_ext_ack *extack)
{
        return __tcf_ipt_init(net, act_xt_ops.net_id, nla, est,
                              a, &act_xt_ops, tp, flags);
}

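/* ->act(): run the configured xtables target on the skb and map the
 * netfilter verdict to a TC action code (NF_ACCEPT -> TC_ACT_OK,
 * NF_DROP -> TC_ACT_SHOT, XT_CONTINUE -> TC_ACT_PIPE).
 */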
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
{
        int ret = 0, result = 0;
        struct tcf_ipt *ipt = to_ipt(a);
        struct xt_action_param par;
        struct nf_hook_state state = {
                .net    = dev_net(skb->dev),
                .in     = skb->dev,
                .hook   = ipt->tcfi_hook,
                .pf     = NFPROTO_IPV4,
        };

        if (skb_unclone(skb, GFP_ATOMIC))
                return TC_ACT_UNSPEC;

        spin_lock(&ipt->tcf_lock);

        tcf_lastuse_update(&ipt->tcf_tm);
        bstats_update(&ipt->tcf_bstats, skb);

        /* yes, we have to worry about both in and out dev
         * worry later - danger - this API seems to have changed
         * from earlier kernels
         */
        par.state    = &state;
        par.target   = ipt->tcfi_t->u.kernel.target;
        par.targinfo = ipt->tcfi_t->data;
        ret = par.target->target(skb, &par);

        switch (ret) {
        case NF_ACCEPT:
                result = TC_ACT_OK;
                break;
        case NF_DROP:
                result = TC_ACT_SHOT;
                ipt->tcf_qstats.drops++;
                break;
        case XT_CONTINUE:
                result = TC_ACT_PIPE;
                break;
        default:
                net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
                                       ret);
                result = TC_ACT_OK;
                break;
        }
        spin_unlock(&ipt->tcf_lock);
        return result;
}

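/* ->dump(): copy the target data into the netlink message along with the
 * action index, hook, counters, table name and timestamps.
 */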
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                        int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ipt *ipt = to_ipt(a);
        struct xt_entry_target *t;
        struct tcf_t tm;
        struct tc_cnt c;

        /* For simple targets kernel size == user size and
         * user name == target name; to be foolproof you must
         * not assume this.
         */

        spin_lock_bh(&ipt->tcf_lock);
        t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
        if (unlikely(!t))
                goto nla_put_failure;

        c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
        c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
        strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

        if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
            nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
            nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
            nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
            nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
                goto nla_put_failure;

        tcf_tm_dump(&tm, &ipt->tcf_tm);
        if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
                goto nla_put_failure;

        spin_unlock_bh(&ipt->tcf_lock);
        kfree(t);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&ipt->tcf_lock);
        nlmsg_trim(skb, b);
        kfree(t);
        return -1;
}

static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
        .id             =       TCA_ID_IPT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_ipt_act,
        .dump           =       tcf_ipt_dump,
        .cleanup        =       tcf_ipt_release,
        .init           =       tcf_ipt_init,
        .size           =       sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id);

        return tc_action_net_init(net, tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, act_ipt_ops.net_id);
}

static struct pernet_operations ipt_net_ops = {
        .init = ipt_init_net,
        .exit_batch = ipt_exit_net,
        .id   = &act_ipt_ops.net_id,
        .size = sizeof(struct tc_action_net),
};

static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
        .id             =       TCA_ID_XT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_ipt_act,
        .dump           =       tcf_ipt_dump,
        .cleanup        =       tcf_ipt_release,
        .init           =       tcf_xt_init,
        .size           =       sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id);

        return tc_action_net_init(net, tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, act_xt_ops.net_id);
}

static struct pernet_operations xt_net_ops = {
        .init = xt_init_net,
        .exit_batch = xt_exit_net,
        .id   = &act_xt_ops.net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

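/* Register both the "xt" and the "ipt" flavours of this action; they share
 * the same act/dump/cleanup implementation and differ only in kind and id.
 */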
static int __init ipt_init_module(void)
{
        int ret1, ret2;

        ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
        if (ret1 < 0)
                pr_err("Failed to load xt action\n");

        ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
        if (ret2 < 0)
                pr_err("Failed to load ipt action\n");

        if (ret1 < 0 && ret2 < 0)
                return ret1;

        return 0;
}

static void __exit ipt_cleanup_module(void)
{
        tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
        tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);