// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c       packet mirroring and redirect actions
 *
 * Authors:     Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
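/*
 * Example usage from user space (illustrative only; the exact syntax may
 * vary with the installed iproute2 version):
 *
 *   # mirror all ingress traffic arriving on eth0 to eth1
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: protocol all matchall \
 *           action mirred egress mirror dev eth1
 */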

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

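/* A mirred action can redirect a packet to a device whose own TC hooks run
 * mirred again.  Cap the per-CPU nesting depth so a misconfigured loop
 * (e.g. two devices redirecting to each other) cannot recurse forever.
 */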
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
        return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
        switch (action) {
        case TCA_EGRESS_REDIR:
        case TCA_EGRESS_MIRROR:
                return false;
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                return true;
        default:
                BUG();
        }
}

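/* Reusing the original skb instead of cloning it is only safe when the
 * configured verdict means the packet would not travel further through the
 * caller's pipeline; for these verdicts mirred may take ownership of the
 * skb and report TC_ACT_CONSUMED instead.
 */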
static bool tcf_mirred_can_reinsert(int action)
{
        switch (action) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                return true;
        }
        return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
        return rcu_dereference_protected(m->tcfm_dev,
                                         lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        spin_lock(&mirred_list_lock);
        list_del(&m->tcfm_list);
        spin_unlock(&mirred_list_lock);

        /* last reference to action, no need to lock */
        dev = rcu_dereference_protected(m->tcfm_dev, 1);
        dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
        [TCA_MIRRED_PARMS]      = { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           struct tcf_proto *tp,
                           u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        struct net_device *dev;
        bool exists = false;
        int ret, err;
        u32 index;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
                return -EINVAL;
        }
        ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
                                          mirred_policy, extack);
        if (ret < 0)
                return ret;
        if (!tb[TCA_MIRRED_PARMS]) {
                NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
                return -EINVAL;
        }
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
                break;
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
                return -EINVAL;
        }

        if (!exists) {
                if (!parm->ifindex) {
                        tcf_idr_cleanup(tn, index);
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
                ret = tcf_idr_create_from_flags(tn, index, est, a,
                                                &act_mirred_ops, bind, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }

        m = to_mirred(*a);
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&m->tcfm_list);

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        spin_lock_bh(&m->tcf_lock);

        if (parm->ifindex) {
                dev = dev_get_by_index(net, parm->ifindex);
                if (!dev) {
                        spin_unlock_bh(&m->tcf_lock);
                        err = -ENODEV;
                        goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
                dev = rcu_replace_pointer(m->tcfm_dev, dev,
                                          lockdep_is_held(&m->tcf_lock));
                dev_put(dev);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
                spin_unlock(&mirred_list_lock);
        }

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

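/* Hand the packet to its new path: egress via the target device's transmit
 * path, or ingress via the local receive path.
 */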
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
        int err;

        if (!want_ingress)
                err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
        else
                err = netif_receive_skb(skb);

        return err;
}

static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_mirred *m = to_mirred(a);
        struct sk_buff *skb2 = skb;
        bool m_mac_header_xmit;
        struct net_device *dev;
        unsigned int rec_level;
        int retval, err = 0;
        bool use_reinsert;
        bool want_ingress;
        bool is_redirect;
        bool expects_nh;
        int m_eaction;
        int mac_len;
        bool at_nh;

        rec_level = __this_cpu_inc_return(mirred_rec_level);
        if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
                net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
                                     netdev_name(skb->dev));
                __this_cpu_dec(mirred_rec_level);
                return TC_ACT_SHOT;
        }

        tcf_lastuse_update(&m->tcf_tm);
        tcf_action_update_bstats(&m->common, skb);

        m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
        m_eaction = READ_ONCE(m->tcfm_eaction);
        retval = READ_ONCE(m->tcf_action);
        dev = rcu_dereference_bh(m->tcfm_dev);
        if (unlikely(!dev)) {
                pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }

        if (unlikely(!(dev->flags & IFF_UP))) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
        }

        /* The clone could be avoided only when called from ingress or
         * clsact; since the clsact caller can't easily be detected, skip
         * the clone only for ingress - that covers the TC S/W datapath.
         */
        is_redirect = tcf_mirred_is_act_redirect(m_eaction);
        use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
                       tcf_mirred_can_reinsert(retval);
        if (!use_reinsert) {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (!skb2)
                        goto out;
        }

        /* All mirred/redirected skbs should clear previous ct info */
        nf_reset_ct(skb2);

        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

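        /* Devices that transmit a MAC header (e.g. Ethernet) expect
         * skb->data at the MAC header, while the ingress path and L3
         * devices expect it at the network header; adjust skb->data if
         * the two sides disagree.
         */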
        expects_nh = want_ingress || !m_mac_header_xmit;
        at_nh = skb->data == skb_network_header(skb);
        if (at_nh != expects_nh) {
                mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
                          skb_network_header(skb) - skb_mac_header(skb);
                if (expects_nh) {
                        /* target device/action expects data at nh */
                        skb_pull_rcsum(skb2, mac_len);
                } else {
                        /* target device/action expects data at mac */
                        skb_push_rcsum(skb2, mac_len);
                }
        }

        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;

        /* mirror is always swallowed */
        if (is_redirect) {
                skb_set_redirected(skb2, skb2->tc_at_ingress);

                /* let the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
                        err = tcf_mirred_forward(res->ingress, skb);
                        if (err)
                                tcf_action_inc_overlimit_qstats(&m->common);
                        __this_cpu_dec(mirred_rec_level);
                        return TC_ACT_CONSUMED;
                }
        }

        err = tcf_mirred_forward(want_ingress, skb2);
        if (err) {
out:
                tcf_action_inc_overlimit_qstats(&m->common);
                if (tcf_mirred_is_act_redirect(m_eaction))
                        retval = TC_ACT_SHOT;
        }
        __this_cpu_dec(mirred_rec_level);

        return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                             u64 drops, u64 lastuse, bool hw)
{
        struct tcf_mirred *m = to_mirred(a);
        struct tcf_t *tm = &m->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

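/* Serialize the action's current configuration back to user space; the
 * dump runs under tcf_lock so it sees a consistent parms/device snapshot.
 */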
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                           int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = to_mirred(a);
        struct tc_mirred opt = {
                .index   = m->tcf_index,
                .refcnt  = refcount_read(&m->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
        };
        struct net_device *dev;
        struct tcf_t t;

        spin_lock_bh(&m->tcf_lock);
        opt.action = m->tcf_action;
        opt.eaction = m->tcfm_eaction;
        dev = tcf_mirred_dev_dereference(m);
        if (dev)
                opt.ifindex = dev->ifindex;

        if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &m->tcf_tm);
        if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&m->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&m->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
                             struct netlink_callback *cb, int type,
                             const struct tc_action_ops *ops,
                             struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tcf_idr_search(tn, a, index);
}

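/* When a target device is unregistered, drop our reference and clear the
 * cached pointer; the action itself stays around and simply loses its
 * target, which tcf_mirred_act() handles via the !dev check.
 */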
static int mirred_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;

        ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER) {
                spin_lock(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        spin_lock_bh(&m->tcf_lock);
                        if (tcf_mirred_dev_dereference(m) == dev) {
                                dev_put(dev);
                                /* Note: no RCU grace period is necessary
                                 * here, as net_device is already RCU
                                 * protected.
                                 */
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                        spin_unlock_bh(&m->tcf_lock);
                }
                spin_unlock(&mirred_list_lock);
        }

        return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
        .notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
        struct net_device *dev = priv;

        dev_put(dev);
}

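/* Used by the hardware-offload path: return the current target device with
 * a reference held, plus a destructor the caller invokes to drop it.
 */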
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
                   tc_action_priv_destructor *destructor)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(m->tcfm_dev);
        if (dev) {
                dev_hold(dev);
                *destructor = tcf_mirred_dev_put;
        }
        rcu_read_unlock();

        return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .id             =       TCA_ID_MIRRED,
        .owner          =       THIS_MODULE,
        .act            =       tcf_mirred_act,
        .stats_update   =       tcf_stats_update,
        .dump           =       tcf_mirred_dump,
        .cleanup        =       tcf_mirred_release,
        .init           =       tcf_mirred_init,
        .walk           =       tcf_mirred_walker,
        .lookup         =       tcf_mirred_search,
        .get_fill_size  =       tcf_mirred_get_fill_size,
        .size           =       sizeof(struct tcf_mirred),
        .get_dev        =       tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, mirred_net_id);

        return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
        .init = mirred_init_net,
        .exit_batch = mirred_exit_net,
        .id   = &mirred_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
        int err = register_netdevice_notifier(&mirred_device_notifier);

        if (err)
                return err;

        pr_info("Mirror/redirect action on\n");
        err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
        if (err)
                unregister_netdevice_notifier(&mirred_device_notifier);

        return err;
}

static void __exit mirred_cleanup_module(void)
{
        tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
        unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);