1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c  Packet classifier API.
4  *
5  * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27 #include <net/netlink.h>
28 #include <net/pkt_sched.h>
29 #include <net/pkt_cls.h>
30 #include <net/tc_act/tc_pedit.h>
31 #include <net/tc_act/tc_mirred.h>
32 #include <net/tc_act/tc_vlan.h>
33 #include <net/tc_act/tc_tunnel_key.h>
34 #include <net/tc_act/tc_csum.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_police.h>
37 #include <net/tc_act/tc_sample.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <net/tc_act/tc_ct.h>
40 #include <net/tc_act/tc_mpls.h>
41 #include <net/tc_act/tc_gate.h>
42 #include <net/flow_offload.h>
43
44 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
45
46 /* The list of all installed classifier types */
47 static LIST_HEAD(tcf_proto_base);
48
49 /* Protects the list of registered TC modules. It is a pure SMP lock. */
50 static DEFINE_RWLOCK(cls_mod_lock);
51
52 #ifdef CONFIG_NET_CLS_ACT
53 DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
54 EXPORT_SYMBOL(tc_skb_ext_tc);
55
56 void tc_skb_ext_tc_enable(void)
57 {
58         static_branch_inc(&tc_skb_ext_tc);
59 }
60 EXPORT_SYMBOL(tc_skb_ext_tc_enable);
61
62 void tc_skb_ext_tc_disable(void)
63 {
64         static_branch_dec(&tc_skb_ext_tc);
65 }
66 EXPORT_SYMBOL(tc_skb_ext_tc_disable);
67 #endif
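
/* Illustrative, hedged sketch (not part of the original file): the static key
 * above lets code that consumes TC_SKB_EXT metadata ask tcf_classify() to
 * record the last executed chain (see the tc_skb_ext_tc_enabled() check in
 * tcf_classify() below). A hypothetical consumer could bump the key while
 * loaded and check it on its fast path. All "example_*" names are made up.
 */
#if 0
static int example_ext_consumer_start(void)
{
	/* Request that tcf_classify() allocates TC_SKB_EXT on a chain miss. */
	tc_skb_ext_tc_enable();
	return 0;
}

static void example_ext_consumer_stop(void)
{
	tc_skb_ext_tc_disable();
}

static void example_ext_consumer_rx(struct sk_buff *skb)
{
	struct tc_skb_ext *ext;

	if (!tc_skb_ext_tc_enabled())
		return;

	ext = skb_ext_find(skb, TC_SKB_EXT);
	if (ext)
		pr_debug("last executed chain: %u\n", ext->chain);
}
#endif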
68
69 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
70 {
71         return jhash_3words(tp->chain->index, tp->prio,
72                             (__force __u32)tp->protocol, 0);
73 }
74
75 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
76                                         struct tcf_proto *tp)
77 {
78         struct tcf_block *block = chain->block;
79
80         mutex_lock(&block->proto_destroy_lock);
81         hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
82                      destroy_obj_hashfn(tp));
83         mutex_unlock(&block->proto_destroy_lock);
84 }
85
86 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
87                           const struct tcf_proto *tp2)
88 {
89         return tp1->chain->index == tp2->chain->index &&
90                tp1->prio == tp2->prio &&
91                tp1->protocol == tp2->protocol;
92 }
93
94 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
95                                         struct tcf_proto *tp)
96 {
97         u32 hash = destroy_obj_hashfn(tp);
98         struct tcf_proto *iter;
99         bool found = false;
100
101         rcu_read_lock();
102         hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
103                                    destroy_ht_node, hash) {
104                 if (tcf_proto_cmp(tp, iter)) {
105                         found = true;
106                         break;
107                 }
108         }
109         rcu_read_unlock();
110
111         return found;
112 }
113
114 static void
115 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
116 {
117         struct tcf_block *block = chain->block;
118
119         mutex_lock(&block->proto_destroy_lock);
120         if (hash_hashed(&tp->destroy_ht_node))
121                 hash_del_rcu(&tp->destroy_ht_node);
122         mutex_unlock(&block->proto_destroy_lock);
123 }
124
125 /* Find classifier type by string name */
126
127 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
128 {
129         const struct tcf_proto_ops *t, *res = NULL;
130
131         if (kind) {
132                 read_lock(&cls_mod_lock);
133                 list_for_each_entry(t, &tcf_proto_base, head) {
134                         if (strcmp(kind, t->kind) == 0) {
135                                 if (try_module_get(t->owner))
136                                         res = t;
137                                 break;
138                         }
139                 }
140                 read_unlock(&cls_mod_lock);
141         }
142         return res;
143 }
144
145 static const struct tcf_proto_ops *
146 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
147                      struct netlink_ext_ack *extack)
148 {
149         const struct tcf_proto_ops *ops;
150
151         ops = __tcf_proto_lookup_ops(kind);
152         if (ops)
153                 return ops;
154 #ifdef CONFIG_MODULES
155         if (rtnl_held)
156                 rtnl_unlock();
157         request_module("cls_%s", kind);
158         if (rtnl_held)
159                 rtnl_lock();
160         ops = __tcf_proto_lookup_ops(kind);
161         /* We dropped the RTNL semaphore in order to perform
162          * the module load. So, even if we succeeded in loading
163          * the module we have to replay the request. We indicate
164          * this using -EAGAIN.
165          */
166         if (ops) {
167                 module_put(ops->owner);
168                 return ERR_PTR(-EAGAIN);
169         }
170 #endif
171         NL_SET_ERR_MSG(extack, "TC classifier not found");
172         return ERR_PTR(-ENOENT);
173 }
174
175 /* Register (unregister) a new classifier type */
176
177 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
178 {
179         struct tcf_proto_ops *t;
180         int rc = -EEXIST;
181
182         write_lock(&cls_mod_lock);
183         list_for_each_entry(t, &tcf_proto_base, head)
184                 if (!strcmp(ops->kind, t->kind))
185                         goto out;
186
187         list_add_tail(&ops->head, &tcf_proto_base);
188         rc = 0;
189 out:
190         write_unlock(&cls_mod_lock);
191         return rc;
192 }
193 EXPORT_SYMBOL(register_tcf_proto_ops);
194
195 static struct workqueue_struct *tc_filter_wq;
196
197 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
198 {
199         struct tcf_proto_ops *t;
200         int rc = -ENOENT;
201
202         /* Wait for outstanding call_rcu()s, if any, from a
203          * tcf_proto_ops's destroy() handler.
204          */
205         rcu_barrier();
206         flush_workqueue(tc_filter_wq);
207
208         write_lock(&cls_mod_lock);
209         list_for_each_entry(t, &tcf_proto_base, head) {
210                 if (t == ops) {
211                         list_del(&t->head);
212                         rc = 0;
213                         break;
214                 }
215         }
216         write_unlock(&cls_mod_lock);
217
218         WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
219 }
220 EXPORT_SYMBOL(unregister_tcf_proto_ops);
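
/* Illustrative, hedged sketch (not part of the original file): how a
 * classifier module typically pairs register_tcf_proto_ops() with
 * unregister_tcf_proto_ops(). Only a few tcf_proto_ops callbacks are shown;
 * a real classifier also implements change/get/delete/walk/dump. All
 * "example_*" names are made up for illustration.
 */
#if 0
static int example_cls_init(struct tcf_proto *tp)
{
	return 0;
}

static void example_cls_destroy(struct tcf_proto *tp, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
}

static int example_cls_classify(struct sk_buff *skb, const struct tcf_proto *tp,
				struct tcf_result *res)
{
	return -1;	/* no match */
}

static struct tcf_proto_ops example_cls_ops __read_mostly = {
	.kind		= "example",	/* auto-loaded as module "cls_example" */
	.owner		= THIS_MODULE,
	.init		= example_cls_init,
	.destroy	= example_cls_destroy,
	.classify	= example_cls_classify,
};

static int __init example_cls_module_init(void)
{
	return register_tcf_proto_ops(&example_cls_ops);
}

static void __exit example_cls_module_exit(void)
{
	unregister_tcf_proto_ops(&example_cls_ops);
}

module_init(example_cls_module_init);
module_exit(example_cls_module_exit);
MODULE_LICENSE("GPL");
#endif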
221
222 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
223 {
224         INIT_RCU_WORK(rwork, func);
225         return queue_rcu_work(tc_filter_wq, rwork);
226 }
227 EXPORT_SYMBOL(tcf_queue_work);
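
/* Illustrative, hedged sketch (not part of the original file): the usual
 * pattern classifiers use with tcf_queue_work() to free a filter only after
 * an RCU grace period, from workqueue (process) context. The "example_filter"
 * structure and helpers are made up for illustration.
 */
#if 0
struct example_filter {
	struct rcu_work rwork;
	/* ... classifier-specific state ... */
};

static void example_filter_free_work(struct work_struct *work)
{
	struct example_filter *f = container_of(to_rcu_work(work),
						struct example_filter, rwork);

	/* Runs in process context, after an RCU grace period has elapsed. */
	kfree(f);
}

static void example_filter_delete(struct example_filter *f)
{
	tcf_queue_work(&f->rwork, example_filter_free_work);
}
#endif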
228
229 /* Select a new prio value from the range managed by the kernel. */
230
231 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
232 {
233         u32 first = TC_H_MAKE(0xC0000000U, 0U);
234
235         if (tp)
236                 first = tp->prio - 1;
237
238         return TC_H_MAJ(first);
239 }
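
/* Worked example (added for clarity, not in the original): tcf_auto_prio(NULL)
 * returns TC_H_MAJ(TC_H_MAKE(0xC0000000U, 0U)) == 0xC0000000. For a tp with
 * prio 0xC0000000, it returns TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000.
 */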
240
241 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
242 {
243         if (kind)
244                 return nla_strscpy(name, kind, IFNAMSIZ) < 0;
245         memset(name, 0, IFNAMSIZ);
246         return false;
247 }
248
249 static bool tcf_proto_is_unlocked(const char *kind)
250 {
251         const struct tcf_proto_ops *ops;
252         bool ret;
253
254         if (strlen(kind) == 0)
255                 return false;
256
257         ops = tcf_proto_lookup_ops(kind, false, NULL);
258         /* On error return false to take rtnl lock. Proto lookup/create
259          * functions will perform lookup again and properly handle errors.
260          */
261         if (IS_ERR(ops))
262                 return false;
263
264         ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
265         module_put(ops->owner);
266         return ret;
267 }
268
269 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
270                                           u32 prio, struct tcf_chain *chain,
271                                           bool rtnl_held,
272                                           struct netlink_ext_ack *extack)
273 {
274         struct tcf_proto *tp;
275         int err;
276
277         tp = kzalloc(sizeof(*tp), GFP_KERNEL);
278         if (!tp)
279                 return ERR_PTR(-ENOBUFS);
280
281         tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
282         if (IS_ERR(tp->ops)) {
283                 err = PTR_ERR(tp->ops);
284                 goto errout;
285         }
286         tp->classify = tp->ops->classify;
287         tp->protocol = protocol;
288         tp->prio = prio;
289         tp->chain = chain;
290         spin_lock_init(&tp->lock);
291         refcount_set(&tp->refcnt, 1);
292
293         err = tp->ops->init(tp);
294         if (err) {
295                 module_put(tp->ops->owner);
296                 goto errout;
297         }
298         return tp;
299
300 errout:
301         kfree(tp);
302         return ERR_PTR(err);
303 }
304
305 static void tcf_proto_get(struct tcf_proto *tp)
306 {
307         refcount_inc(&tp->refcnt);
308 }
309
310 static void tcf_chain_put(struct tcf_chain *chain);
311
312 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
313                               bool sig_destroy, struct netlink_ext_ack *extack)
314 {
315         tp->ops->destroy(tp, rtnl_held, extack);
316         if (sig_destroy)
317                 tcf_proto_signal_destroyed(tp->chain, tp);
318         tcf_chain_put(tp->chain);
319         module_put(tp->ops->owner);
320         kfree_rcu(tp, rcu);
321 }
322
323 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
324                           struct netlink_ext_ack *extack)
325 {
326         if (refcount_dec_and_test(&tp->refcnt))
327                 tcf_proto_destroy(tp, rtnl_held, true, extack);
328 }
329
330 static bool tcf_proto_check_delete(struct tcf_proto *tp)
331 {
332         if (tp->ops->delete_empty)
333                 return tp->ops->delete_empty(tp);
334
335         tp->deleting = true;
336         return tp->deleting;
337 }
338
339 static void tcf_proto_mark_delete(struct tcf_proto *tp)
340 {
341         spin_lock(&tp->lock);
342         tp->deleting = true;
343         spin_unlock(&tp->lock);
344 }
345
346 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
347 {
348         bool deleting;
349
350         spin_lock(&tp->lock);
351         deleting = tp->deleting;
352         spin_unlock(&tp->lock);
353
354         return deleting;
355 }
356
357 #define ASSERT_BLOCK_LOCKED(block)                                      \
358         lockdep_assert_held(&(block)->lock)
359
360 struct tcf_filter_chain_list_item {
361         struct list_head list;
362         tcf_chain_head_change_t *chain_head_change;
363         void *chain_head_change_priv;
364 };
365
366 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
367                                           u32 chain_index)
368 {
369         struct tcf_chain *chain;
370
371         ASSERT_BLOCK_LOCKED(block);
372
373         chain = kzalloc(sizeof(*chain), GFP_KERNEL);
374         if (!chain)
375                 return NULL;
376         list_add_tail_rcu(&chain->list, &block->chain_list);
377         mutex_init(&chain->filter_chain_lock);
378         chain->block = block;
379         chain->index = chain_index;
380         chain->refcnt = 1;
381         if (!chain->index)
382                 block->chain0.chain = chain;
383         return chain;
384 }
385
386 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
387                                        struct tcf_proto *tp_head)
388 {
389         if (item->chain_head_change)
390                 item->chain_head_change(tp_head, item->chain_head_change_priv);
391 }
392
393 static void tcf_chain0_head_change(struct tcf_chain *chain,
394                                    struct tcf_proto *tp_head)
395 {
396         struct tcf_filter_chain_list_item *item;
397         struct tcf_block *block = chain->block;
398
399         if (chain->index)
400                 return;
401
402         mutex_lock(&block->lock);
403         list_for_each_entry(item, &block->chain0.filter_chain_list, list)
404                 tcf_chain_head_change_item(item, tp_head);
405         mutex_unlock(&block->lock);
406 }
407
408 /* Returns true if block can be safely freed. */
409
410 static bool tcf_chain_detach(struct tcf_chain *chain)
411 {
412         struct tcf_block *block = chain->block;
413
414         ASSERT_BLOCK_LOCKED(block);
415
416         list_del_rcu(&chain->list);
417         if (!chain->index)
418                 block->chain0.chain = NULL;
419
420         if (list_empty(&block->chain_list) &&
421             refcount_read(&block->refcnt) == 0)
422                 return true;
423
424         return false;
425 }
426
427 static void tcf_block_destroy(struct tcf_block *block)
428 {
429         mutex_destroy(&block->lock);
430         mutex_destroy(&block->proto_destroy_lock);
431         kfree_rcu(block, rcu);
432 }
433
434 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
435 {
436         struct tcf_block *block = chain->block;
437
438         mutex_destroy(&chain->filter_chain_lock);
439         kfree_rcu(chain, rcu);
440         if (free_block)
441                 tcf_block_destroy(block);
442 }
443
444 static void tcf_chain_hold(struct tcf_chain *chain)
445 {
446         ASSERT_BLOCK_LOCKED(chain->block);
447
448         ++chain->refcnt;
449 }
450
451 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
452 {
453         ASSERT_BLOCK_LOCKED(chain->block);
454
455         /* In case all the references are action references, this
456          * chain should not be shown to the user.
457          */
458         return chain->refcnt == chain->action_refcnt;
459 }
460
461 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
462                                           u32 chain_index)
463 {
464         struct tcf_chain *chain;
465
466         ASSERT_BLOCK_LOCKED(block);
467
468         list_for_each_entry(chain, &block->chain_list, list) {
469                 if (chain->index == chain_index)
470                         return chain;
471         }
472         return NULL;
473 }
474
475 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
476 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
477                                               u32 chain_index)
478 {
479         struct tcf_chain *chain;
480
481         list_for_each_entry_rcu(chain, &block->chain_list, list) {
482                 if (chain->index == chain_index)
483                         return chain;
484         }
485         return NULL;
486 }
487 #endif
488
489 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
490                            u32 seq, u16 flags, int event, bool unicast);
491
492 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
493                                          u32 chain_index, bool create,
494                                          bool by_act)
495 {
496         struct tcf_chain *chain = NULL;
497         bool is_first_reference;
498
499         mutex_lock(&block->lock);
500         chain = tcf_chain_lookup(block, chain_index);
501         if (chain) {
502                 tcf_chain_hold(chain);
503         } else {
504                 if (!create)
505                         goto errout;
506                 chain = tcf_chain_create(block, chain_index);
507                 if (!chain)
508                         goto errout;
509         }
510
511         if (by_act)
512                 ++chain->action_refcnt;
513         is_first_reference = chain->refcnt - chain->action_refcnt == 1;
514         mutex_unlock(&block->lock);
515
516         /* Send a notification only when we got the first
517          * non-action reference. Until then, the chain acts only as
518          * a placeholder for actions pointing to it and the user
519          * ought not to know about it.
520          */
521         if (is_first_reference && !by_act)
522                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
523                                 RTM_NEWCHAIN, false);
524
525         return chain;
526
527 errout:
528         mutex_unlock(&block->lock);
529         return chain;
530 }
531
532 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
533                                        bool create)
534 {
535         return __tcf_chain_get(block, chain_index, create, false);
536 }
537
538 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
539 {
540         return __tcf_chain_get(block, chain_index, true, true);
541 }
542 EXPORT_SYMBOL(tcf_chain_get_by_act);
543
544 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
545                                void *tmplt_priv);
546 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
547                                   void *tmplt_priv, u32 chain_index,
548                                   struct tcf_block *block, struct sk_buff *oskb,
549                                   u32 seq, u16 flags, bool unicast);
550
551 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
552                             bool explicitly_created)
553 {
554         struct tcf_block *block = chain->block;
555         const struct tcf_proto_ops *tmplt_ops;
556         bool free_block = false;
557         unsigned int refcnt;
558         void *tmplt_priv;
559
560         mutex_lock(&block->lock);
561         if (explicitly_created) {
562                 if (!chain->explicitly_created) {
563                         mutex_unlock(&block->lock);
564                         return;
565                 }
566                 chain->explicitly_created = false;
567         }
568
569         if (by_act)
570                 chain->action_refcnt--;
571
572         /* tc_chain_notify_delete can't be called while holding the block lock.
573          * However, when the block is unlocked the chain can be changed
574          * concurrently, so save these to temporary variables.
575          */
576         refcnt = --chain->refcnt;
577         tmplt_ops = chain->tmplt_ops;
578         tmplt_priv = chain->tmplt_priv;
579
580         /* The last dropped non-action reference will trigger notification. */
581         if (refcnt - chain->action_refcnt == 0 && !by_act) {
582                 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
583                                        block, NULL, 0, 0, false);
584                 /* Last reference to chain, no need to lock. */
585                 chain->flushing = false;
586         }
587
588         if (refcnt == 0)
589                 free_block = tcf_chain_detach(chain);
590         mutex_unlock(&block->lock);
591
592         if (refcnt == 0) {
593                 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
594                 tcf_chain_destroy(chain, free_block);
595         }
596 }
597
598 static void tcf_chain_put(struct tcf_chain *chain)
599 {
600         __tcf_chain_put(chain, false, false);
601 }
602
603 void tcf_chain_put_by_act(struct tcf_chain *chain)
604 {
605         __tcf_chain_put(chain, true, false);
606 }
607 EXPORT_SYMBOL(tcf_chain_put_by_act);
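
/* Illustrative, hedged sketch (not part of the original file): an action that
 * jumps to a chain (e.g. "goto chain X") holds the chain via the _by_act
 * helpers, so the chain stays alive but remains invisible to users until a
 * filter takes a regular reference. Names below are made up for illustration.
 */
#if 0
static struct tcf_chain *example_act_bind_chain(struct tcf_block *block,
						u32 chain_index)
{
	/* Creates the chain if needed and takes an action reference. */
	return tcf_chain_get_by_act(block, chain_index);
}

static void example_act_unbind_chain(struct tcf_chain *chain)
{
	/* Drops the action reference taken above. */
	tcf_chain_put_by_act(chain);
}
#endif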
608
609 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
610 {
611         __tcf_chain_put(chain, false, true);
612 }
613
614 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
615 {
616         struct tcf_proto *tp, *tp_next;
617
618         mutex_lock(&chain->filter_chain_lock);
619         tp = tcf_chain_dereference(chain->filter_chain, chain);
620         while (tp) {
621                 tp_next = rcu_dereference_protected(tp->next, 1);
622                 tcf_proto_signal_destroying(chain, tp);
623                 tp = tp_next;
624         }
625         tp = tcf_chain_dereference(chain->filter_chain, chain);
626         RCU_INIT_POINTER(chain->filter_chain, NULL);
627         tcf_chain0_head_change(chain, NULL);
628         chain->flushing = true;
629         mutex_unlock(&chain->filter_chain_lock);
630
631         while (tp) {
632                 tp_next = rcu_dereference_protected(tp->next, 1);
633                 tcf_proto_put(tp, rtnl_held, NULL);
634                 tp = tp_next;
635         }
636 }
637
638 static int tcf_block_setup(struct tcf_block *block,
639                            struct flow_block_offload *bo);
640
641 static void tcf_block_offload_init(struct flow_block_offload *bo,
642                                    struct net_device *dev, struct Qdisc *sch,
643                                    enum flow_block_command command,
644                                    enum flow_block_binder_type binder_type,
645                                    struct flow_block *flow_block,
646                                    bool shared, struct netlink_ext_ack *extack)
647 {
648         bo->net = dev_net(dev);
649         bo->command = command;
650         bo->binder_type = binder_type;
651         bo->block = flow_block;
652         bo->block_shared = shared;
653         bo->extack = extack;
654         bo->sch = sch;
655         bo->cb_list_head = &flow_block->cb_list;
656         INIT_LIST_HEAD(&bo->cb_list);
657 }
658
659 static void tcf_block_unbind(struct tcf_block *block,
660                              struct flow_block_offload *bo);
661
662 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
663 {
664         struct tcf_block *block = block_cb->indr.data;
665         struct net_device *dev = block_cb->indr.dev;
666         struct Qdisc *sch = block_cb->indr.sch;
667         struct netlink_ext_ack extack = {};
668         struct flow_block_offload bo = {};
669
670         tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
671                                block_cb->indr.binder_type,
672                                &block->flow_block, tcf_block_shared(block),
673                                &extack);
674         rtnl_lock();
675         down_write(&block->cb_lock);
676         list_del(&block_cb->driver_list);
677         list_move(&block_cb->list, &bo.cb_list);
678         tcf_block_unbind(block, &bo);
679         up_write(&block->cb_lock);
680         rtnl_unlock();
681 }
682
683 static bool tcf_block_offload_in_use(struct tcf_block *block)
684 {
685         return atomic_read(&block->offloadcnt);
686 }
687
688 static int tcf_block_offload_cmd(struct tcf_block *block,
689                                  struct net_device *dev, struct Qdisc *sch,
690                                  struct tcf_block_ext_info *ei,
691                                  enum flow_block_command command,
692                                  struct netlink_ext_ack *extack)
693 {
694         struct flow_block_offload bo = {};
695
696         tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
697                                &block->flow_block, tcf_block_shared(block),
698                                extack);
699
700         if (dev->netdev_ops->ndo_setup_tc) {
701                 int err;
702
703                 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
704                 if (err < 0) {
705                         if (err != -EOPNOTSUPP)
706                                 NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
707                         return err;
708                 }
709
710                 return tcf_block_setup(block, &bo);
711         }
712
713         flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
714                                     tc_block_indr_cleanup);
715         tcf_block_setup(block, &bo);
716
717         return -EOPNOTSUPP;
718 }
719
720 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
721                                   struct tcf_block_ext_info *ei,
722                                   struct netlink_ext_ack *extack)
723 {
724         struct net_device *dev = q->dev_queue->dev;
725         int err;
726
727         down_write(&block->cb_lock);
728
729         /* If the tc offload feature is disabled and the block we try to bind
730          * to already has some offloaded filters, forbid the bind.
731          */
732         if (dev->netdev_ops->ndo_setup_tc &&
733             !tc_can_offload(dev) &&
734             tcf_block_offload_in_use(block)) {
735                 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
736                 err = -EOPNOTSUPP;
737                 goto err_unlock;
738         }
739
740         err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
741         if (err == -EOPNOTSUPP)
742                 goto no_offload_dev_inc;
743         if (err)
744                 goto err_unlock;
745
746         up_write(&block->cb_lock);
747         return 0;
748
749 no_offload_dev_inc:
750         if (tcf_block_offload_in_use(block))
751                 goto err_unlock;
752
753         err = 0;
754         block->nooffloaddevcnt++;
755 err_unlock:
756         up_write(&block->cb_lock);
757         return err;
758 }
759
760 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
761                                      struct tcf_block_ext_info *ei)
762 {
763         struct net_device *dev = q->dev_queue->dev;
764         int err;
765
766         down_write(&block->cb_lock);
767         err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
768         if (err == -EOPNOTSUPP)
769                 goto no_offload_dev_dec;
770         up_write(&block->cb_lock);
771         return;
772
773 no_offload_dev_dec:
774         WARN_ON(block->nooffloaddevcnt-- == 0);
775         up_write(&block->cb_lock);
776 }
777
778 static int
779 tcf_chain0_head_change_cb_add(struct tcf_block *block,
780                               struct tcf_block_ext_info *ei,
781                               struct netlink_ext_ack *extack)
782 {
783         struct tcf_filter_chain_list_item *item;
784         struct tcf_chain *chain0;
785
786         item = kmalloc(sizeof(*item), GFP_KERNEL);
787         if (!item) {
788                 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
789                 return -ENOMEM;
790         }
791         item->chain_head_change = ei->chain_head_change;
792         item->chain_head_change_priv = ei->chain_head_change_priv;
793
794         mutex_lock(&block->lock);
795         chain0 = block->chain0.chain;
796         if (chain0)
797                 tcf_chain_hold(chain0);
798         else
799                 list_add(&item->list, &block->chain0.filter_chain_list);
800         mutex_unlock(&block->lock);
801
802         if (chain0) {
803                 struct tcf_proto *tp_head;
804
805                 mutex_lock(&chain0->filter_chain_lock);
806
807                 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
808                 if (tp_head)
809                         tcf_chain_head_change_item(item, tp_head);
810
811                 mutex_lock(&block->lock);
812                 list_add(&item->list, &block->chain0.filter_chain_list);
813                 mutex_unlock(&block->lock);
814
815                 mutex_unlock(&chain0->filter_chain_lock);
816                 tcf_chain_put(chain0);
817         }
818
819         return 0;
820 }
821
822 static void
823 tcf_chain0_head_change_cb_del(struct tcf_block *block,
824                               struct tcf_block_ext_info *ei)
825 {
826         struct tcf_filter_chain_list_item *item;
827
828         mutex_lock(&block->lock);
829         list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
830                 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
831                     (item->chain_head_change == ei->chain_head_change &&
832                      item->chain_head_change_priv == ei->chain_head_change_priv)) {
833                         if (block->chain0.chain)
834                                 tcf_chain_head_change_item(item, NULL);
835                         list_del(&item->list);
836                         mutex_unlock(&block->lock);
837
838                         kfree(item);
839                         return;
840                 }
841         }
842         mutex_unlock(&block->lock);
843         WARN_ON(1);
844 }
845
846 struct tcf_net {
847         spinlock_t idr_lock; /* Protects idr */
848         struct idr idr;
849 };
850
851 static unsigned int tcf_net_id;
852
853 static int tcf_block_insert(struct tcf_block *block, struct net *net,
854                             struct netlink_ext_ack *extack)
855 {
856         struct tcf_net *tn = net_generic(net, tcf_net_id);
857         int err;
858
859         idr_preload(GFP_KERNEL);
860         spin_lock(&tn->idr_lock);
861         err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
862                             GFP_NOWAIT);
863         spin_unlock(&tn->idr_lock);
864         idr_preload_end();
865
866         return err;
867 }
868
869 static void tcf_block_remove(struct tcf_block *block, struct net *net)
870 {
871         struct tcf_net *tn = net_generic(net, tcf_net_id);
872
873         spin_lock(&tn->idr_lock);
874         idr_remove(&tn->idr, block->index);
875         spin_unlock(&tn->idr_lock);
876 }
877
878 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
879                                           u32 block_index,
880                                           struct netlink_ext_ack *extack)
881 {
882         struct tcf_block *block;
883
884         block = kzalloc(sizeof(*block), GFP_KERNEL);
885         if (!block) {
886                 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
887                 return ERR_PTR(-ENOMEM);
888         }
889         mutex_init(&block->lock);
890         mutex_init(&block->proto_destroy_lock);
891         init_rwsem(&block->cb_lock);
892         flow_block_init(&block->flow_block);
893         INIT_LIST_HEAD(&block->chain_list);
894         INIT_LIST_HEAD(&block->owner_list);
895         INIT_LIST_HEAD(&block->chain0.filter_chain_list);
896
897         refcount_set(&block->refcnt, 1);
898         block->net = net;
899         block->index = block_index;
900
901         /* Don't store q pointer for blocks which are shared */
902         if (!tcf_block_shared(block))
903                 block->q = q;
904         return block;
905 }
906
907 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
908 {
909         struct tcf_net *tn = net_generic(net, tcf_net_id);
910
911         return idr_find(&tn->idr, block_index);
912 }
913
914 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
915 {
916         struct tcf_block *block;
917
918         rcu_read_lock();
919         block = tcf_block_lookup(net, block_index);
920         if (block && !refcount_inc_not_zero(&block->refcnt))
921                 block = NULL;
922         rcu_read_unlock();
923
924         return block;
925 }
926
927 static struct tcf_chain *
928 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
929 {
930         mutex_lock(&block->lock);
931         if (chain)
932                 chain = list_is_last(&chain->list, &block->chain_list) ?
933                         NULL : list_next_entry(chain, list);
934         else
935                 chain = list_first_entry_or_null(&block->chain_list,
936                                                  struct tcf_chain, list);
937
938         /* skip all action-only chains */
939         while (chain && tcf_chain_held_by_acts_only(chain))
940                 chain = list_is_last(&chain->list, &block->chain_list) ?
941                         NULL : list_next_entry(chain, list);
942
943         if (chain)
944                 tcf_chain_hold(chain);
945         mutex_unlock(&block->lock);
946
947         return chain;
948 }
949
950 /* Function to be used by all clients that want to iterate over all chains on
951  * a block. It properly obtains block->lock and takes a reference to the chain
952  * before returning it. Users of this function must be tolerant to concurrent
953  * chain insertion/deletion or ensure that no concurrent chain modification is
954  * possible. Note that netlink dump callbacks cannot guarantee a consistent
955  * dump because the rtnl lock is released each time the skb is filled with
956  * data and sent to user-space.
957  */
958
959 struct tcf_chain *
960 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
961 {
962         struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
963
964         if (chain)
965                 tcf_chain_put(chain);
966
967         return chain_next;
968 }
969 EXPORT_SYMBOL(tcf_get_next_chain);
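
/* Illustrative, hedged sketch (not part of the original file): walking every
 * chain on a block with the iterator above. Passing the previous chain back in
 * releases its reference, so no explicit tcf_chain_put() is needed unless the
 * walk is aborted early. Names below are made up for illustration.
 */
#if 0
static void example_walk_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	for (chain = tcf_get_next_chain(block, NULL); chain;
	     chain = tcf_get_next_chain(block, chain))
		pr_debug("chain index %u\n", chain->index);
}
#endif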
970
971 static struct tcf_proto *
972 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
973 {
974         u32 prio = 0;
975
976         ASSERT_RTNL();
977         mutex_lock(&chain->filter_chain_lock);
978
979         if (!tp) {
980                 tp = tcf_chain_dereference(chain->filter_chain, chain);
981         } else if (tcf_proto_is_deleting(tp)) {
982                 /* 'deleting' flag is set and chain->filter_chain_lock was
983                  * unlocked, which means next pointer could be invalid. Restart
984                  * search.
985                  */
986                 prio = tp->prio + 1;
987                 tp = tcf_chain_dereference(chain->filter_chain, chain);
988
989                 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
990                         if (!tp->deleting && tp->prio >= prio)
991                                 break;
992         } else {
993                 tp = tcf_chain_dereference(tp->next, chain);
994         }
995
996         if (tp)
997                 tcf_proto_get(tp);
998
999         mutex_unlock(&chain->filter_chain_lock);
1000
1001         return tp;
1002 }
1003
1004 /* Function to be used by all clients that want to iterate over all tp's on
1005  * a chain. Users of this function must be tolerant to concurrent tp
1006  * insertion/deletion or ensure that no concurrent chain modification is
1007  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1008  * dump because the rtnl lock is released each time the skb is filled with
1009  * data and sent to user-space.
1010  */
1011
1012 struct tcf_proto *
1013 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1014 {
1015         struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1016
1017         if (tp)
1018                 tcf_proto_put(tp, true, NULL);
1019
1020         return tp_next;
1021 }
1022 EXPORT_SYMBOL(tcf_get_next_proto);
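
/* Illustrative, hedged sketch (not part of the original file): iterating the
 * classifiers (tp's) on a single chain with the iterator above; the previous
 * tp is released on each step, mirroring the chain iterator. Assumes the
 * caller holds rtnl. Names below are made up for illustration.
 */
#if 0
static void example_walk_protos(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL); tp;
	     tp = tcf_get_next_proto(chain, tp))
		pr_debug("prio 0x%x kind %s\n", tp->prio, tp->ops->kind);
}
#endif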
1023
1024 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1025 {
1026         struct tcf_chain *chain;
1027
1028         /* Last reference to block. At this point chains cannot be added or
1029          * removed concurrently.
1030          */
1031         for (chain = tcf_get_next_chain(block, NULL);
1032              chain;
1033              chain = tcf_get_next_chain(block, chain)) {
1034                 tcf_chain_put_explicitly_created(chain);
1035                 tcf_chain_flush(chain, rtnl_held);
1036         }
1037 }
1038
1039 /* Look up the Qdisc and increment its reference counter.
1040  * Set parent, if necessary.
1041  */
1042
1043 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1044                             u32 *parent, int ifindex, bool rtnl_held,
1045                             struct netlink_ext_ack *extack)
1046 {
1047         const struct Qdisc_class_ops *cops;
1048         struct net_device *dev;
1049         int err = 0;
1050
1051         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1052                 return 0;
1053
1054         rcu_read_lock();
1055
1056         /* Find link */
1057         dev = dev_get_by_index_rcu(net, ifindex);
1058         if (!dev) {
1059                 rcu_read_unlock();
1060                 return -ENODEV;
1061         }
1062
1063         /* Find qdisc */
1064         if (!*parent) {
1065                 *q = rcu_dereference(dev->qdisc);
1066                 *parent = (*q)->handle;
1067         } else {
1068                 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1069                 if (!*q) {
1070                         NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1071                         err = -EINVAL;
1072                         goto errout_rcu;
1073                 }
1074         }
1075
1076         *q = qdisc_refcount_inc_nz(*q);
1077         if (!*q) {
1078                 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1079                 err = -EINVAL;
1080                 goto errout_rcu;
1081         }
1082
1083         /* Is it classful? */
1084         cops = (*q)->ops->cl_ops;
1085         if (!cops) {
1086                 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1087                 err = -EINVAL;
1088                 goto errout_qdisc;
1089         }
1090
1091         if (!cops->tcf_block) {
1092                 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1093                 err = -EOPNOTSUPP;
1094                 goto errout_qdisc;
1095         }
1096
1097 errout_rcu:
1098         /* At this point we know that qdisc is not noop_qdisc,
1099          * which means that qdisc holds a reference to net_device
1100          * and we hold a reference to qdisc, so it is safe to release
1101          * rcu read lock.
1102          */
1103         rcu_read_unlock();
1104         return err;
1105
1106 errout_qdisc:
1107         rcu_read_unlock();
1108
1109         if (rtnl_held)
1110                 qdisc_put(*q);
1111         else
1112                 qdisc_put_unlocked(*q);
1113         *q = NULL;
1114
1115         return err;
1116 }
1117
1118 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1119                                int ifindex, struct netlink_ext_ack *extack)
1120 {
1121         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1122                 return 0;
1123
1124         /* Are we searching for a filter attached to a class? */
1125         if (TC_H_MIN(parent)) {
1126                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1127
1128                 *cl = cops->find(q, parent);
1129                 if (*cl == 0) {
1130                         NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1131                         return -ENOENT;
1132                 }
1133         }
1134
1135         return 0;
1136 }
1137
1138 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1139                                           unsigned long cl, int ifindex,
1140                                           u32 block_index,
1141                                           struct netlink_ext_ack *extack)
1142 {
1143         struct tcf_block *block;
1144
1145         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1146                 block = tcf_block_refcnt_get(net, block_index);
1147                 if (!block) {
1148                         NL_SET_ERR_MSG(extack, "Block of given index was not found");
1149                         return ERR_PTR(-EINVAL);
1150                 }
1151         } else {
1152                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1153
1154                 block = cops->tcf_block(q, cl, extack);
1155                 if (!block)
1156                         return ERR_PTR(-EINVAL);
1157
1158                 if (tcf_block_shared(block)) {
1159                         NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1160                         return ERR_PTR(-EOPNOTSUPP);
1161                 }
1162
1163                 /* Always take a reference to the block in order to support
1164                  * execution of the cls API rules update path without the rtnl
1165                  * lock. The caller must release the block when it is finished
1166                  * using it. The 'if' branch of this conditional obtains its
1167                  * reference to the block by calling tcf_block_refcnt_get().
1168                  */
1169                 refcount_inc(&block->refcnt);
1170         }
1171
1172         return block;
1173 }
1174
1175 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1176                             struct tcf_block_ext_info *ei, bool rtnl_held)
1177 {
1178         if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1179                 /* Flushing/putting all chains will cause the block to be
1180                  * deallocated when the last chain is freed. However, if
1181                  * chain_list is empty, the block has to be deallocated manually.
1182                  * After the block's reference counter has reached 0, it is no
1183                  * longer possible to increment it or add new chains to the block.
1184                  */
1185                 bool free_block = list_empty(&block->chain_list);
1186
1187                 mutex_unlock(&block->lock);
1188                 if (tcf_block_shared(block))
1189                         tcf_block_remove(block, block->net);
1190
1191                 if (q)
1192                         tcf_block_offload_unbind(block, q, ei);
1193
1194                 if (free_block)
1195                         tcf_block_destroy(block);
1196                 else
1197                         tcf_block_flush_all_chains(block, rtnl_held);
1198         } else if (q) {
1199                 tcf_block_offload_unbind(block, q, ei);
1200         }
1201 }
1202
1203 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1204 {
1205         __tcf_block_put(block, NULL, NULL, rtnl_held);
1206 }
1207
1208 /* Find tcf block.
1209  * Set q, parent, cl when appropriate.
1210  */
1211
1212 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1213                                         u32 *parent, unsigned long *cl,
1214                                         int ifindex, u32 block_index,
1215                                         struct netlink_ext_ack *extack)
1216 {
1217         struct tcf_block *block;
1218         int err = 0;
1219
1220         ASSERT_RTNL();
1221
1222         err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1223         if (err)
1224                 goto errout;
1225
1226         err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1227         if (err)
1228                 goto errout_qdisc;
1229
1230         block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1231         if (IS_ERR(block)) {
1232                 err = PTR_ERR(block);
1233                 goto errout_qdisc;
1234         }
1235
1236         return block;
1237
1238 errout_qdisc:
1239         if (*q)
1240                 qdisc_put(*q);
1241 errout:
1242         *q = NULL;
1243         return ERR_PTR(err);
1244 }
1245
1246 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1247                               bool rtnl_held)
1248 {
1249         if (!IS_ERR_OR_NULL(block))
1250                 tcf_block_refcnt_put(block, rtnl_held);
1251
1252         if (q) {
1253                 if (rtnl_held)
1254                         qdisc_put(q);
1255                 else
1256                         qdisc_put_unlocked(q);
1257         }
1258 }
1259
1260 struct tcf_block_owner_item {
1261         struct list_head list;
1262         struct Qdisc *q;
1263         enum flow_block_binder_type binder_type;
1264 };
1265
1266 static void
1267 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1268                                struct Qdisc *q,
1269                                enum flow_block_binder_type binder_type)
1270 {
1271         if (block->keep_dst &&
1272             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1273             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1274                 netif_keep_dst(qdisc_dev(q));
1275 }
1276
1277 void tcf_block_netif_keep_dst(struct tcf_block *block)
1278 {
1279         struct tcf_block_owner_item *item;
1280
1281         block->keep_dst = true;
1282         list_for_each_entry(item, &block->owner_list, list)
1283                 tcf_block_owner_netif_keep_dst(block, item->q,
1284                                                item->binder_type);
1285 }
1286 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1287
1288 static int tcf_block_owner_add(struct tcf_block *block,
1289                                struct Qdisc *q,
1290                                enum flow_block_binder_type binder_type)
1291 {
1292         struct tcf_block_owner_item *item;
1293
1294         item = kmalloc(sizeof(*item), GFP_KERNEL);
1295         if (!item)
1296                 return -ENOMEM;
1297         item->q = q;
1298         item->binder_type = binder_type;
1299         list_add(&item->list, &block->owner_list);
1300         return 0;
1301 }
1302
1303 static void tcf_block_owner_del(struct tcf_block *block,
1304                                 struct Qdisc *q,
1305                                 enum flow_block_binder_type binder_type)
1306 {
1307         struct tcf_block_owner_item *item;
1308
1309         list_for_each_entry(item, &block->owner_list, list) {
1310                 if (item->q == q && item->binder_type == binder_type) {
1311                         list_del(&item->list);
1312                         kfree(item);
1313                         return;
1314                 }
1315         }
1316         WARN_ON(1);
1317 }
1318
1319 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1320                       struct tcf_block_ext_info *ei,
1321                       struct netlink_ext_ack *extack)
1322 {
1323         struct net *net = qdisc_net(q);
1324         struct tcf_block *block = NULL;
1325         int err;
1326
1327         if (ei->block_index)
1328                 /* block_index not 0 means the shared block is requested */
1329                 block = tcf_block_refcnt_get(net, ei->block_index);
1330
1331         if (!block) {
1332                 block = tcf_block_create(net, q, ei->block_index, extack);
1333                 if (IS_ERR(block))
1334                         return PTR_ERR(block);
1335                 if (tcf_block_shared(block)) {
1336                         err = tcf_block_insert(block, net, extack);
1337                         if (err)
1338                                 goto err_block_insert;
1339                 }
1340         }
1341
1342         err = tcf_block_owner_add(block, q, ei->binder_type);
1343         if (err)
1344                 goto err_block_owner_add;
1345
1346         tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1347
1348         err = tcf_chain0_head_change_cb_add(block, ei, extack);
1349         if (err)
1350                 goto err_chain0_head_change_cb_add;
1351
1352         err = tcf_block_offload_bind(block, q, ei, extack);
1353         if (err)
1354                 goto err_block_offload_bind;
1355
1356         *p_block = block;
1357         return 0;
1358
1359 err_block_offload_bind:
1360         tcf_chain0_head_change_cb_del(block, ei);
1361 err_chain0_head_change_cb_add:
1362         tcf_block_owner_del(block, q, ei->binder_type);
1363 err_block_owner_add:
1364 err_block_insert:
1365         tcf_block_refcnt_put(block, true);
1366         return err;
1367 }
1368 EXPORT_SYMBOL(tcf_block_get_ext);
1369
1370 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1371 {
1372         struct tcf_proto __rcu **p_filter_chain = priv;
1373
1374         rcu_assign_pointer(*p_filter_chain, tp_head);
1375 }
1376
1377 int tcf_block_get(struct tcf_block **p_block,
1378                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1379                   struct netlink_ext_ack *extack)
1380 {
1381         struct tcf_block_ext_info ei = {
1382                 .chain_head_change = tcf_chain_head_change_dflt,
1383                 .chain_head_change_priv = p_filter_chain,
1384         };
1385
1386         WARN_ON(!p_filter_chain);
1387         return tcf_block_get_ext(p_block, q, &ei, extack);
1388 }
1389 EXPORT_SYMBOL(tcf_block_get);
1390
1391 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1392  * actions should all be removed after flushing.
1393  */
1394 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1395                        struct tcf_block_ext_info *ei)
1396 {
1397         if (!block)
1398                 return;
1399         tcf_chain0_head_change_cb_del(block, ei);
1400         tcf_block_owner_del(block, q, ei->binder_type);
1401
1402         __tcf_block_put(block, q, ei, true);
1403 }
1404 EXPORT_SYMBOL(tcf_block_put_ext);
1405
1406 void tcf_block_put(struct tcf_block *block)
1407 {
1408         struct tcf_block_ext_info ei = {0, };
1409
1410         if (!block)
1411                 return;
1412         tcf_block_put_ext(block, block->q, &ei);
1413 }
1414
1415 EXPORT_SYMBOL(tcf_block_put);
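
/* Illustrative, hedged sketch (not part of the original file): the common way
 * a classful qdisc obtains its filter block in ->init() and releases it in
 * ->destroy(), letting tcf_chain_head_change_dflt() keep filter_list pointing
 * at the head of chain 0. The "example_sched_data" structure and function
 * names are made up for illustration.
 */
#if 0
struct example_sched_data {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
};

static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			      struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void example_qdisc_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif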
1416
1417 static int
1418 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1419                             void *cb_priv, bool add, bool offload_in_use,
1420                             struct netlink_ext_ack *extack)
1421 {
1422         struct tcf_chain *chain, *chain_prev;
1423         struct tcf_proto *tp, *tp_prev;
1424         int err;
1425
1426         lockdep_assert_held(&block->cb_lock);
1427
1428         for (chain = __tcf_get_next_chain(block, NULL);
1429              chain;
1430              chain_prev = chain,
1431                      chain = __tcf_get_next_chain(block, chain),
1432                      tcf_chain_put(chain_prev)) {
1433                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1434                      tp_prev = tp,
1435                              tp = __tcf_get_next_proto(chain, tp),
1436                              tcf_proto_put(tp_prev, true, NULL)) {
1437                         if (tp->ops->reoffload) {
1438                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1439                                                          extack);
1440                                 if (err && add)
1441                                         goto err_playback_remove;
1442                         } else if (add && offload_in_use) {
1443                                 err = -EOPNOTSUPP;
1444                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1445                                 goto err_playback_remove;
1446                         }
1447                 }
1448         }
1449
1450         return 0;
1451
1452 err_playback_remove:
1453         tcf_proto_put(tp, true, NULL);
1454         tcf_chain_put(chain);
1455         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1456                                     extack);
1457         return err;
1458 }
1459
1460 static int tcf_block_bind(struct tcf_block *block,
1461                           struct flow_block_offload *bo)
1462 {
1463         struct flow_block_cb *block_cb, *next;
1464         int err, i = 0;
1465
1466         lockdep_assert_held(&block->cb_lock);
1467
1468         list_for_each_entry(block_cb, &bo->cb_list, list) {
1469                 err = tcf_block_playback_offloads(block, block_cb->cb,
1470                                                   block_cb->cb_priv, true,
1471                                                   tcf_block_offload_in_use(block),
1472                                                   bo->extack);
1473                 if (err)
1474                         goto err_unroll;
1475                 if (!bo->unlocked_driver_cb)
1476                         block->lockeddevcnt++;
1477
1478                 i++;
1479         }
1480         list_splice(&bo->cb_list, &block->flow_block.cb_list);
1481
1482         return 0;
1483
1484 err_unroll:
1485         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1486                 if (i-- > 0) {
1487                         list_del(&block_cb->list);
1488                         tcf_block_playback_offloads(block, block_cb->cb,
1489                                                     block_cb->cb_priv, false,
1490                                                     tcf_block_offload_in_use(block),
1491                                                     NULL);
1492                         if (!bo->unlocked_driver_cb)
1493                                 block->lockeddevcnt--;
1494                 }
1495                 flow_block_cb_free(block_cb);
1496         }
1497
1498         return err;
1499 }
1500
1501 static void tcf_block_unbind(struct tcf_block *block,
1502                              struct flow_block_offload *bo)
1503 {
1504         struct flow_block_cb *block_cb, *next;
1505
1506         lockdep_assert_held(&block->cb_lock);
1507
1508         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1509                 tcf_block_playback_offloads(block, block_cb->cb,
1510                                             block_cb->cb_priv, false,
1511                                             tcf_block_offload_in_use(block),
1512                                             NULL);
1513                 list_del(&block_cb->list);
1514                 flow_block_cb_free(block_cb);
1515                 if (!bo->unlocked_driver_cb)
1516                         block->lockeddevcnt--;
1517         }
1518 }
1519
1520 static int tcf_block_setup(struct tcf_block *block,
1521                            struct flow_block_offload *bo)
1522 {
1523         int err;
1524
1525         switch (bo->command) {
1526         case FLOW_BLOCK_BIND:
1527                 err = tcf_block_bind(block, bo);
1528                 break;
1529         case FLOW_BLOCK_UNBIND:
1530                 err = 0;
1531                 tcf_block_unbind(block, bo);
1532                 break;
1533         default:
1534                 WARN_ON_ONCE(1);
1535                 err = -EOPNOTSUPP;
1536         }
1537
1538         return err;
1539 }
1540
1541 /* Main classifier routine: scans the classifier chain attached
1542  * to this qdisc, (optionally) tests for the protocol and asks
1543  * the specific classifiers.
1544  */
1545 static inline int __tcf_classify(struct sk_buff *skb,
1546                                  const struct tcf_proto *tp,
1547                                  const struct tcf_proto *orig_tp,
1548                                  struct tcf_result *res,
1549                                  bool compat_mode,
1550                                  u32 *last_executed_chain)
1551 {
1552 #ifdef CONFIG_NET_CLS_ACT
1553         const int max_reclassify_loop = 16;
1554         const struct tcf_proto *first_tp;
1555         int limit = 0;
1556
1557 reclassify:
1558 #endif
1559         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1560                 __be16 protocol = skb_protocol(skb, false);
1561                 int err;
1562
1563                 if (tp->protocol != protocol &&
1564                     tp->protocol != htons(ETH_P_ALL))
1565                         continue;
1566
1567                 err = tp->classify(skb, tp, res);
1568 #ifdef CONFIG_NET_CLS_ACT
1569                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1570                         first_tp = orig_tp;
1571                         *last_executed_chain = first_tp->chain->index;
1572                         goto reset;
1573                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1574                         first_tp = res->goto_tp;
1575                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1576                         goto reset;
1577                 }
1578 #endif
1579                 if (err >= 0)
1580                         return err;
1581         }
1582
1583         return TC_ACT_UNSPEC; /* signal: continue lookup */
1584 #ifdef CONFIG_NET_CLS_ACT
1585 reset:
1586         if (unlikely(limit++ >= max_reclassify_loop)) {
1587                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1588                                        tp->chain->block->index,
1589                                        tp->prio & 0xffff,
1590                                        ntohs(tp->protocol));
1591                 return TC_ACT_SHOT;
1592         }
1593
1594         tp = first_tp;
1595         goto reclassify;
1596 #endif
1597 }
1598
1599 int tcf_classify(struct sk_buff *skb,
1600                  const struct tcf_block *block,
1601                  const struct tcf_proto *tp,
1602                  struct tcf_result *res, bool compat_mode)
1603 {
1604 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1605         u32 last_executed_chain = 0;
1606
1607         return __tcf_classify(skb, tp, tp, res, compat_mode,
1608                               &last_executed_chain);
1609 #else
1610         u32 last_executed_chain = tp ? tp->chain->index : 0;
1611         const struct tcf_proto *orig_tp = tp;
1612         struct tc_skb_ext *ext;
1613         int ret;
1614
1615         if (block) {
1616                 ext = skb_ext_find(skb, TC_SKB_EXT);
1617
1618                 if (ext && ext->chain) {
1619                         struct tcf_chain *fchain;
1620
1621                         fchain = tcf_chain_lookup_rcu(block, ext->chain);
1622                         if (!fchain)
1623                                 return TC_ACT_SHOT;
1624
1625                         /* Consume, so cloned/redirect skbs won't inherit ext */
1626                         skb_ext_del(skb, TC_SKB_EXT);
1627
1628                         tp = rcu_dereference_bh(fchain->filter_chain);
1629                         last_executed_chain = fchain->index;
1630                 }
1631         }
1632
1633         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1634                              &last_executed_chain);
1635
1636         if (tc_skb_ext_tc_enabled()) {
1637                 /* If we missed on some chain */
1638                 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1639                         struct tc_skb_cb *cb = tc_skb_cb(skb);
1640
1641                         ext = tc_skb_ext_alloc(skb);
1642                         if (WARN_ON_ONCE(!ext))
1643                                 return TC_ACT_SHOT;
1644                         ext->chain = last_executed_chain;
1645                         ext->mru = cb->mru;
1646                         ext->post_ct = cb->post_ct;
1647                         ext->post_ct_snat = cb->post_ct_snat;
1648                         ext->post_ct_dnat = cb->post_ct_dnat;
1649                         ext->zone = cb->zone;
1650                 }
1651         }
1652
1653         return ret;
1654 #endif
1655 }
1656 EXPORT_SYMBOL(tcf_classify);
1657
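/* Illustrative sketch only: how a classful qdisc typically consumes
 * tcf_classify() from its enqueue path. The helper name is hypothetical;
 * only the call and the verdict handling follow the API defined above.
 */
#if 0	/* example only, never compiled */
static int example_classify(struct sk_buff *skb, const struct tcf_block *block,
			    struct tcf_proto *filter_list)
{
	struct tcf_result res;
	int err;

	err = tcf_classify(skb, block, filter_list, &res, false);
	switch (err) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return -1;			/* packet dropped or consumed */
	case TC_ACT_UNSPEC:
		return 0;			/* no verdict; caller picks a default class */
	default:
		return TC_H_MIN(res.classid);	/* minor ID selects the target class */
	}
}
#endif
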
1658 struct tcf_chain_info {
1659         struct tcf_proto __rcu **pprev;
1660         struct tcf_proto __rcu *next;
1661 };
1662
1663 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1664                                            struct tcf_chain_info *chain_info)
1665 {
1666         return tcf_chain_dereference(*chain_info->pprev, chain);
1667 }
1668
1669 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1670                                struct tcf_chain_info *chain_info,
1671                                struct tcf_proto *tp)
1672 {
1673         if (chain->flushing)
1674                 return -EAGAIN;
1675
1676         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1677         if (*chain_info->pprev == chain->filter_chain)
1678                 tcf_chain0_head_change(chain, tp);
1679         tcf_proto_get(tp);
1680         rcu_assign_pointer(*chain_info->pprev, tp);
1681
1682         return 0;
1683 }
1684
1685 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1686                                 struct tcf_chain_info *chain_info,
1687                                 struct tcf_proto *tp)
1688 {
1689         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1690
1691         tcf_proto_mark_delete(tp);
1692         if (tp == chain->filter_chain)
1693                 tcf_chain0_head_change(chain, next);
1694         RCU_INIT_POINTER(*chain_info->pprev, next);
1695 }
1696
1697 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1698                                            struct tcf_chain_info *chain_info,
1699                                            u32 protocol, u32 prio,
1700                                            bool prio_allocate);
1701
1702 /* Try to insert a new proto.
1703  * If a proto with the specified priority already exists, free the new
1704  * proto and return the existing one.
1705  */
1706
1707 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1708                                                     struct tcf_proto *tp_new,
1709                                                     u32 protocol, u32 prio,
1710                                                     bool rtnl_held)
1711 {
1712         struct tcf_chain_info chain_info;
1713         struct tcf_proto *tp;
1714         int err = 0;
1715
1716         mutex_lock(&chain->filter_chain_lock);
1717
1718         if (tcf_proto_exists_destroying(chain, tp_new)) {
1719                 mutex_unlock(&chain->filter_chain_lock);
1720                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1721                 return ERR_PTR(-EAGAIN);
1722         }
1723
1724         tp = tcf_chain_tp_find(chain, &chain_info,
1725                                protocol, prio, false);
1726         if (!tp)
1727                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1728         mutex_unlock(&chain->filter_chain_lock);
1729
1730         if (tp) {
1731                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1732                 tp_new = tp;
1733         } else if (err) {
1734                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1735                 tp_new = ERR_PTR(err);
1736         }
1737
1738         return tp_new;
1739 }
1740
1741 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1742                                       struct tcf_proto *tp, bool rtnl_held,
1743                                       struct netlink_ext_ack *extack)
1744 {
1745         struct tcf_chain_info chain_info;
1746         struct tcf_proto *tp_iter;
1747         struct tcf_proto **pprev;
1748         struct tcf_proto *next;
1749
1750         mutex_lock(&chain->filter_chain_lock);
1751
1752         /* Atomically find and remove tp from chain. */
1753         for (pprev = &chain->filter_chain;
1754              (tp_iter = tcf_chain_dereference(*pprev, chain));
1755              pprev = &tp_iter->next) {
1756                 if (tp_iter == tp) {
1757                         chain_info.pprev = pprev;
1758                         chain_info.next = tp_iter->next;
1759                         WARN_ON(tp_iter->deleting);
1760                         break;
1761                 }
1762         }
1763         /* Verify that tp still exists and no new filters were inserted
1764          * concurrently.
1765          * Mark tp for deletion if it is empty.
1766          */
1767         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1768                 mutex_unlock(&chain->filter_chain_lock);
1769                 return;
1770         }
1771
1772         tcf_proto_signal_destroying(chain, tp);
1773         next = tcf_chain_dereference(chain_info.next, chain);
1774         if (tp == chain->filter_chain)
1775                 tcf_chain0_head_change(chain, next);
1776         RCU_INIT_POINTER(*chain_info.pprev, next);
1777         mutex_unlock(&chain->filter_chain_lock);
1778
1779         tcf_proto_put(tp, rtnl_held, extack);
1780 }
1781
1782 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1783                                            struct tcf_chain_info *chain_info,
1784                                            u32 protocol, u32 prio,
1785                                            bool prio_allocate)
1786 {
1787         struct tcf_proto **pprev;
1788         struct tcf_proto *tp;
1789
1790         /* Check the chain for existence of proto-tcf with this priority */
1791         for (pprev = &chain->filter_chain;
1792              (tp = tcf_chain_dereference(*pprev, chain));
1793              pprev = &tp->next) {
1794                 if (tp->prio >= prio) {
1795                         if (tp->prio == prio) {
1796                                 if (prio_allocate ||
1797                                     (tp->protocol != protocol && protocol))
1798                                         return ERR_PTR(-EINVAL);
1799                         } else {
1800                                 tp = NULL;
1801                         }
1802                         break;
1803                 }
1804         }
1805         chain_info->pprev = pprev;
1806         if (tp) {
1807                 chain_info->next = tp->next;
1808                 tcf_proto_get(tp);
1809         } else {
1810                 chain_info->next = NULL;
1811         }
1812         return tp;
1813 }
1814
1815 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1816                          struct tcf_proto *tp, struct tcf_block *block,
1817                          struct Qdisc *q, u32 parent, void *fh,
1818                          u32 portid, u32 seq, u16 flags, int event,
1819                          bool terse_dump, bool rtnl_held)
1820 {
1821         struct tcmsg *tcm;
1822         struct nlmsghdr  *nlh;
1823         unsigned char *b = skb_tail_pointer(skb);
1824
1825         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1826         if (!nlh)
1827                 goto out_nlmsg_trim;
1828         tcm = nlmsg_data(nlh);
1829         tcm->tcm_family = AF_UNSPEC;
1830         tcm->tcm__pad1 = 0;
1831         tcm->tcm__pad2 = 0;
1832         if (q) {
1833                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1834                 tcm->tcm_parent = parent;
1835         } else {
1836                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1837                 tcm->tcm_block_index = block->index;
1838         }
1839         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1840         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1841                 goto nla_put_failure;
1842         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1843                 goto nla_put_failure;
1844         if (!fh) {
1845                 tcm->tcm_handle = 0;
1846         } else if (terse_dump) {
1847                 if (tp->ops->terse_dump) {
1848                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1849                                                 rtnl_held) < 0)
1850                                 goto nla_put_failure;
1851                 } else {
1852                         goto cls_op_not_supp;
1853                 }
1854         } else {
1855                 if (tp->ops->dump &&
1856                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1857                         goto nla_put_failure;
1858         }
1859         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1860         return skb->len;
1861
1862 out_nlmsg_trim:
1863 nla_put_failure:
1864 cls_op_not_supp:
1865         nlmsg_trim(skb, b);
1866         return -1;
1867 }
1868
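/* Illustrative sketch only: tcm_info, as filled in by tcf_fill_node() above
 * and parsed by the tc_*_tfilter() handlers below, carries the filter
 * priority in its upper 16 bits and the protocol in its lower 16 bits.
 */
#if 0	/* example only, never compiled */
static void example_tcm_info_encoding(void)
{
	u32 prio = 10U << 16;	/* "prio 10" in the form kept in tp->prio */
	u32 info = TC_H_MAKE(prio, htons(ETH_P_IP));

	/* The request handlers recover both halves the same way:
	 * TC_H_MAJ(info) == prio and TC_H_MIN(info) == htons(ETH_P_IP).
	 */
	(void)info;
}
#endif
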
1869 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1870                           struct nlmsghdr *n, struct tcf_proto *tp,
1871                           struct tcf_block *block, struct Qdisc *q,
1872                           u32 parent, void *fh, int event, bool unicast,
1873                           bool rtnl_held)
1874 {
1875         struct sk_buff *skb;
1876         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1877         int err = 0;
1878
1879         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1880         if (!skb)
1881                 return -ENOBUFS;
1882
1883         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1884                           n->nlmsg_seq, n->nlmsg_flags, event,
1885                           false, rtnl_held) <= 0) {
1886                 kfree_skb(skb);
1887                 return -EINVAL;
1888         }
1889
1890         if (unicast)
1891                 err = rtnl_unicast(skb, net, portid);
1892         else
1893                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1894                                      n->nlmsg_flags & NLM_F_ECHO);
1895         return err;
1896 }
1897
1898 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1899                               struct nlmsghdr *n, struct tcf_proto *tp,
1900                               struct tcf_block *block, struct Qdisc *q,
1901                               u32 parent, void *fh, bool unicast, bool *last,
1902                               bool rtnl_held, struct netlink_ext_ack *extack)
1903 {
1904         struct sk_buff *skb;
1905         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1906         int err;
1907
1908         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1909         if (!skb)
1910                 return -ENOBUFS;
1911
1912         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1913                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1914                           false, rtnl_held) <= 0) {
1915                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1916                 kfree_skb(skb);
1917                 return -EINVAL;
1918         }
1919
1920         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1921         if (err) {
1922                 kfree_skb(skb);
1923                 return err;
1924         }
1925
1926         if (unicast)
1927                 err = rtnl_unicast(skb, net, portid);
1928         else
1929                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1930                                      n->nlmsg_flags & NLM_F_ECHO);
1931         if (err < 0)
1932                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1933
1934         return err;
1935 }
1936
1937 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1938                                  struct tcf_block *block, struct Qdisc *q,
1939                                  u32 parent, struct nlmsghdr *n,
1940                                  struct tcf_chain *chain, int event)
1941 {
1942         struct tcf_proto *tp;
1943
1944         for (tp = tcf_get_next_proto(chain, NULL);
1945              tp; tp = tcf_get_next_proto(chain, tp))
1946                 tfilter_notify(net, oskb, n, tp, block,
1947                                q, parent, NULL, event, false, true);
1948 }
1949
1950 static void tfilter_put(struct tcf_proto *tp, void *fh)
1951 {
1952         if (tp->ops->put && fh)
1953                 tp->ops->put(tp, fh);
1954 }
1955
1956 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1957                           struct netlink_ext_ack *extack)
1958 {
1959         struct net *net = sock_net(skb->sk);
1960         struct nlattr *tca[TCA_MAX + 1];
1961         char name[IFNAMSIZ];
1962         struct tcmsg *t;
1963         u32 protocol;
1964         u32 prio;
1965         bool prio_allocate;
1966         u32 parent;
1967         u32 chain_index;
1968         struct Qdisc *q;
1969         struct tcf_chain_info chain_info;
1970         struct tcf_chain *chain;
1971         struct tcf_block *block;
1972         struct tcf_proto *tp;
1973         unsigned long cl;
1974         void *fh;
1975         int err;
1976         int tp_created;
1977         bool rtnl_held = false;
1978         u32 flags;
1979
1980 replay:
1981         tp_created = 0;
1982
1983         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1984                                      rtm_tca_policy, extack);
1985         if (err < 0)
1986                 return err;
1987
1988         t = nlmsg_data(n);
1989         protocol = TC_H_MIN(t->tcm_info);
1990         prio = TC_H_MAJ(t->tcm_info);
1991         prio_allocate = false;
1992         parent = t->tcm_parent;
1993         tp = NULL;
1994         cl = 0;
1995         block = NULL;
1996         q = NULL;
1997         chain = NULL;
1998         flags = 0;
1999
2000         if (prio == 0) {
2001                 /* If no priority is provided by the user,
2002                  * we allocate one.
2003                  */
2004                 if (n->nlmsg_flags & NLM_F_CREATE) {
2005                         prio = TC_H_MAKE(0x80000000U, 0U);
2006                         prio_allocate = true;
2007                 } else {
2008                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2009                         return -ENOENT;
2010                 }
2011         }
2012
2013         /* Find head of filter chain. */
2014
2015         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2016         if (err)
2017                 return err;
2018
2019         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2020                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2021                 err = -EINVAL;
2022                 goto errout;
2023         }
2024
2025         /* Take the rtnl mutex if any of: rtnl_held was set on a previous
2026          * iteration, the block is shared (no qdisc found), the qdisc is not
2027          * unlocked, the classifier type is not specified, or the classifier
2028          * is not unlocked.
2029          */
2029         if (rtnl_held ||
2030             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2031             !tcf_proto_is_unlocked(name)) {
2032                 rtnl_held = true;
2033                 rtnl_lock();
2034         }
2035
2036         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2037         if (err)
2038                 goto errout;
2039
2040         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2041                                  extack);
2042         if (IS_ERR(block)) {
2043                 err = PTR_ERR(block);
2044                 goto errout;
2045         }
2046         block->classid = parent;
2047
2048         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2049         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2050                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2051                 err = -EINVAL;
2052                 goto errout;
2053         }
2054         chain = tcf_chain_get(block, chain_index, true);
2055         if (!chain) {
2056                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2057                 err = -ENOMEM;
2058                 goto errout;
2059         }
2060
2061         mutex_lock(&chain->filter_chain_lock);
2062         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2063                                prio, prio_allocate);
2064         if (IS_ERR(tp)) {
2065                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2066                 err = PTR_ERR(tp);
2067                 goto errout_locked;
2068         }
2069
2070         if (tp == NULL) {
2071                 struct tcf_proto *tp_new = NULL;
2072
2073                 if (chain->flushing) {
2074                         err = -EAGAIN;
2075                         goto errout_locked;
2076                 }
2077
2078                 /* Proto-tcf does not exist, create new one */
2079
2080                 if (tca[TCA_KIND] == NULL || !protocol) {
2081                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2082                         err = -EINVAL;
2083                         goto errout_locked;
2084                 }
2085
2086                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2087                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2088                         err = -ENOENT;
2089                         goto errout_locked;
2090                 }
2091
2092                 if (prio_allocate)
2093                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2094                                                                &chain_info));
2095
2096                 mutex_unlock(&chain->filter_chain_lock);
2097                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2098                                           rtnl_held, extack);
2099                 if (IS_ERR(tp_new)) {
2100                         err = PTR_ERR(tp_new);
2101                         goto errout_tp;
2102                 }
2103
2104                 tp_created = 1;
2105                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2106                                                 rtnl_held);
2107                 if (IS_ERR(tp)) {
2108                         err = PTR_ERR(tp);
2109                         goto errout_tp;
2110                 }
2111         } else {
2112                 mutex_unlock(&chain->filter_chain_lock);
2113         }
2114
2115         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2116                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2117                 err = -EINVAL;
2118                 goto errout;
2119         }
2120
2121         fh = tp->ops->get(tp, t->tcm_handle);
2122
2123         if (!fh) {
2124                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2125                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2126                         err = -ENOENT;
2127                         goto errout;
2128                 }
2129         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2130                 tfilter_put(tp, fh);
2131                 NL_SET_ERR_MSG(extack, "Filter already exists");
2132                 err = -EEXIST;
2133                 goto errout;
2134         }
2135
2136         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2137                 tfilter_put(tp, fh);
2138                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2139                 err = -EINVAL;
2140                 goto errout;
2141         }
2142
2143         if (!(n->nlmsg_flags & NLM_F_CREATE))
2144                 flags |= TCA_ACT_FLAGS_REPLACE;
2145         if (!rtnl_held)
2146                 flags |= TCA_ACT_FLAGS_NO_RTNL;
2147         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2148                               flags, extack);
2149         if (err == 0) {
2150                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2151                                RTM_NEWTFILTER, false, rtnl_held);
2152                 tfilter_put(tp, fh);
2153                 /* q pointer is NULL for shared blocks */
2154                 if (q)
2155                         q->flags &= ~TCQ_F_CAN_BYPASS;
2156         }
2157
2158 errout:
2159         if (err && tp_created)
2160                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2161 errout_tp:
2162         if (chain) {
2163                 if (tp && !IS_ERR(tp))
2164                         tcf_proto_put(tp, rtnl_held, NULL);
2165                 if (!tp_created)
2166                         tcf_chain_put(chain);
2167         }
2168         tcf_block_release(q, block, rtnl_held);
2169
2170         if (rtnl_held)
2171                 rtnl_unlock();
2172
2173         if (err == -EAGAIN) {
2174                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2175                  * of target chain.
2176                  */
2177                 rtnl_held = true;
2178                 /* Replay the request. */
2179                 goto replay;
2180         }
2181         return err;
2182
2183 errout_locked:
2184         mutex_unlock(&chain->filter_chain_lock);
2185         goto errout;
2186 }
2187
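/* Usage sketch only: the requests handled by tc_new_tfilter() above are what
 * iproute2's "tc filter" front end emits; device, parent and match values
 * below are placeholders.
 *
 *   # "add" sends RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL; omitting
 *   # "prio" means priority 0, so the kernel auto-allocates one.
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip dport 80 0xffff flowid 1:10
 *
 *   # explicit priority and chain index (the chain index must not exceed
 *   # TC_ACT_EXT_VAL_MASK, as checked above)
 *   tc filter add dev eth0 parent 1: prio 10 chain 5 protocol ip \
 *           flower ip_proto tcp dst_port 80 action drop
 *
 * "replace" sets only NLM_F_CREATE and "change" sets neither flag, which is
 * roughly how the NLM_F_CREATE/NLM_F_EXCL checks above are exercised.
 */
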
2188 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2189                           struct netlink_ext_ack *extack)
2190 {
2191         struct net *net = sock_net(skb->sk);
2192         struct nlattr *tca[TCA_MAX + 1];
2193         char name[IFNAMSIZ];
2194         struct tcmsg *t;
2195         u32 protocol;
2196         u32 prio;
2197         u32 parent;
2198         u32 chain_index;
2199         struct Qdisc *q = NULL;
2200         struct tcf_chain_info chain_info;
2201         struct tcf_chain *chain = NULL;
2202         struct tcf_block *block = NULL;
2203         struct tcf_proto *tp = NULL;
2204         unsigned long cl = 0;
2205         void *fh = NULL;
2206         int err;
2207         bool rtnl_held = false;
2208
2209         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2210                                      rtm_tca_policy, extack);
2211         if (err < 0)
2212                 return err;
2213
2214         t = nlmsg_data(n);
2215         protocol = TC_H_MIN(t->tcm_info);
2216         prio = TC_H_MAJ(t->tcm_info);
2217         parent = t->tcm_parent;
2218
2219         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2220                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2221                 return -ENOENT;
2222         }
2223
2224         /* Find head of filter chain. */
2225
2226         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2227         if (err)
2228                 return err;
2229
2230         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2231                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2232                 err = -EINVAL;
2233                 goto errout;
2234         }
2235         /* Take the rtnl mutex if any of: the whole chain is being flushed, the
2236          * block is shared (no qdisc found), the qdisc is not unlocked, the
2237          * classifier type is not specified, or the classifier is not unlocked.
2238          */
2239         if (!prio ||
2240             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2241             !tcf_proto_is_unlocked(name)) {
2242                 rtnl_held = true;
2243                 rtnl_lock();
2244         }
2245
2246         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2247         if (err)
2248                 goto errout;
2249
2250         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2251                                  extack);
2252         if (IS_ERR(block)) {
2253                 err = PTR_ERR(block);
2254                 goto errout;
2255         }
2256
2257         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2258         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2259                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2260                 err = -EINVAL;
2261                 goto errout;
2262         }
2263         chain = tcf_chain_get(block, chain_index, false);
2264         if (!chain) {
2265                 /* User requested flush on non-existent chain. Nothing to do,
2266                  * so just return success.
2267                  */
2268                 if (prio == 0) {
2269                         err = 0;
2270                         goto errout;
2271                 }
2272                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2273                 err = -ENOENT;
2274                 goto errout;
2275         }
2276
2277         if (prio == 0) {
2278                 tfilter_notify_chain(net, skb, block, q, parent, n,
2279                                      chain, RTM_DELTFILTER);
2280                 tcf_chain_flush(chain, rtnl_held);
2281                 err = 0;
2282                 goto errout;
2283         }
2284
2285         mutex_lock(&chain->filter_chain_lock);
2286         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2287                                prio, false);
2288         if (!tp || IS_ERR(tp)) {
2289                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2290                 err = tp ? PTR_ERR(tp) : -ENOENT;
2291                 goto errout_locked;
2292         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2293                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2294                 err = -EINVAL;
2295                 goto errout_locked;
2296         } else if (t->tcm_handle == 0) {
2297                 tcf_proto_signal_destroying(chain, tp);
2298                 tcf_chain_tp_remove(chain, &chain_info, tp);
2299                 mutex_unlock(&chain->filter_chain_lock);
2300
2301                 tcf_proto_put(tp, rtnl_held, NULL);
2302                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2303                                RTM_DELTFILTER, false, rtnl_held);
2304                 err = 0;
2305                 goto errout;
2306         }
2307         mutex_unlock(&chain->filter_chain_lock);
2308
2309         fh = tp->ops->get(tp, t->tcm_handle);
2310
2311         if (!fh) {
2312                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2313                 err = -ENOENT;
2314         } else {
2315                 bool last;
2316
2317                 err = tfilter_del_notify(net, skb, n, tp, block,
2318                                          q, parent, fh, false, &last,
2319                                          rtnl_held, extack);
2320
2321                 if (err)
2322                         goto errout;
2323                 if (last)
2324                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2325         }
2326
2327 errout:
2328         if (chain) {
2329                 if (tp && !IS_ERR(tp))
2330                         tcf_proto_put(tp, rtnl_held, NULL);
2331                 tcf_chain_put(chain);
2332         }
2333         tcf_block_release(q, block, rtnl_held);
2334
2335         if (rtnl_held)
2336                 rtnl_unlock();
2337
2338         return err;
2339
2340 errout_locked:
2341         mutex_unlock(&chain->filter_chain_lock);
2342         goto errout;
2343 }
2344
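/* Usage sketch only: the three delete shapes distinguished by tc_del_tfilter()
 * above, expressed with iproute2 (device and handle values are placeholders):
 *
 *   # priority 0 and no protocol/handle/kind: flush the whole chain
 *   tc filter del dev eth0 parent 1:
 *
 *   # priority/protocol given but tcm_handle == 0: remove that entire tcf_proto
 *   tc filter del dev eth0 parent 1: prio 10 protocol ip
 *
 *   # a specific handle: delete one filter, dropping the tcf_proto once its
 *   # ->delete() reports "last"
 *   tc filter del dev eth0 parent 1: prio 10 protocol ip handle 800::800 u32
 */
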
2345 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2346                           struct netlink_ext_ack *extack)
2347 {
2348         struct net *net = sock_net(skb->sk);
2349         struct nlattr *tca[TCA_MAX + 1];
2350         char name[IFNAMSIZ];
2351         struct tcmsg *t;
2352         u32 protocol;
2353         u32 prio;
2354         u32 parent;
2355         u32 chain_index;
2356         struct Qdisc *q = NULL;
2357         struct tcf_chain_info chain_info;
2358         struct tcf_chain *chain = NULL;
2359         struct tcf_block *block = NULL;
2360         struct tcf_proto *tp = NULL;
2361         unsigned long cl = 0;
2362         void *fh = NULL;
2363         int err;
2364         bool rtnl_held = false;
2365
2366         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2367                                      rtm_tca_policy, extack);
2368         if (err < 0)
2369                 return err;
2370
2371         t = nlmsg_data(n);
2372         protocol = TC_H_MIN(t->tcm_info);
2373         prio = TC_H_MAJ(t->tcm_info);
2374         parent = t->tcm_parent;
2375
2376         if (prio == 0) {
2377                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2378                 return -ENOENT;
2379         }
2380
2381         /* Find head of filter chain. */
2382
2383         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2384         if (err)
2385                 return err;
2386
2387         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2388                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2389                 err = -EINVAL;
2390                 goto errout;
2391         }
2392         /* Take the rtnl mutex if any of: the block is shared (no qdisc found),
2393          * the qdisc is not unlocked, the classifier type is not specified, or
2394          * the classifier is not unlocked.
2395          */
2396         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2397             !tcf_proto_is_unlocked(name)) {
2398                 rtnl_held = true;
2399                 rtnl_lock();
2400         }
2401
2402         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2403         if (err)
2404                 goto errout;
2405
2406         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2407                                  extack);
2408         if (IS_ERR(block)) {
2409                 err = PTR_ERR(block);
2410                 goto errout;
2411         }
2412
2413         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2414         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2415                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2416                 err = -EINVAL;
2417                 goto errout;
2418         }
2419         chain = tcf_chain_get(block, chain_index, false);
2420         if (!chain) {
2421                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2422                 err = -EINVAL;
2423                 goto errout;
2424         }
2425
2426         mutex_lock(&chain->filter_chain_lock);
2427         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2428                                prio, false);
2429         mutex_unlock(&chain->filter_chain_lock);
2430         if (!tp || IS_ERR(tp)) {
2431                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2432                 err = tp ? PTR_ERR(tp) : -ENOENT;
2433                 goto errout;
2434         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2435                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2436                 err = -EINVAL;
2437                 goto errout;
2438         }
2439
2440         fh = tp->ops->get(tp, t->tcm_handle);
2441
2442         if (!fh) {
2443                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2444                 err = -ENOENT;
2445         } else {
2446                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2447                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2448                 if (err < 0)
2449                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2450         }
2451
2452         tfilter_put(tp, fh);
2453 errout:
2454         if (chain) {
2455                 if (tp && !IS_ERR(tp))
2456                         tcf_proto_put(tp, rtnl_held, NULL);
2457                 tcf_chain_put(chain);
2458         }
2459         tcf_block_release(q, block, rtnl_held);
2460
2461         if (rtnl_held)
2462                 rtnl_unlock();
2463
2464         return err;
2465 }
2466
2467 struct tcf_dump_args {
2468         struct tcf_walker w;
2469         struct sk_buff *skb;
2470         struct netlink_callback *cb;
2471         struct tcf_block *block;
2472         struct Qdisc *q;
2473         u32 parent;
2474         bool terse_dump;
2475 };
2476
2477 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2478 {
2479         struct tcf_dump_args *a = (void *)arg;
2480         struct net *net = sock_net(a->skb->sk);
2481
2482         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2483                              n, NETLINK_CB(a->cb->skb).portid,
2484                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2485                              RTM_NEWTFILTER, a->terse_dump, true);
2486 }
2487
2488 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2489                            struct sk_buff *skb, struct netlink_callback *cb,
2490                            long index_start, long *p_index, bool terse)
2491 {
2492         struct net *net = sock_net(skb->sk);
2493         struct tcf_block *block = chain->block;
2494         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2495         struct tcf_proto *tp, *tp_prev;
2496         struct tcf_dump_args arg;
2497
2498         for (tp = __tcf_get_next_proto(chain, NULL);
2499              tp;
2500              tp_prev = tp,
2501                      tp = __tcf_get_next_proto(chain, tp),
2502                      tcf_proto_put(tp_prev, true, NULL),
2503                      (*p_index)++) {
2504                 if (*p_index < index_start)
2505                         continue;
2506                 if (TC_H_MAJ(tcm->tcm_info) &&
2507                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2508                         continue;
2509                 if (TC_H_MIN(tcm->tcm_info) &&
2510                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2511                         continue;
2512                 if (*p_index > index_start)
2513                         memset(&cb->args[1], 0,
2514                                sizeof(cb->args) - sizeof(cb->args[0]));
2515                 if (cb->args[1] == 0) {
2516                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2517                                           NETLINK_CB(cb->skb).portid,
2518                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2519                                           RTM_NEWTFILTER, false, true) <= 0)
2520                                 goto errout;
2521                         cb->args[1] = 1;
2522                 }
2523                 if (!tp->ops->walk)
2524                         continue;
2525                 arg.w.fn = tcf_node_dump;
2526                 arg.skb = skb;
2527                 arg.cb = cb;
2528                 arg.block = block;
2529                 arg.q = q;
2530                 arg.parent = parent;
2531                 arg.w.stop = 0;
2532                 arg.w.skip = cb->args[1] - 1;
2533                 arg.w.count = 0;
2534                 arg.w.cookie = cb->args[2];
2535                 arg.terse_dump = terse;
2536                 tp->ops->walk(tp, &arg.w, true);
2537                 cb->args[2] = arg.w.cookie;
2538                 cb->args[1] = arg.w.count + 1;
2539                 if (arg.w.stop)
2540                         goto errout;
2541         }
2542         return true;
2543
2544 errout:
2545         tcf_proto_put(tp, true, NULL);
2546         return false;
2547 }
2548
2549 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2550         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2551 };
2552
2553 /* called with RTNL */
2554 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2555 {
2556         struct tcf_chain *chain, *chain_prev;
2557         struct net *net = sock_net(skb->sk);
2558         struct nlattr *tca[TCA_MAX + 1];
2559         struct Qdisc *q = NULL;
2560         struct tcf_block *block;
2561         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2562         bool terse_dump = false;
2563         long index_start;
2564         long index;
2565         u32 parent;
2566         int err;
2567
2568         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2569                 return skb->len;
2570
2571         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2572                                      tcf_tfilter_dump_policy, cb->extack);
2573         if (err)
2574                 return err;
2575
2576         if (tca[TCA_DUMP_FLAGS]) {
2577                 struct nla_bitfield32 flags =
2578                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2579
2580                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2581         }
2582
2583         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2584                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2585                 if (!block)
2586                         goto out;
2587                 /* If we work with a block index, q is NULL and the parent value
2588                  * will never be used in the following code. The check in
2589                  * tcf_fill_node prevents it. However, the compiler does not see
2590                  * that far, so set parent to zero to silence the warning about
2591                  * parent being uninitialized.
2592                  */
2593                 parent = 0;
2594         } else {
2595                 const struct Qdisc_class_ops *cops;
2596                 struct net_device *dev;
2597                 unsigned long cl = 0;
2598
2599                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2600                 if (!dev)
2601                         return skb->len;
2602
2603                 parent = tcm->tcm_parent;
2604                 if (!parent)
2605                         q = rtnl_dereference(dev->qdisc);
2606                 else
2607                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2608                 if (!q)
2609                         goto out;
2610                 cops = q->ops->cl_ops;
2611                 if (!cops)
2612                         goto out;
2613                 if (!cops->tcf_block)
2614                         goto out;
2615                 if (TC_H_MIN(tcm->tcm_parent)) {
2616                         cl = cops->find(q, tcm->tcm_parent);
2617                         if (cl == 0)
2618                                 goto out;
2619                 }
2620                 block = cops->tcf_block(q, cl, NULL);
2621                 if (!block)
2622                         goto out;
2623                 parent = block->classid;
2624                 if (tcf_block_shared(block))
2625                         q = NULL;
2626         }
2627
2628         index_start = cb->args[0];
2629         index = 0;
2630
2631         for (chain = __tcf_get_next_chain(block, NULL);
2632              chain;
2633              chain_prev = chain,
2634                      chain = __tcf_get_next_chain(block, chain),
2635                      tcf_chain_put(chain_prev)) {
2636                 if (tca[TCA_CHAIN] &&
2637                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2638                         continue;
2639                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2640                                     index_start, &index, terse_dump)) {
2641                         tcf_chain_put(chain);
2642                         err = -EMSGSIZE;
2643                         break;
2644                 }
2645         }
2646
2647         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2648                 tcf_block_refcnt_put(block, true);
2649         cb->args[0] = index;
2650
2651 out:
2652         /* If we made no progress, the error (EMSGSIZE) is real */
2653         if (skb->len == 0 && err)
2654                 return err;
2655         return skb->len;
2656 }
2657
2658 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2659                               void *tmplt_priv, u32 chain_index,
2660                               struct net *net, struct sk_buff *skb,
2661                               struct tcf_block *block,
2662                               u32 portid, u32 seq, u16 flags, int event)
2663 {
2664         unsigned char *b = skb_tail_pointer(skb);
2665         const struct tcf_proto_ops *ops;
2666         struct nlmsghdr *nlh;
2667         struct tcmsg *tcm;
2668         void *priv;
2669
2670         ops = tmplt_ops;
2671         priv = tmplt_priv;
2672
2673         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2674         if (!nlh)
2675                 goto out_nlmsg_trim;
2676         tcm = nlmsg_data(nlh);
2677         tcm->tcm_family = AF_UNSPEC;
2678         tcm->tcm__pad1 = 0;
2679         tcm->tcm__pad2 = 0;
2680         tcm->tcm_handle = 0;
2681         if (block->q) {
2682                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2683                 tcm->tcm_parent = block->q->handle;
2684         } else {
2685                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2686                 tcm->tcm_block_index = block->index;
2687         }
2688
2689         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2690                 goto nla_put_failure;
2691
2692         if (ops) {
2693                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2694                         goto nla_put_failure;
2695                 if (ops->tmplt_dump(skb, net, priv) < 0)
2696                         goto nla_put_failure;
2697         }
2698
2699         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2700         return skb->len;
2701
2702 out_nlmsg_trim:
2703 nla_put_failure:
2704         nlmsg_trim(skb, b);
2705         return -EMSGSIZE;
2706 }
2707
2708 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2709                            u32 seq, u16 flags, int event, bool unicast)
2710 {
2711         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2712         struct tcf_block *block = chain->block;
2713         struct net *net = block->net;
2714         struct sk_buff *skb;
2715         int err = 0;
2716
2717         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2718         if (!skb)
2719                 return -ENOBUFS;
2720
2721         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2722                                chain->index, net, skb, block, portid,
2723                                seq, flags, event) <= 0) {
2724                 kfree_skb(skb);
2725                 return -EINVAL;
2726         }
2727
2728         if (unicast)
2729                 err = rtnl_unicast(skb, net, portid);
2730         else
2731                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2732                                      flags & NLM_F_ECHO);
2733
2734         return err;
2735 }
2736
2737 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2738                                   void *tmplt_priv, u32 chain_index,
2739                                   struct tcf_block *block, struct sk_buff *oskb,
2740                                   u32 seq, u16 flags, bool unicast)
2741 {
2742         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2743         struct net *net = block->net;
2744         struct sk_buff *skb;
2745
2746         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2747         if (!skb)
2748                 return -ENOBUFS;
2749
2750         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2751                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2752                 kfree_skb(skb);
2753                 return -EINVAL;
2754         }
2755
2756         if (unicast)
2757                 return rtnl_unicast(skb, net, portid);
2758
2759         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2760 }
2761
2762 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2763                               struct nlattr **tca,
2764                               struct netlink_ext_ack *extack)
2765 {
2766         const struct tcf_proto_ops *ops;
2767         char name[IFNAMSIZ];
2768         void *tmplt_priv;
2769
2770         /* If kind is not set, the user did not specify a template. */
2771         if (!tca[TCA_KIND])
2772                 return 0;
2773
2774         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2775                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2776                 return -EINVAL;
2777         }
2778
2779         ops = tcf_proto_lookup_ops(name, true, extack);
2780         if (IS_ERR(ops))
2781                 return PTR_ERR(ops);
2782         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2783                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                     module_put(ops->owner); /* release ref taken by tcf_proto_lookup_ops() */
2784                 return -EOPNOTSUPP;
2785         }
2786
2787         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2788         if (IS_ERR(tmplt_priv)) {
2789                 module_put(ops->owner);
2790                 return PTR_ERR(tmplt_priv);
2791         }
2792         chain->tmplt_ops = ops;
2793         chain->tmplt_priv = tmplt_priv;
2794         return 0;
2795 }
2796
2797 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2798                                void *tmplt_priv)
2799 {
2800         /* If template ops were never set, there is nothing to destroy. */
2801         if (!tmplt_ops)
2802                 return;
2803
2804         tmplt_ops->tmplt_destroy(tmplt_priv);
2805         module_put(tmplt_ops->owner);
2806 }
2807
2808 /* Add/delete/get a chain */
2809
2810 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2811                         struct netlink_ext_ack *extack)
2812 {
2813         struct net *net = sock_net(skb->sk);
2814         struct nlattr *tca[TCA_MAX + 1];
2815         struct tcmsg *t;
2816         u32 parent;
2817         u32 chain_index;
2818         struct Qdisc *q;
2819         struct tcf_chain *chain;
2820         struct tcf_block *block;
2821         unsigned long cl;
2822         int err;
2823
2824 replay:
2825         q = NULL;
2826         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2827                                      rtm_tca_policy, extack);
2828         if (err < 0)
2829                 return err;
2830
2831         t = nlmsg_data(n);
2832         parent = t->tcm_parent;
2833         cl = 0;
2834
2835         block = tcf_block_find(net, &q, &parent, &cl,
2836                                t->tcm_ifindex, t->tcm_block_index, extack);
2837         if (IS_ERR(block))
2838                 return PTR_ERR(block);
2839
2840         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2841         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2842                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2843                 err = -EINVAL;
2844                 goto errout_block;
2845         }
2846
2847         mutex_lock(&block->lock);
2848         chain = tcf_chain_lookup(block, chain_index);
2849         if (n->nlmsg_type == RTM_NEWCHAIN) {
2850                 if (chain) {
2851                         if (tcf_chain_held_by_acts_only(chain)) {
2852                                 /* The chain exists only because there is
2853                                  * some action referencing it.
2854                                  */
2855                                 tcf_chain_hold(chain);
2856                         } else {
2857                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2858                                 err = -EEXIST;
2859                                 goto errout_block_locked;
2860                         }
2861                 } else {
2862                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2863                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2864                                 err = -ENOENT;
2865                                 goto errout_block_locked;
2866                         }
2867                         chain = tcf_chain_create(block, chain_index);
2868                         if (!chain) {
2869                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2870                                 err = -ENOMEM;
2871                                 goto errout_block_locked;
2872                         }
2873                 }
2874         } else {
2875                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2876                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2877                         err = -EINVAL;
2878                         goto errout_block_locked;
2879                 }
2880                 tcf_chain_hold(chain);
2881         }
2882
2883         if (n->nlmsg_type == RTM_NEWCHAIN) {
2884                 /* Modifying chain requires holding parent block lock. In case
2885                  * the chain was successfully added, take a reference to the
2886                  * chain. This ensures that an empty chain does not disappear at
2887                  * the end of this function.
2888                  */
2889                 tcf_chain_hold(chain);
2890                 chain->explicitly_created = true;
2891         }
2892         mutex_unlock(&block->lock);
2893
2894         switch (n->nlmsg_type) {
2895         case RTM_NEWCHAIN:
2896                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2897                 if (err) {
2898                         tcf_chain_put_explicitly_created(chain);
2899                         goto errout;
2900                 }
2901
2902                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2903                                 RTM_NEWCHAIN, false);
2904                 break;
2905         case RTM_DELCHAIN:
2906                 tfilter_notify_chain(net, skb, block, q, parent, n,
2907                                      chain, RTM_DELTFILTER);
2908                 /* Flush the chain first as the user requested chain removal. */
2909                 tcf_chain_flush(chain, true);
2910                 /* In case the chain was successfully deleted, put a reference
2911                  * to the chain previously taken during addition.
2912                  */
2913                 tcf_chain_put_explicitly_created(chain);
2914                 break;
2915         case RTM_GETCHAIN:
2916                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2917                                       n->nlmsg_flags, n->nlmsg_type, true);
2918                 if (err < 0)
2919                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2920                 break;
2921         default:
2922                 err = -EOPNOTSUPP;
2923                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2924                 goto errout;
2925         }
2926
2927 errout:
2928         tcf_chain_put(chain);
2929 errout_block:
2930         tcf_block_release(q, block, true);
2931         if (err == -EAGAIN)
2932                 /* Replay the request. */
2933                 goto replay;
2934         return err;
2935
2936 errout_block_locked:
2937         mutex_unlock(&block->lock);
2938         goto errout_block;
2939 }
2940
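/* Usage sketch only: explicit chain management as handled by tc_ctl_chain()
 * above, expressed with iproute2 (device names and addresses are
 * placeholders):
 *
 *   tc chain add dev eth0 ingress chain 5                  # RTM_NEWCHAIN
 *   tc chain add dev eth0 ingress chain 6 protocol ip \
 *           flower dst_ip 192.0.2.0/24                     # chain with a flower template
 *   tc chain get dev eth0 ingress chain 5                  # RTM_GETCHAIN
 *   tc chain del dev eth0 ingress chain 5                  # RTM_DELCHAIN (filters flushed first)
 */
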
2941 /* called with RTNL */
2942 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2943 {
2944         struct net *net = sock_net(skb->sk);
2945         struct nlattr *tca[TCA_MAX + 1];
2946         struct Qdisc *q = NULL;
2947         struct tcf_block *block;
2948         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2949         struct tcf_chain *chain;
2950         long index_start;
2951         long index;
2952         int err;
2953
2954         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2955                 return skb->len;
2956
2957         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2958                                      rtm_tca_policy, cb->extack);
2959         if (err)
2960                 return err;
2961
2962         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2963                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2964                 if (!block)
2965                         goto out;
2966         } else {
2967                 const struct Qdisc_class_ops *cops;
2968                 struct net_device *dev;
2969                 unsigned long cl = 0;
2970
2971                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2972                 if (!dev)
2973                         return skb->len;
2974
2975                 if (!tcm->tcm_parent)
2976                         q = rtnl_dereference(dev->qdisc);
2977                 else
2978                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2979
2980                 if (!q)
2981                         goto out;
2982                 cops = q->ops->cl_ops;
2983                 if (!cops)
2984                         goto out;
2985                 if (!cops->tcf_block)
2986                         goto out;
2987                 if (TC_H_MIN(tcm->tcm_parent)) {
2988                         cl = cops->find(q, tcm->tcm_parent);
2989                         if (cl == 0)
2990                                 goto out;
2991                 }
2992                 block = cops->tcf_block(q, cl, NULL);
2993                 if (!block)
2994                         goto out;
2995                 if (tcf_block_shared(block))
2996                         q = NULL;
2997         }
2998
2999         index_start = cb->args[0];
3000         index = 0;
3001
3002         mutex_lock(&block->lock);
3003         list_for_each_entry(chain, &block->chain_list, list) {
3004                 if (tca[TCA_CHAIN] &&
3005                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3006                         continue;
3007                 if (index < index_start) {
3008                         index++;
3009                         continue;
3010                 }
3011                 if (tcf_chain_held_by_acts_only(chain))
3012                         continue;
3013                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3014                                          chain->index, net, skb, block,
3015                                          NETLINK_CB(cb->skb).portid,
3016                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3017                                          RTM_NEWCHAIN);
3018                 if (err <= 0)
3019                         break;
3020                 index++;
3021         }
3022         mutex_unlock(&block->lock);
3023
3024         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3025                 tcf_block_refcnt_put(block, true);
3026         cb->args[0] = index;
3027
3028 out:
3029         /* If we made no progress, the error (EMSGSIZE) is real */
3030         if (skb->len == 0 && err)
3031                 return err;
3032         return skb->len;
3033 }
3034
3035 void tcf_exts_destroy(struct tcf_exts *exts)
3036 {
3037 #ifdef CONFIG_NET_CLS_ACT
3038         if (exts->actions) {
3039                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3040                 kfree(exts->actions);
3041         }
3042         exts->nr_actions = 0;
3043 #endif
3044 }
3045 EXPORT_SYMBOL(tcf_exts_destroy);
3046
3047 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3048                          struct nlattr *rate_tlv, struct tcf_exts *exts,
3049                          u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3050 {
3051 #ifdef CONFIG_NET_CLS_ACT
3052         {
3053                 int init_res[TCA_ACT_MAX_PRIO] = {};
3054                 struct tc_action *act;
3055                 size_t attr_size = 0;
3056
3057                 if (exts->police && tb[exts->police]) {
3058                         struct tc_action_ops *a_o;
3059
3060                         a_o = tc_action_load_ops(tb[exts->police], true,
3061                                                  !(flags & TCA_ACT_FLAGS_NO_RTNL),
3062                                                  extack);
3063                         if (IS_ERR(a_o))
3064                                 return PTR_ERR(a_o);
3065                         flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3066                         act = tcf_action_init_1(net, tp, tb[exts->police],
3067                                                 rate_tlv, a_o, init_res, flags,
3068                                                 extack);
3069                         module_put(a_o->owner);
3070                         if (IS_ERR(act))
3071                                 return PTR_ERR(act);
3072
3073                         act->type = exts->type = TCA_OLD_COMPAT;
3074                         exts->actions[0] = act;
3075                         exts->nr_actions = 1;
3076                         tcf_idr_insert_many(exts->actions);
3077                 } else if (exts->action && tb[exts->action]) {
3078                         int err;
3079
3080                         flags |= TCA_ACT_FLAGS_BIND;
3081                         err = tcf_action_init(net, tp, tb[exts->action],
3082                                               rate_tlv, exts->actions, init_res,
3083                                               &attr_size, flags, fl_flags,
3084                                               extack);
3085                         if (err < 0)
3086                                 return err;
3087                         exts->nr_actions = err;
3088                 }
3089         }
3090 #else
3091         if ((exts->action && tb[exts->action]) ||
3092             (exts->police && tb[exts->police])) {
3093                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3094                 return -EOPNOTSUPP;
3095         }
3096 #endif
3097
3098         return 0;
3099 }
3100 EXPORT_SYMBOL(tcf_exts_validate_ex);
3101
3102 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3103                       struct nlattr *rate_tlv, struct tcf_exts *exts,
3104                       u32 flags, struct netlink_ext_ack *extack)
3105 {
3106         return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3107                                     flags, 0, extack);
3108 }
3109 EXPORT_SYMBOL(tcf_exts_validate);
3110
3111 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3112 {
3113 #ifdef CONFIG_NET_CLS_ACT
3114         struct tcf_exts old = *dst;
3115
3116         *dst = *src;
3117         tcf_exts_destroy(&old);
3118 #endif
3119 }
3120 EXPORT_SYMBOL(tcf_exts_change);
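
/* Illustrative sketch (not part of the original file): the usual
 * "validate into a temporary tcf_exts, then commit" sequence in a
 * classifier's ->change() path, modelled loosely on cls_basic (whose
 * TCA_BASIC_ACT/TCA_BASIC_POLICE attribute ids are borrowed purely as
 * placeholders).  "old" stands for the exts embedded in the classifier's
 * private filter struct.
 */
static int __maybe_unused
example_update_actions(struct net *net, struct tcf_proto *tp,
                       struct tcf_exts *old, struct nlattr **tb,
                       struct nlattr *est, u32 flags,
                       struct netlink_ext_ack *extack)
{
        struct tcf_exts e;
        int err;

        err = tcf_exts_init(&e, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
        if (err < 0)
                return err;

        err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
        if (err < 0) {
                tcf_exts_destroy(&e);
                return err;
        }

        /* Publish the validated actions; the previous ones are released
         * by tcf_exts_change() through tcf_exts_destroy().
         */
        tcf_exts_change(old, &e);
        return 0;
}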
3121
3122 #ifdef CONFIG_NET_CLS_ACT
3123 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3124 {
3125         if (exts->nr_actions == 0)
3126                 return NULL;
3127         else
3128                 return exts->actions[0];
3129 }
3130 #endif
3131
3132 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3133 {
3134 #ifdef CONFIG_NET_CLS_ACT
3135         struct nlattr *nest;
3136
3137         if (exts->action && tcf_exts_has_actions(exts)) {
3138                 /*
3139                  * Again for backward compatibility - we want to work
3140                  * with both the old and the new modes of entering tc
3141                  * data, even if iproute2 is newer - jhs
3142                  */
3143                 if (exts->type != TCA_OLD_COMPAT) {
3144                         nest = nla_nest_start_noflag(skb, exts->action);
3145                         if (nest == NULL)
3146                                 goto nla_put_failure;
3147
3148                         if (tcf_action_dump(skb, exts->actions,
3149                                             0, 0, false) < 0)
3150                                 goto nla_put_failure;
3151                         nla_nest_end(skb, nest);
3152                 } else if (exts->police) {
3153                         struct tc_action *act = tcf_exts_first_act(exts);
3154                         nest = nla_nest_start_noflag(skb, exts->police);
3155                         if (nest == NULL || !act)
3156                                 goto nla_put_failure;
3157                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3158                                 goto nla_put_failure;
3159                         nla_nest_end(skb, nest);
3160                 }
3161         }
3162         return 0;
3163
3164 nla_put_failure:
3165         nla_nest_cancel(skb, nest);
3166         return -1;
3167 #else
3168         return 0;
3169 #endif
3170 }
3171 EXPORT_SYMBOL(tcf_exts_dump);
3172
3173 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3174 {
3175 #ifdef CONFIG_NET_CLS_ACT
3176         struct nlattr *nest;
3177
3178         if (!exts->action || !tcf_exts_has_actions(exts))
3179                 return 0;
3180
3181         nest = nla_nest_start_noflag(skb, exts->action);
3182         if (!nest)
3183                 goto nla_put_failure;
3184
3185         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3186                 goto nla_put_failure;
3187         nla_nest_end(skb, nest);
3188         return 0;
3189
3190 nla_put_failure:
3191         nla_nest_cancel(skb, nest);
3192         return -1;
3193 #else
3194         return 0;
3195 #endif
3196 }
3197 EXPORT_SYMBOL(tcf_exts_terse_dump);
3198
3199 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3200 {
3201 #ifdef CONFIG_NET_CLS_ACT
3202         struct tc_action *a = tcf_exts_first_act(exts);
3203         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3204                 return -1;
3205 #endif
3206         return 0;
3207 }
3208 EXPORT_SYMBOL(tcf_exts_dump_stats);
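
/* Illustrative sketch (not part of the original file): how a classifier's
 * ->dump() callback is expected to pair tcf_exts_dump() (the action
 * attributes, emitted inside the options nest) with tcf_exts_dump_stats()
 * (emitted after the nest).  Modelled loosely on cls_basic; in a real
 * classifier "exts" lives in its private filter struct and other filter
 * attributes would be added to the nest as well.
 */
static int __maybe_unused
example_dump_exts(struct sk_buff *skb, struct tcf_exts *exts)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}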
3209
3210 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3211 {
3212         if (*flags & TCA_CLS_FLAGS_IN_HW)
3213                 return;
3214         *flags |= TCA_CLS_FLAGS_IN_HW;
3215         atomic_inc(&block->offloadcnt);
3216 }
3217
3218 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3219 {
3220         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3221                 return;
3222         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3223         atomic_dec(&block->offloadcnt);
3224 }
3225
3226 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3227                                       struct tcf_proto *tp, u32 *cnt,
3228                                       u32 *flags, u32 diff, bool add)
3229 {
3230         lockdep_assert_held(&block->cb_lock);
3231
3232         spin_lock(&tp->lock);
3233         if (add) {
3234                 if (!*cnt)
3235                         tcf_block_offload_inc(block, flags);
3236                 *cnt += diff;
3237         } else {
3238                 *cnt -= diff;
3239                 if (!*cnt)
3240                         tcf_block_offload_dec(block, flags);
3241         }
3242         spin_unlock(&tp->lock);
3243 }
3244
3245 static void
3246 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3247                          u32 *cnt, u32 *flags)
3248 {
3249         lockdep_assert_held(&block->cb_lock);
3250
3251         spin_lock(&tp->lock);
3252         tcf_block_offload_dec(block, flags);
3253         *cnt = 0;
3254         spin_unlock(&tp->lock);
3255 }
3256
3257 static int
3258 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3259                    void *type_data, bool err_stop)
3260 {
3261         struct flow_block_cb *block_cb;
3262         int ok_count = 0;
3263         int err;
3264
3265         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3266                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3267                 if (err) {
3268                         if (err_stop)
3269                                 return err;
3270                 } else {
3271                         ok_count++;
3272                 }
3273         }
3274         return ok_count;
3275 }
3276
3277 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3278                      void *type_data, bool err_stop, bool rtnl_held)
3279 {
3280         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3281         int ok_count;
3282
3283 retry:
3284         if (take_rtnl)
3285                 rtnl_lock();
3286         down_read(&block->cb_lock);
3287         /* Need to obtain rtnl lock if block is bound to devs that require it.
3288          * In block bind code cb_lock is obtained while holding rtnl, so we must
3289          * obtain the locks in the same order here.
3290          */
3291         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3292                 up_read(&block->cb_lock);
3293                 take_rtnl = true;
3294                 goto retry;
3295         }
3296
3297         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3298
3299         up_read(&block->cb_lock);
3300         if (take_rtnl)
3301                 rtnl_unlock();
3302         return ok_count;
3303 }
3304 EXPORT_SYMBOL(tc_setup_cb_call);
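
/* Illustrative sketch (not part of the original file): a hardware stats
 * query issued through tc_setup_cb_call(), loosely modelled on the
 * FLOW_CLS_STATS request sent by cls_flower.  "cookie" is the classifier's
 * opaque filter identifier; rtnl_held tells the core whether the caller
 * already owns rtnl so that the lock ordering documented above is kept.
 */
static void __maybe_unused
example_hw_query_stats(struct tcf_proto *tp, unsigned long cookie,
                       u32 flags, bool rtnl_held)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};

        tc_cls_common_offload_init(&cls_flower.common, tp, flags, NULL);
        cls_flower.command = FLOW_CLS_STATS;
        cls_flower.cookie = cookie;

        /* err_stop is false: gather stats from every capable callback. */
        tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
                         rtnl_held);

        /* Drivers fill cls_flower.stats via flow_stats_update(); the caller
         * would normally fold those counters back into the filter's actions.
         */
}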
3305
3306 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3307  * successfully offloaded, increment the block's offload counter. On failure,
3308  * a previously offloaded filter is considered intact and the offload counter
3309  * is not decremented.
3310  */
3311
3312 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3313                     enum tc_setup_type type, void *type_data, bool err_stop,
3314                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3315 {
3316         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3317         int ok_count;
3318
3319 retry:
3320         if (take_rtnl)
3321                 rtnl_lock();
3322         down_read(&block->cb_lock);
3323         /* Need to obtain rtnl lock if block is bound to devs that require it.
3324          * In block bind code cb_lock is obtained while holding rtnl, so we must
3325          * obtain the locks in the same order here.
3326          */
3327         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3328                 up_read(&block->cb_lock);
3329                 take_rtnl = true;
3330                 goto retry;
3331         }
3332
3333         /* Make sure all netdevs sharing this block are offload-capable. */
3334         if (block->nooffloaddevcnt && err_stop) {
3335                 ok_count = -EOPNOTSUPP;
3336                 goto err_unlock;
3337         }
3338
3339         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3340         if (ok_count < 0)
3341                 goto err_unlock;
3342
3343         if (tp->ops->hw_add)
3344                 tp->ops->hw_add(tp, type_data);
3345         if (ok_count > 0)
3346                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3347                                           ok_count, true);
3348 err_unlock:
3349         up_read(&block->cb_lock);
3350         if (take_rtnl)
3351                 rtnl_unlock();
3352         return min(ok_count, 0);
3353 }
3354 EXPORT_SYMBOL(tc_setup_cb_add);
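
/* Illustrative sketch (not part of the original file): how a classifier is
 * expected to combine tcf_exts_num_actions(), tc_setup_offload_action() and
 * tc_setup_cb_add() when offloading a newly created filter, loosely modelled
 * on cls_flower.  "cookie", "exts", "flags" and "in_hw_count" would normally
 * be fields of the classifier's private filter struct; unwinding a partially
 * installed offload on error is elided.
 */
static int __maybe_unused
example_hw_add(struct tcf_proto *tp, unsigned long cookie,
               struct tcf_exts *exts, u32 *flags, unsigned int *in_hw_count,
               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};
        bool skip_sw = tc_skip_sw(*flags);
        int err;

        cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        if (!cls_flower.rule)
                return -ENOMEM;

        tc_cls_common_offload_init(&cls_flower.common, tp, *flags, extack);
        cls_flower.command = FLOW_CLS_REPLACE;
        cls_flower.cookie = cookie;

        err = tc_setup_offload_action(&cls_flower.rule->action, exts, extack);
        if (err) {
                kfree(cls_flower.rule);
                /* Unoffloadable actions only matter if software fallback
                 * was explicitly excluded.
                 */
                return skip_sw ? err : 0;
        }

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
                              skip_sw, flags, in_hw_count, true);
        tc_cleanup_offload_action(&cls_flower.rule->action);
        kfree(cls_flower.rule);

        if (err)
                return err;
        if (skip_sw && !tc_in_hw(*flags))
                return -EINVAL;
        return 0;
}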
3355
3356 /* Destructive filter replace. If a filter that wasn't already in hardware is
3357  * successfully offloaded, increment the block's offload counter. On failure,
3358  * the previously offloaded filter is considered destroyed and the offload
3359  * counter is decremented.
3360  */
3361
3362 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3363                         enum tc_setup_type type, void *type_data, bool err_stop,
3364                         u32 *old_flags, unsigned int *old_in_hw_count,
3365                         u32 *new_flags, unsigned int *new_in_hw_count,
3366                         bool rtnl_held)
3367 {
3368         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3369         int ok_count;
3370
3371 retry:
3372         if (take_rtnl)
3373                 rtnl_lock();
3374         down_read(&block->cb_lock);
3375         /* Need to obtain rtnl lock if block is bound to devs that require it.
3376          * In block bind code cb_lock is obtained while holding rtnl, so we must
3377          * obtain the locks in the same order here.
3378          */
3379         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3380                 up_read(&block->cb_lock);
3381                 take_rtnl = true;
3382                 goto retry;
3383         }
3384
3385         /* Make sure all netdevs sharing this block are offload-capable. */
3386         if (block->nooffloaddevcnt && err_stop) {
3387                 ok_count = -EOPNOTSUPP;
3388                 goto err_unlock;
3389         }
3390
3391         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3392         if (tp->ops->hw_del)
3393                 tp->ops->hw_del(tp, type_data);
3394
3395         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3396         if (ok_count < 0)
3397                 goto err_unlock;
3398
3399         if (tp->ops->hw_add)
3400                 tp->ops->hw_add(tp, type_data);
3401         if (ok_count > 0)
3402                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3403                                           new_flags, ok_count, true);
3404 err_unlock:
3405         up_read(&block->cb_lock);
3406         if (take_rtnl)
3407                 rtnl_unlock();
3408         return min(ok_count, 0);
3409 }
3410 EXPORT_SYMBOL(tc_setup_cb_replace);
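
/* Illustrative sketch (not part of the original file): unlike the add path,
 * a replace passes both the old and the new flag/counter pairs, because the
 * old offload is reset (and ->hw_del() invoked) before the callbacks are
 * re-run for the new filter.  In a real classifier these would be fields of
 * the old and new filter instances; building "cls" follows the add sketch
 * above and is elided here.
 */
static int __maybe_unused
example_hw_replace(struct tcf_proto *tp, struct flow_cls_offload *cls,
                   bool skip_sw, u32 *old_flags, unsigned int *old_in_hw_count,
                   u32 *new_flags, unsigned int *new_in_hw_count)
{
        struct tcf_block *block = tp->chain->block;

        return tc_setup_cb_replace(block, tp, TC_SETUP_CLSFLOWER, cls, skip_sw,
                                   old_flags, old_in_hw_count,
                                   new_flags, new_in_hw_count, true);
}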
3411
3412 /* Destroy the filter and decrement the block's offload counter if the filter
3413  * was previously offloaded.
3414  */
3415
3416 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3417                         enum tc_setup_type type, void *type_data, bool err_stop,
3418                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3419 {
3420         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3421         int ok_count;
3422
3423 retry:
3424         if (take_rtnl)
3425                 rtnl_lock();
3426         down_read(&block->cb_lock);
3427         /* Need to obtain rtnl lock if block is bound to devs that require it.
3428          * In block bind code cb_lock is obtained while holding rtnl, so we must
3429          * obtain the locks in the same order here.
3430          */
3431         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3432                 up_read(&block->cb_lock);
3433                 take_rtnl = true;
3434                 goto retry;
3435         }
3436
3437         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3438
3439         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3440         if (tp->ops->hw_del)
3441                 tp->ops->hw_del(tp, type_data);
3442
3443         up_read(&block->cb_lock);
3444         if (take_rtnl)
3445                 rtnl_unlock();
3446         return min(ok_count, 0);
3447 }
3448 EXPORT_SYMBOL(tc_setup_cb_destroy);
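
/* Illustrative sketch (not part of the original file): tearing down a
 * filter's offload.  No flow rule has to be built for this direction; the
 * callbacks identify the entry by cookie, and the in_hw counters are reset
 * unconditionally.  "cookie", "flags" and "in_hw_count" would normally be
 * fields of the classifier's private filter struct.
 */
static void __maybe_unused
example_hw_destroy(struct tcf_proto *tp, unsigned long cookie,
                   u32 *flags, unsigned int *in_hw_count,
                   struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};

        tc_cls_common_offload_init(&cls_flower.common, tp, *flags, extack);
        cls_flower.command = FLOW_CLS_DESTROY;
        cls_flower.cookie = cookie;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
                            flags, in_hw_count, true);
}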
3449
3450 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3451                           bool add, flow_setup_cb_t *cb,
3452                           enum tc_setup_type type, void *type_data,
3453                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3454 {
3455         int err = cb(type, type_data, cb_priv);
3456
3457         if (err) {
3458                 if (add && tc_skip_sw(*flags))
3459                         return err;
3460         } else {
3461                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3462                                           add);
3463         }
3464
3465         return 0;
3466 }
3467 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3468
3469 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3470                               const struct tc_action *act)
3471 {
3472         struct tc_cookie *cookie;
3473         int err = 0;
3474
3475         rcu_read_lock();
3476         cookie = rcu_dereference(act->act_cookie);
3477         if (cookie) {
3478                 entry->cookie = flow_action_cookie_create(cookie->data,
3479                                                           cookie->len,
3480                                                           GFP_ATOMIC);
3481                 if (!entry->cookie)
3482                         err = -ENOMEM;
3483         }
3484         rcu_read_unlock();
3485         return err;
3486 }
3487
3488 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3489 {
3490         flow_action_cookie_destroy(entry->cookie);
3491 }
3492
3493 void tc_cleanup_offload_action(struct flow_action *flow_action)
3494 {
3495         struct flow_action_entry *entry;
3496         int i;
3497
3498         flow_action_for_each(i, entry, flow_action) {
3499                 tcf_act_put_cookie(entry);
3500                 if (entry->destructor)
3501                         entry->destructor(entry->destructor_priv);
3502         }
3503 }
3504 EXPORT_SYMBOL(tc_cleanup_offload_action);
3505
3506 static int tc_setup_offload_act(struct tc_action *act,
3507                                 struct flow_action_entry *entry,
3508                                 u32 *index_inc,
3509                                 struct netlink_ext_ack *extack)
3510 {
3511 #ifdef CONFIG_NET_CLS_ACT
3512         if (act->ops->offload_act_setup) {
3513                 return act->ops->offload_act_setup(act, entry, index_inc, true,
3514                                                    extack);
3515         } else {
3516                 NL_SET_ERR_MSG(extack, "Action does not support offload");
3517                 return -EOPNOTSUPP;
3518         }
3519 #else
3520         return 0;
3521 #endif
3522 }
3523
3524 int tc_setup_action(struct flow_action *flow_action,
3525                     struct tc_action *actions[],
3526                     struct netlink_ext_ack *extack)
3527 {
3528         int i, j, k, index, err = 0;
3529         struct tc_action *act;
3530
3531         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3532         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3533         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3534
3535         if (!actions)
3536                 return 0;
3537
3538         j = 0;
3539         tcf_act_for_each_action(i, act, actions) {
3540                 struct flow_action_entry *entry;
3541
3542                 entry = &flow_action->entries[j];
3543                 spin_lock_bh(&act->tcfa_lock);
3544                 err = tcf_act_get_cookie(entry, act);
3545                 if (err)
3546                         goto err_out_locked;
3547
3548                 index = 0;
3549                 err = tc_setup_offload_act(act, entry, &index, extack);
3550                 if (err)
3551                         goto err_out_locked;
3552
3553                 for (k = 0; k < index; k++) {
3554                         entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3555                         entry[k].hw_index = act->tcfa_index;
3556                 }
3557
3558                 j += index;
3559
3560                 spin_unlock_bh(&act->tcfa_lock);
3561         }
3562
3563 err_out:
3564         if (err)
3565                 tc_cleanup_offload_action(flow_action);
3566
3567         return err;
3568 err_out_locked:
3569         spin_unlock_bh(&act->tcfa_lock);
3570         goto err_out;
3571 }
3572
3573 int tc_setup_offload_action(struct flow_action *flow_action,
3574                             const struct tcf_exts *exts,
3575                             struct netlink_ext_ack *extack)
3576 {
3577 #ifdef CONFIG_NET_CLS_ACT
3578         if (!exts)
3579                 return 0;
3580
3581         return tc_setup_action(flow_action, exts->actions, extack);
3582 #else
3583         return 0;
3584 #endif
3585 }
3586 EXPORT_SYMBOL(tc_setup_offload_action);
3587
3588 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3589 {
3590         unsigned int num_acts = 0;
3591         struct tc_action *act;
3592         int i;
3593
3594         tcf_exts_for_each_action(i, act, exts) {
3595                 if (is_tcf_pedit(act))
3596                         num_acts += tcf_pedit_nkeys(act);
3597                 else
3598                         num_acts++;
3599         }
3600         return num_acts;
3601 }
3602 EXPORT_SYMBOL(tcf_exts_num_actions);
3603
3604 #ifdef CONFIG_NET_CLS_ACT
3605 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3606                                         u32 *p_block_index,
3607                                         struct netlink_ext_ack *extack)
3608 {
3609         *p_block_index = nla_get_u32(block_index_attr);
3610         if (!*p_block_index) {
3611                 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3612                 return -EINVAL;
3613         }
3614
3615         return 0;
3616 }
3617
3618 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3619                     enum flow_block_binder_type binder_type,
3620                     struct nlattr *block_index_attr,
3621                     struct netlink_ext_ack *extack)
3622 {
3623         u32 block_index;
3624         int err;
3625
3626         if (!block_index_attr)
3627                 return 0;
3628
3629         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3630         if (err)
3631                 return err;
3632
3633         qe->info.binder_type = binder_type;
3634         qe->info.chain_head_change = tcf_chain_head_change_dflt;
3635         qe->info.chain_head_change_priv = &qe->filter_chain;
3636         qe->info.block_index = block_index;
3637
3638         return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3639 }
3640 EXPORT_SYMBOL(tcf_qevent_init);
3641
3642 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3643 {
3644         if (qe->info.block_index)
3645                 tcf_block_put_ext(qe->block, sch, &qe->info);
3646 }
3647 EXPORT_SYMBOL(tcf_qevent_destroy);
3648
3649 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3650                                struct netlink_ext_ack *extack)
3651 {
3652         u32 block_index;
3653         int err;
3654
3655         if (!block_index_attr)
3656                 return 0;
3657
3658         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3659         if (err)
3660                 return err;
3661
3662         /* Reject attempts to set up a block where none was configured, or to change blocks. */
3663         if (block_index != qe->info.block_index) {
3664                 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3665                 return -EINVAL;
3666         }
3667
3668         return 0;
3669 }
3670 EXPORT_SYMBOL(tcf_qevent_validate_change);
3671
3672 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3673                                   struct sk_buff **to_free, int *ret)
3674 {
3675         struct tcf_result cl_res;
3676         struct tcf_proto *fl;
3677
3678         if (!qe->info.block_index)
3679                 return skb;
3680
3681         fl = rcu_dereference_bh(qe->filter_chain);
3682
3683         switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3684         case TC_ACT_SHOT:
3685                 qdisc_qstats_drop(sch);
3686                 __qdisc_drop(skb, to_free);
3687                 *ret = __NET_XMIT_BYPASS;
3688                 return NULL;
3689         case TC_ACT_STOLEN:
3690         case TC_ACT_QUEUED:
3691         case TC_ACT_TRAP:
3692                 __qdisc_drop(skb, to_free);
3693                 *ret = __NET_XMIT_STOLEN;
3694                 return NULL;
3695         case TC_ACT_REDIRECT:
3696                 skb_do_redirect(skb);
3697                 *ret = __NET_XMIT_STOLEN;
3698                 return NULL;
3699         }
3700
3701         return skb;
3702 }
3703 EXPORT_SYMBOL(tcf_qevent_handle);
3704
3705 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3706 {
3707         if (!qe->info.block_index)
3708                 return 0;
3709         return nla_put_u32(skb, attr_name, qe->info.block_index);
3710 }
3711 EXPORT_SYMBOL(tcf_qevent_dump);
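
/* Illustrative sketch (not part of the original file): the expected life
 * cycle of a qevent inside a qdisc, loosely modelled on the "early_drop"
 * qevent of sch_red.  "example_sched_data" and TCA_EXAMPLE_QE_BLOCK are
 * hypothetical; the matching tcf_qevent_destroy() belongs in the qdisc's
 * ->destroy() and tcf_qevent_dump() in its ->dump().
 */
struct example_sched_data {
        struct tcf_qevent qe;
};

static int __maybe_unused
example_qdisc_init_qevent(struct Qdisc *sch, struct nlattr **tb,
                          struct netlink_ext_ack *extack)
{
        struct example_sched_data *q = qdisc_priv(sch);

        /* Bind to the shared block named by the (hypothetical) attribute. */
        return tcf_qevent_init(&q->qe, sch,
                               FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
                               tb[TCA_EXAMPLE_QE_BLOCK], extack);
}

static __maybe_unused struct sk_buff *
example_qdisc_run_qevent(struct Qdisc *sch, struct sk_buff *skb,
                         struct sk_buff **to_free, int *ret)
{
        struct example_sched_data *q = qdisc_priv(sch);

        /* Run the qevent's filter chain from the enqueue path; a NULL
         * return means the skb was dropped, stolen or redirected and *ret
         * already holds the verdict to propagate.
         */
        return tcf_qevent_handle(&q->qe, sch, skb, to_free, ret);
}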
3712 #endif
3713
3714 static __net_init int tcf_net_init(struct net *net)
3715 {
3716         struct tcf_net *tn = net_generic(net, tcf_net_id);
3717
3718         spin_lock_init(&tn->idr_lock);
3719         idr_init(&tn->idr);
3720         return 0;
3721 }
3722
3723 static void __net_exit tcf_net_exit(struct net *net)
3724 {
3725         struct tcf_net *tn = net_generic(net, tcf_net_id);
3726
3727         idr_destroy(&tn->idr);
3728 }
3729
3730 static struct pernet_operations tcf_net_ops = {
3731         .init = tcf_net_init,
3732         .exit = tcf_net_exit,
3733         .id   = &tcf_net_id,
3734         .size = sizeof(struct tcf_net),
3735 };
3736
3737 static int __init tc_filter_init(void)
3738 {
3739         int err;
3740
3741         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3742         if (!tc_filter_wq)
3743                 return -ENOMEM;
3744
3745         err = register_pernet_subsys(&tcf_net_ops);
3746         if (err)
3747                 goto err_register_pernet_subsys;
3748
3749         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3750                       RTNL_FLAG_DOIT_UNLOCKED);
3751         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3752                       RTNL_FLAG_DOIT_UNLOCKED);
3753         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3754                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3755         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3756         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3757         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3758                       tc_dump_chain, 0);
3759
3760         return 0;
3761
3762 err_register_pernet_subsys:
3763         destroy_workqueue(tc_filter_wq);
3764         return err;
3765 }
3766
3767 subsys_initcall(tc_filter_init);