net: sched: move rtm_tca_policy declaration to include file
[platform/kernel/linux-starfive.git] net/sched/cls_api.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
        static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
        static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
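
/* Usage sketch (mirrors tcf_classify() below): datapath code guards
 * tc_skb_ext handling behind this static key, so nothing is paid until a
 * user (e.g. conntrack offload) has called tc_skb_ext_tc_enable():
 *
 *        if (tc_skb_ext_tc_enabled()) {
 *                struct tc_skb_ext *ext = tc_skb_ext_alloc(skb);
 *                ...
 *        }
 */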

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
        return jhash_3words(tp->chain->index, tp->prio,
                            (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
                     destroy_obj_hashfn(tp));
        mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
                          const struct tcf_proto *tp2)
{
        return tp1->chain->index == tp2->chain->index &&
               tp1->prio == tp2->prio &&
               tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        u32 hash = destroy_obj_hashfn(tp);
        struct tcf_proto *iter;
        bool found = false;

        rcu_read_lock();
        hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
                                   destroy_ht_node, hash) {
                if (tcf_proto_cmp(tp, iter)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        if (hash_hashed(&tp->destroy_ht_node))
                hash_del_rcu(&tp->destroy_ht_node);
        mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module, we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);

        WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
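
/* Typical use by a classifier (sketch; 'struct my_filter' and its handler
 * are hypothetical): embed a struct rcu_work in the filter and free it only
 * after an RCU grace period has elapsed:
 *
 *        static void my_filter_free_work(struct work_struct *work)
 *        {
 *                struct my_filter *f = container_of(to_rcu_work(work),
 *                                                   struct my_filter, rwork);
 *                kfree(f);
 *        }
 *        ...
 *        tcf_queue_work(&f->rwork, my_filter_free_work);
 */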

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
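
/* Worked example (sketch): with no filters installed, tp is NULL and the
 * auto prio is TC_H_MAJ(0xC0000000) = 0xC0000000, i.e. user-visible prio
 * 0xC000 (49152). If the current head was auto-added with prio 0xC0000000,
 * the next one gets TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000 (49151): each
 * auto-added filter lands one priority below the previous head.
 */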

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
        if (kind)
                return nla_strscpy(name, kind, IFNAMSIZ) < 0;
        memset(name, 0, IFNAMSIZ);
        return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        if (strlen(kind) == 0)
                return false;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              bool sig_destroy, struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        if (sig_destroy)
                tcf_proto_signal_destroyed(tp->chain, tp);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
        if (tp->ops->delete_empty)
                return tp->ops->delete_empty(tp);

        tp->deleting = true;
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)                                      \
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail_rcu(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del_rcu(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        mutex_destroy(&block->proto_destroy_lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
                                              u32 chain_index)
{
        struct tcf_chain *chain;

        list_for_each_entry_rcu(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send a notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it, and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
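
/* Note: actions that point to a chain (e.g. a gact "goto chain X") pin it
 * via tcf_chain_get_by_act(), which bumps both refcnt and action_refcnt.
 * Per tcf_chain_held_by_acts_only() above, such action-only chains are
 * hidden from the user and trigger no netlink notification until the first
 * non-action reference appears.
 */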

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, when the block is unlocked the chain can be changed
         * concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

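/* Note on tcf_chain_flush() below: the flush is two-phase. First, with
 * filter_chain_lock held, every tp on the chain is marked as being destroyed
 * (tcf_proto_signal_destroying()) and the chain head is reset; then, with
 * the lock dropped, the references are released, since the classifiers'
 * destroy handlers may sleep.
 */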
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_signal_destroying(chain, tp);
                tp = tp_next;
        }
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
                                   struct net_device *dev, struct Qdisc *sch,
                                   enum flow_block_command command,
                                   enum flow_block_binder_type binder_type,
                                   struct flow_block *flow_block,
                                   bool shared, struct netlink_ext_ack *extack)
{
        bo->net = dev_net(dev);
        bo->command = command;
        bo->binder_type = binder_type;
        bo->block = flow_block;
        bo->block_shared = shared;
        bo->extack = extack;
        bo->sch = sch;
        bo->cb_list_head = &flow_block->cb_list;
        INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
        struct tcf_block *block = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;
        struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo = {};

        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
        rtnl_lock();
        down_write(&block->cb_lock);
        list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        tcf_block_unbind(block, &bo);
        up_write(&block->cb_lock);
        rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev, struct Qdisc *sch,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};

        tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               extack);

        if (dev->netdev_ops->ndo_setup_tc) {
                int err;

                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
                if (err < 0) {
                        if (err != -EOPNOTSUPP)
                                NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
                        return err;
                }

                return tcf_block_setup(block, &bo);
        }

        flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
                                    tc_block_indr_cleanup);
        tcf_block_setup(block, &bo);

        return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, forbid the bind.
         */
        if (dev->netdev_ops->ndo_setup_tc &&
            !tc_can_offload(dev) &&
            tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block))
                goto err_unlock;

        err = 0;
        block->nooffloaddevcnt++;
err_unlock:
        up_write(&block->cb_lock);
        return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        mutex_init(&block->proto_destroy_lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the
 * chain before returning it. Users of this function must tolerate concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification
 * is possible. Note that netlink dump callbacks cannot guarantee a
 * consistent dump because the rtnl lock is released each time an skb is
 * filled with data and sent to user space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
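
/* Iteration sketch: the helper releases the previous chain's reference
 * itself, so callers simply walk
 *
 *        for (chain = tcf_get_next_chain(block, NULL); chain;
 *             chain = tcf_get_next_chain(block, chain))
 *                ...;
 *
 * as tcf_block_flush_all_chains() below does.
 */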

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means next pointer could be invalid. Restart
                 * search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must tolerate concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time an skb is filled with
 * data and sent to user space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, true, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Look up the Qdisc and increment its reference counter.
 * Set the parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = rcu_dereference(dev->qdisc);
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that the qdisc is not noop_qdisc, which means
         * that the qdisc holds a reference to the net_device and we hold a
         * reference to the qdisc, so it is safe to release the rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Are we searching for a filter attached to a class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take a reference to the block in order to support
                 * execution of the cls API rules update path without the
                 * rtnl lock. The caller must release the block when finished
                 * with it. The 'if' branch of this conditional obtains its
                 * reference by calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when the last chain is freed. However, if
                 * chain_list is empty, the block has to be deallocated
                 * manually. After the block's reference counter has reached
                 * 0, it is no longer possible to increment it or add new
                 * chains to the block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* block_index not 0 means the shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
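
/* Caller sketch (the 'q' fields are illustrative, not from this file): a
 * classful qdisc typically attaches a block in its ->init() and releases it
 * in ->destroy():
 *
 *        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *        if (err)
 *                return err;
 *        ...
 *        tcf_block_put(q->block);
 */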

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        lockdep_assert_held(&block->cb_lock);

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}

static int tcf_block_bind(struct tcf_block *block,
                          struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;
        int err, i = 0;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry(block_cb, &bo->cb_list, list) {
                err = tcf_block_playback_offloads(block, block_cb->cb,
                                                  block_cb->cb_priv, true,
                                                  tcf_block_offload_in_use(block),
                                                  bo->extack);
                if (err)
                        goto err_unroll;
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt++;

                i++;
        }
        list_splice(&bo->cb_list, &block->flow_block.cb_list);

        return 0;

err_unroll:
        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                list_del(&block_cb->driver_list);
                if (i-- > 0) {
                        list_del(&block_cb->list);
                        tcf_block_playback_offloads(block, block_cb->cb,
                                                    block_cb->cb_priv, false,
                                                    tcf_block_offload_in_use(block),
                                                    NULL);
                        if (!bo->unlocked_driver_cb)
                                block->lockeddevcnt--;
                }
                flow_block_cb_free(block_cb);
        }

        return err;
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                tcf_block_playback_offloads(block, block_cb->cb,
                                            block_cb->cb_priv, false,
                                            tcf_block_offload_in_use(block),
                                            NULL);
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt--;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo)
{
        int err;

        switch (bo->command) {
        case FLOW_BLOCK_BIND:
                err = tcf_block_bind(block, bo);
                break;
        case FLOW_BLOCK_UNBIND:
                err = 0;
                tcf_block_unbind(block, bo);
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

/* Main classifier routine: scans the classifier chain attached to this
 * qdisc, (optionally) tests for the protocol and asks the specific
 * classifiers.
 */
1544 static inline int __tcf_classify(struct sk_buff *skb,
1545                                  const struct tcf_proto *tp,
1546                                  const struct tcf_proto *orig_tp,
1547                                  struct tcf_result *res,
1548                                  bool compat_mode,
1549                                  u32 *last_executed_chain)
1550 {
1551 #ifdef CONFIG_NET_CLS_ACT
1552         const int max_reclassify_loop = 16;
1553         const struct tcf_proto *first_tp;
1554         int limit = 0;
1555
1556 reclassify:
1557 #endif
1558         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1559                 __be16 protocol = skb_protocol(skb, false);
1560                 int err;
1561
1562                 if (tp->protocol != protocol &&
1563                     tp->protocol != htons(ETH_P_ALL))
1564                         continue;
1565
1566                 err = tp->classify(skb, tp, res);
1567 #ifdef CONFIG_NET_CLS_ACT
1568                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1569                         first_tp = orig_tp;
1570                         *last_executed_chain = first_tp->chain->index;
1571                         goto reset;
1572                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1573                         first_tp = res->goto_tp;
1574                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1575                         goto reset;
1576                 }
1577 #endif
1578                 if (err >= 0)
1579                         return err;
1580         }
1581
1582         return TC_ACT_UNSPEC; /* signal: continue lookup */
1583 #ifdef CONFIG_NET_CLS_ACT
1584 reset:
1585         if (unlikely(limit++ >= max_reclassify_loop)) {
1586                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1587                                        tp->chain->block->index,
1588                                        tp->prio & 0xffff,
1589                                        ntohs(tp->protocol));
1590                 return TC_ACT_SHOT;
1591         }
1592
1593         tp = first_tp;
1594         goto reclassify;
1595 #endif
1596 }
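/* Editorial note (sketch, not in the original file): TC_ACT_GOTO_CHAIN
 * verdicts carry the target chain index in their low bits, using the
 * uapi macros from <linux/pkt_cls.h>. An action that wants to jump to
 * chain 7 effectively returns:
 *
 *	return TC_ACT_GOTO_CHAIN | 7;
 *
 * __tcf_classify() above detects the opcode with TC_ACT_EXT_CMP(),
 * recovers the index via "err & TC_ACT_EXT_VAL_MASK", and restarts the
 * walk at res->goto_tp, which the goto_chain action code is expected to
 * have pointed at the head of the target chain's filter list.
 */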
1597
1598 int tcf_classify(struct sk_buff *skb,
1599                  const struct tcf_block *block,
1600                  const struct tcf_proto *tp,
1601                  struct tcf_result *res, bool compat_mode)
1602 {
1603 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1604         u32 last_executed_chain = 0;
1605
1606         return __tcf_classify(skb, tp, tp, res, compat_mode,
1607                               &last_executed_chain);
1608 #else
1609         u32 last_executed_chain = tp ? tp->chain->index : 0;
1610         const struct tcf_proto *orig_tp = tp;
1611         struct tc_skb_ext *ext;
1612         int ret;
1613
1614         if (block) {
1615                 ext = skb_ext_find(skb, TC_SKB_EXT);
1616
1617                 if (ext && ext->chain) {
1618                         struct tcf_chain *fchain;
1619
1620                         fchain = tcf_chain_lookup_rcu(block, ext->chain);
1621                         if (!fchain)
1622                                 return TC_ACT_SHOT;
1623
1624                         /* Consume, so cloned/redirect skbs won't inherit ext */
1625                         skb_ext_del(skb, TC_SKB_EXT);
1626
1627                         tp = rcu_dereference_bh(fchain->filter_chain);
1628                         last_executed_chain = fchain->index;
1629                 }
1630         }
1631
1632         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1633                              &last_executed_chain);
1634
1635         if (tc_skb_ext_tc_enabled()) {
1636                 /* If we missed on some chain */
1637                 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1638                         struct tc_skb_cb *cb = tc_skb_cb(skb);
1639
1640                         ext = tc_skb_ext_alloc(skb);
1641                         if (WARN_ON_ONCE(!ext))
1642                                 return TC_ACT_SHOT;
1643                         ext->chain = last_executed_chain;
1644                         ext->mru = cb->mru;
1645                         ext->post_ct = cb->post_ct;
1646                         ext->post_ct_snat = cb->post_ct_snat;
1647                         ext->post_ct_dnat = cb->post_ct_dnat;
1648                         ext->zone = cb->zone;
1649                 }
1650         }
1651
1652         return ret;
1653 #endif
1654 }
1655 EXPORT_SYMBOL(tcf_classify);
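/* Editorial sketch (not part of the original file): a classful qdisc
 * calls tcf_classify() from its enqueue path roughly the way sch_prio
 * does; the q->filter_list and *qerr names below follow that pattern:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int err;
 *
 *	err = tcf_classify(skb, NULL, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 *		fallthrough;
 *	case TC_ACT_SHOT:
 *		return NULL;
 *	}
 *
 * On success, res.classid selects the class/band to enqueue to.
 */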
1656
1657 struct tcf_chain_info {
1658         struct tcf_proto __rcu **pprev;
1659         struct tcf_proto __rcu *next;
1660 };
1661
1662 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1663                                            struct tcf_chain_info *chain_info)
1664 {
1665         return tcf_chain_dereference(*chain_info->pprev, chain);
1666 }
1667
1668 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1669                                struct tcf_chain_info *chain_info,
1670                                struct tcf_proto *tp)
1671 {
1672         if (chain->flushing)
1673                 return -EAGAIN;
1674
1675         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1676         if (*chain_info->pprev == chain->filter_chain)
1677                 tcf_chain0_head_change(chain, tp);
1678         tcf_proto_get(tp);
1679         rcu_assign_pointer(*chain_info->pprev, tp);
1680
1681         return 0;
1682 }
1683
1684 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1685                                 struct tcf_chain_info *chain_info,
1686                                 struct tcf_proto *tp)
1687 {
1688         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1689
1690         tcf_proto_mark_delete(tp);
1691         if (tp == chain->filter_chain)
1692                 tcf_chain0_head_change(chain, next);
1693         RCU_INIT_POINTER(*chain_info->pprev, next);
1694 }
1695
1696 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1697                                            struct tcf_chain_info *chain_info,
1698                                            u32 protocol, u32 prio,
1699                                            bool prio_allocate);
1700
1701 /* Try to insert a new proto.
1702  * If a proto with the specified priority already exists, free the
1703  * new proto and return the existing one.
1704  */
1705
1706 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1707                                                     struct tcf_proto *tp_new,
1708                                                     u32 protocol, u32 prio,
1709                                                     bool rtnl_held)
1710 {
1711         struct tcf_chain_info chain_info;
1712         struct tcf_proto *tp;
1713         int err = 0;
1714
1715         mutex_lock(&chain->filter_chain_lock);
1716
1717         if (tcf_proto_exists_destroying(chain, tp_new)) {
1718                 mutex_unlock(&chain->filter_chain_lock);
1719                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1720                 return ERR_PTR(-EAGAIN);
1721         }
1722
1723         tp = tcf_chain_tp_find(chain, &chain_info,
1724                                protocol, prio, false);
1725         if (!tp)
1726                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1727         mutex_unlock(&chain->filter_chain_lock);
1728
1729         if (tp) {
1730                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1731                 tp_new = tp;
1732         } else if (err) {
1733                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1734                 tp_new = ERR_PTR(err);
1735         }
1736
1737         return tp_new;
1738 }
1739
1740 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1741                                       struct tcf_proto *tp, bool rtnl_held,
1742                                       struct netlink_ext_ack *extack)
1743 {
1744         struct tcf_chain_info chain_info;
1745         struct tcf_proto *tp_iter;
1746         struct tcf_proto **pprev;
1747         struct tcf_proto *next;
1748
1749         mutex_lock(&chain->filter_chain_lock);
1750
1751         /* Atomically find and remove tp from chain. */
1752         for (pprev = &chain->filter_chain;
1753              (tp_iter = tcf_chain_dereference(*pprev, chain));
1754              pprev = &tp_iter->next) {
1755                 if (tp_iter == tp) {
1756                         chain_info.pprev = pprev;
1757                         chain_info.next = tp_iter->next;
1758                         WARN_ON(tp_iter->deleting);
1759                         break;
1760                 }
1761         }
1762         /* Verify that tp still exists and no new filters were inserted
1763          * concurrently.
1764          * Mark tp for deletion if it is empty.
1765          */
1766         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1767                 mutex_unlock(&chain->filter_chain_lock);
1768                 return;
1769         }
1770
1771         tcf_proto_signal_destroying(chain, tp);
1772         next = tcf_chain_dereference(chain_info.next, chain);
1773         if (tp == chain->filter_chain)
1774                 tcf_chain0_head_change(chain, next);
1775         RCU_INIT_POINTER(*chain_info.pprev, next);
1776         mutex_unlock(&chain->filter_chain_lock);
1777
1778         tcf_proto_put(tp, rtnl_held, extack);
1779 }
1780
1781 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1782                                            struct tcf_chain_info *chain_info,
1783                                            u32 protocol, u32 prio,
1784                                            bool prio_allocate)
1785 {
1786         struct tcf_proto **pprev;
1787         struct tcf_proto *tp;
1788
1789         /* Check the chain for an existing proto-tcf with this priority */
1790         for (pprev = &chain->filter_chain;
1791              (tp = tcf_chain_dereference(*pprev, chain));
1792              pprev = &tp->next) {
1793                 if (tp->prio >= prio) {
1794                         if (tp->prio == prio) {
1795                                 if (prio_allocate ||
1796                                     (tp->protocol != protocol && protocol))
1797                                         return ERR_PTR(-EINVAL);
1798                         } else {
1799                                 tp = NULL;
1800                         }
1801                         break;
1802                 }
1803         }
1804         chain_info->pprev = pprev;
1805         if (tp) {
1806                 chain_info->next = tp->next;
1807                 tcf_proto_get(tp);
1808         } else {
1809                 chain_info->next = NULL;
1810         }
1811         return tp;
1812 }
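/* Editorial note: the chain is kept sorted by ascending prio, so the
 * walk above stops at the first tp with tp->prio >= prio and yields one
 * of three results. Example for (protocol = ETH_P_IP, prio = 10):
 *
 *   - a tp with prio 10 and protocol ETH_P_IP exists: return it with a
 *     reference taken and chain_info pointing at it;
 *   - no tp with prio 10 exists: return NULL, with chain_info->pprev
 *     left at the correct insertion point for a new proto-tcf;
 *   - a tp with prio 10 but a different protocol exists, or the caller
 *     asked to auto-allocate an already-used prio: ERR_PTR(-EINVAL).
 */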
1813
1814 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1815                          struct tcf_proto *tp, struct tcf_block *block,
1816                          struct Qdisc *q, u32 parent, void *fh,
1817                          u32 portid, u32 seq, u16 flags, int event,
1818                          bool terse_dump, bool rtnl_held)
1819 {
1820         struct tcmsg *tcm;
1821         struct nlmsghdr  *nlh;
1822         unsigned char *b = skb_tail_pointer(skb);
1823
1824         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1825         if (!nlh)
1826                 goto out_nlmsg_trim;
1827         tcm = nlmsg_data(nlh);
1828         tcm->tcm_family = AF_UNSPEC;
1829         tcm->tcm__pad1 = 0;
1830         tcm->tcm__pad2 = 0;
1831         if (q) {
1832                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1833                 tcm->tcm_parent = parent;
1834         } else {
1835                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1836                 tcm->tcm_block_index = block->index;
1837         }
1838         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1839         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1840                 goto nla_put_failure;
1841         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1842                 goto nla_put_failure;
1843         if (!fh) {
1844                 tcm->tcm_handle = 0;
1845         } else if (terse_dump) {
1846                 if (tp->ops->terse_dump) {
1847                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1848                                                 rtnl_held) < 0)
1849                                 goto nla_put_failure;
1850                 } else {
1851                         goto cls_op_not_supp;
1852                 }
1853         } else {
1854                 if (tp->ops->dump &&
1855                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1856                         goto nla_put_failure;
1857         }
1858         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1859         return skb->len;
1860
1861 out_nlmsg_trim:
1862 nla_put_failure:
1863 cls_op_not_supp:
1864         nlmsg_trim(skb, b);
1865         return -1;
1866 }
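/* Editorial sketch of the message tcf_fill_node() builds (assuming a
 * qdisc-bound flower filter; the TCA_OPTIONS line is an assumption
 * about what a typical classifier's dump() op adds):
 *
 *	struct nlmsghdr		nlmsg_type = event (e.g. RTM_NEWTFILTER)
 *	struct tcmsg		tcm_ifindex/tcm_parent (or the magic
 *				block ifindex + tcm_block_index),
 *				tcm_info = TC_H_MAKE(prio, protocol)
 *	TCA_KIND		"flower"
 *	TCA_CHAIN		chain index (u32)
 *	TCA_OPTIONS		classifier-specific nest from dump()
 */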
1867
1868 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1869                           struct nlmsghdr *n, struct tcf_proto *tp,
1870                           struct tcf_block *block, struct Qdisc *q,
1871                           u32 parent, void *fh, int event, bool unicast,
1872                           bool rtnl_held)
1873 {
1874         struct sk_buff *skb;
1875         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1876         int err = 0;
1877
1878         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1879         if (!skb)
1880                 return -ENOBUFS;
1881
1882         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1883                           n->nlmsg_seq, n->nlmsg_flags, event,
1884                           false, rtnl_held) <= 0) {
1885                 kfree_skb(skb);
1886                 return -EINVAL;
1887         }
1888
1889         if (unicast)
1890                 err = rtnl_unicast(skb, net, portid);
1891         else
1892                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1893                                      n->nlmsg_flags & NLM_F_ECHO);
1894         return err;
1895 }
1896
1897 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1898                               struct nlmsghdr *n, struct tcf_proto *tp,
1899                               struct tcf_block *block, struct Qdisc *q,
1900                               u32 parent, void *fh, bool unicast, bool *last,
1901                               bool rtnl_held, struct netlink_ext_ack *extack)
1902 {
1903         struct sk_buff *skb;
1904         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1905         int err;
1906
1907         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1908         if (!skb)
1909                 return -ENOBUFS;
1910
1911         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1912                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1913                           false, rtnl_held) <= 0) {
1914                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1915                 kfree_skb(skb);
1916                 return -EINVAL;
1917         }
1918
1919         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1920         if (err) {
1921                 kfree_skb(skb);
1922                 return err;
1923         }
1924
1925         if (unicast)
1926                 err = rtnl_unicast(skb, net, portid);
1927         else
1928                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1929                                      n->nlmsg_flags & NLM_F_ECHO);
1930         if (err < 0)
1931                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1932
1933         return err;
1934 }
1935
1936 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1937                                  struct tcf_block *block, struct Qdisc *q,
1938                                  u32 parent, struct nlmsghdr *n,
1939                                  struct tcf_chain *chain, int event)
1940 {
1941         struct tcf_proto *tp;
1942
1943         for (tp = tcf_get_next_proto(chain, NULL);
1944              tp; tp = tcf_get_next_proto(chain, tp))
1945                 tfilter_notify(net, oskb, n, tp, block,
1946                                q, parent, NULL, event, false, true);
1947 }
1948
1949 static void tfilter_put(struct tcf_proto *tp, void *fh)
1950 {
1951         if (tp->ops->put && fh)
1952                 tp->ops->put(tp, fh);
1953 }
1954
1955 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1956                           struct netlink_ext_ack *extack)
1957 {
1958         struct net *net = sock_net(skb->sk);
1959         struct nlattr *tca[TCA_MAX + 1];
1960         char name[IFNAMSIZ];
1961         struct tcmsg *t;
1962         u32 protocol;
1963         u32 prio;
1964         bool prio_allocate;
1965         u32 parent;
1966         u32 chain_index;
1967         struct Qdisc *q;
1968         struct tcf_chain_info chain_info;
1969         struct tcf_chain *chain;
1970         struct tcf_block *block;
1971         struct tcf_proto *tp;
1972         unsigned long cl;
1973         void *fh;
1974         int err;
1975         int tp_created;
1976         bool rtnl_held = false;
1977         u32 flags;
1978
1979 replay:
1980         tp_created = 0;
1981
1982         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1983                                      rtm_tca_policy, extack);
1984         if (err < 0)
1985                 return err;
1986
1987         t = nlmsg_data(n);
1988         protocol = TC_H_MIN(t->tcm_info);
1989         prio = TC_H_MAJ(t->tcm_info);
1990         prio_allocate = false;
1991         parent = t->tcm_parent;
1992         tp = NULL;
1993         cl = 0;
1994         block = NULL;
1995         q = NULL;
1996         chain = NULL;
1997         flags = 0;
1998
1999         if (prio == 0) {
2000                 /* If no priority is provided by the user,
2001                  * we allocate one.
2002                  */
2003                 if (n->nlmsg_flags & NLM_F_CREATE) {
2004                         prio = TC_H_MAKE(0x80000000U, 0U);
2005                         prio_allocate = true;
2006                 } else {
2007                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2008                         return -ENOENT;
2009                 }
2010         }
2011
2012         /* Find head of filter chain. */
2013
2014         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2015         if (err)
2016                 return err;
2017
2018         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2019                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2020                 err = -EINVAL;
2021                 goto errout;
2022         }
2023
2024         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2025          * the block is shared (no qdisc found), the qdisc is not unlocked,
2026          * the classifier type is unspecified, or the classifier is not unlocked.
2027          */
2028         if (rtnl_held ||
2029             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2030             !tcf_proto_is_unlocked(name)) {
2031                 rtnl_held = true;
2032                 rtnl_lock();
2033         }
2034
2035         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2036         if (err)
2037                 goto errout;
2038
2039         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2040                                  extack);
2041         if (IS_ERR(block)) {
2042                 err = PTR_ERR(block);
2043                 goto errout;
2044         }
2045         block->classid = parent;
2046
2047         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2048         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2049                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2050                 err = -EINVAL;
2051                 goto errout;
2052         }
2053         chain = tcf_chain_get(block, chain_index, true);
2054         if (!chain) {
2055                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2056                 err = -ENOMEM;
2057                 goto errout;
2058         }
2059
2060         mutex_lock(&chain->filter_chain_lock);
2061         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2062                                prio, prio_allocate);
2063         if (IS_ERR(tp)) {
2064                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2065                 err = PTR_ERR(tp);
2066                 goto errout_locked;
2067         }
2068
2069         if (tp == NULL) {
2070                 struct tcf_proto *tp_new = NULL;
2071
2072                 if (chain->flushing) {
2073                         err = -EAGAIN;
2074                         goto errout_locked;
2075                 }
2076
2077                 /* Proto-tcf does not exist, create a new one */
2078
2079                 if (tca[TCA_KIND] == NULL || !protocol) {
2080                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2081                         err = -EINVAL;
2082                         goto errout_locked;
2083                 }
2084
2085                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2086                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2087                         err = -ENOENT;
2088                         goto errout_locked;
2089                 }
2090
2091                 if (prio_allocate)
2092                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2093                                                                &chain_info));
2094
2095                 mutex_unlock(&chain->filter_chain_lock);
2096                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2097                                           rtnl_held, extack);
2098                 if (IS_ERR(tp_new)) {
2099                         err = PTR_ERR(tp_new);
2100                         goto errout_tp;
2101                 }
2102
2103                 tp_created = 1;
2104                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2105                                                 rtnl_held);
2106                 if (IS_ERR(tp)) {
2107                         err = PTR_ERR(tp);
2108                         goto errout_tp;
2109                 }
2110         } else {
2111                 mutex_unlock(&chain->filter_chain_lock);
2112         }
2113
2114         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2115                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2116                 err = -EINVAL;
2117                 goto errout;
2118         }
2119
2120         fh = tp->ops->get(tp, t->tcm_handle);
2121
2122         if (!fh) {
2123                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2124                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2125                         err = -ENOENT;
2126                         goto errout;
2127                 }
2128         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2129                 tfilter_put(tp, fh);
2130                 NL_SET_ERR_MSG(extack, "Filter already exists");
2131                 err = -EEXIST;
2132                 goto errout;
2133         }
2134
2135         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2136                 tfilter_put(tp, fh);
2137                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2138                 err = -EINVAL;
2139                 goto errout;
2140         }
2141
2142         if (!(n->nlmsg_flags & NLM_F_CREATE))
2143                 flags |= TCA_ACT_FLAGS_REPLACE;
2144         if (!rtnl_held)
2145                 flags |= TCA_ACT_FLAGS_NO_RTNL;
2146         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2147                               flags, extack);
2148         if (err == 0) {
2149                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2150                                RTM_NEWTFILTER, false, rtnl_held);
2151                 tfilter_put(tp, fh);
2152                 /* q pointer is NULL for shared blocks */
2153                 if (q)
2154                         q->flags &= ~TCQ_F_CAN_BYPASS;
2155         }
2156
2157 errout:
2158         if (err && tp_created)
2159                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2160 errout_tp:
2161         if (chain) {
2162                 if (tp && !IS_ERR(tp))
2163                         tcf_proto_put(tp, rtnl_held, NULL);
2164                 if (!tp_created)
2165                         tcf_chain_put(chain);
2166         }
2167         tcf_block_release(q, block, rtnl_held);
2168
2169         if (rtnl_held)
2170                 rtnl_unlock();
2171
2172         if (err == -EAGAIN) {
2173                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2174                  * of target chain.
2175                  */
2176                 rtnl_held = true;
2177                 /* Replay the request. */
2178                 goto replay;
2179         }
2180         return err;
2181
2182 errout_locked:
2183         mutex_unlock(&chain->filter_chain_lock);
2184         goto errout;
2185 }
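/* Editorial example (user-space view, not part of this file): the
 * RTM_NEWTFILTER handler above is where a command such as
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * ends up. "tc filter add" sets NLM_F_CREATE (and NLM_F_EXCL, so an
 * existing filter fails with -EEXIST), omitting "prio" makes the kernel
 * auto-allocate one via tcf_auto_prio(), and TCA_KIND ("flower" here)
 * selects the tcf_proto_ops in tcf_proto_create(). "tc filter change"
 * omits NLM_F_CREATE, which maps to TCA_ACT_FLAGS_REPLACE above.
 */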
2186
2187 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2188                           struct netlink_ext_ack *extack)
2189 {
2190         struct net *net = sock_net(skb->sk);
2191         struct nlattr *tca[TCA_MAX + 1];
2192         char name[IFNAMSIZ];
2193         struct tcmsg *t;
2194         u32 protocol;
2195         u32 prio;
2196         u32 parent;
2197         u32 chain_index;
2198         struct Qdisc *q = NULL;
2199         struct tcf_chain_info chain_info;
2200         struct tcf_chain *chain = NULL;
2201         struct tcf_block *block = NULL;
2202         struct tcf_proto *tp = NULL;
2203         unsigned long cl = 0;
2204         void *fh = NULL;
2205         int err;
2206         bool rtnl_held = false;
2207
2208         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2209                                      rtm_tca_policy, extack);
2210         if (err < 0)
2211                 return err;
2212
2213         t = nlmsg_data(n);
2214         protocol = TC_H_MIN(t->tcm_info);
2215         prio = TC_H_MAJ(t->tcm_info);
2216         parent = t->tcm_parent;
2217
2218         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2219                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2220                 return -ENOENT;
2221         }
2222
2223         /* Find head of filter chain. */
2224
2225         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2226         if (err)
2227                 return err;
2228
2229         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2230                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2231                 err = -EINVAL;
2232                 goto errout;
2233         }
2234         /* Take the rtnl mutex if flushing the whole chain, the block is
2235          * shared (no qdisc found), the qdisc is not unlocked, the classifier
2236          * type is unspecified, or the classifier is not unlocked.
2237          */
2238         if (!prio ||
2239             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2240             !tcf_proto_is_unlocked(name)) {
2241                 rtnl_held = true;
2242                 rtnl_lock();
2243         }
2244
2245         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2246         if (err)
2247                 goto errout;
2248
2249         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2250                                  extack);
2251         if (IS_ERR(block)) {
2252                 err = PTR_ERR(block);
2253                 goto errout;
2254         }
2255
2256         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2257         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2258                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2259                 err = -EINVAL;
2260                 goto errout;
2261         }
2262         chain = tcf_chain_get(block, chain_index, false);
2263         if (!chain) {
2264                 /* User requested flush on non-existent chain. Nothing to do,
2265                  * so just return success.
2266                  */
2267                 if (prio == 0) {
2268                         err = 0;
2269                         goto errout;
2270                 }
2271                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2272                 err = -ENOENT;
2273                 goto errout;
2274         }
2275
2276         if (prio == 0) {
2277                 tfilter_notify_chain(net, skb, block, q, parent, n,
2278                                      chain, RTM_DELTFILTER);
2279                 tcf_chain_flush(chain, rtnl_held);
2280                 err = 0;
2281                 goto errout;
2282         }
2283
2284         mutex_lock(&chain->filter_chain_lock);
2285         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2286                                prio, false);
2287         if (!tp || IS_ERR(tp)) {
2288                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2289                 err = tp ? PTR_ERR(tp) : -ENOENT;
2290                 goto errout_locked;
2291         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2292                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2293                 err = -EINVAL;
2294                 goto errout_locked;
2295         } else if (t->tcm_handle == 0) {
2296                 tcf_proto_signal_destroying(chain, tp);
2297                 tcf_chain_tp_remove(chain, &chain_info, tp);
2298                 mutex_unlock(&chain->filter_chain_lock);
2299
2300                 tcf_proto_put(tp, rtnl_held, NULL);
2301                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2302                                RTM_DELTFILTER, false, rtnl_held);
2303                 err = 0;
2304                 goto errout;
2305         }
2306         mutex_unlock(&chain->filter_chain_lock);
2307
2308         fh = tp->ops->get(tp, t->tcm_handle);
2309
2310         if (!fh) {
2311                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2312                 err = -ENOENT;
2313         } else {
2314                 bool last;
2315
2316                 err = tfilter_del_notify(net, skb, n, tp, block,
2317                                          q, parent, fh, false, &last,
2318                                          rtnl_held, extack);
2319
2320                 if (err)
2321                         goto errout;
2322                 if (last)
2323                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2324         }
2325
2326 errout:
2327         if (chain) {
2328                 if (tp && !IS_ERR(tp))
2329                         tcf_proto_put(tp, rtnl_held, NULL);
2330                 tcf_chain_put(chain);
2331         }
2332         tcf_block_release(q, block, rtnl_held);
2333
2334         if (rtnl_held)
2335                 rtnl_unlock();
2336
2337         return err;
2338
2339 errout_locked:
2340         mutex_unlock(&chain->filter_chain_lock);
2341         goto errout;
2342 }
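/* Editorial note: the RTM_DELTFILTER handler above distinguishes three
 * cases. With prio == 0 ("tc filter del dev eth0 parent 1:") the whole
 * chain is flushed, and protocol/handle/kind must be absent. With a
 * prio but tcm_handle == 0, the entire proto-tcf (every filter of that
 * priority/protocol pair) is unlinked. With a non-zero handle, a single
 * filter is deleted via tp->ops->delete(), and the proto-tcf is pruned
 * afterwards by tcf_chain_tp_delete_empty() if that was its last filter.
 */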
2343
2344 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2345                           struct netlink_ext_ack *extack)
2346 {
2347         struct net *net = sock_net(skb->sk);
2348         struct nlattr *tca[TCA_MAX + 1];
2349         char name[IFNAMSIZ];
2350         struct tcmsg *t;
2351         u32 protocol;
2352         u32 prio;
2353         u32 parent;
2354         u32 chain_index;
2355         struct Qdisc *q = NULL;
2356         struct tcf_chain_info chain_info;
2357         struct tcf_chain *chain = NULL;
2358         struct tcf_block *block = NULL;
2359         struct tcf_proto *tp = NULL;
2360         unsigned long cl = 0;
2361         void *fh = NULL;
2362         int err;
2363         bool rtnl_held = false;
2364
2365         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2366                                      rtm_tca_policy, extack);
2367         if (err < 0)
2368                 return err;
2369
2370         t = nlmsg_data(n);
2371         protocol = TC_H_MIN(t->tcm_info);
2372         prio = TC_H_MAJ(t->tcm_info);
2373         parent = t->tcm_parent;
2374
2375         if (prio == 0) {
2376                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2377                 return -ENOENT;
2378         }
2379
2380         /* Find head of filter chain. */
2381
2382         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2383         if (err)
2384                 return err;
2385
2386         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2387                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2388                 err = -EINVAL;
2389                 goto errout;
2390         }
2391         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2392          * qdisc is not unlocked, the classifier type is unspecified, or the
2393          * classifier is not unlocked.
2394          */
2395         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2396             !tcf_proto_is_unlocked(name)) {
2397                 rtnl_held = true;
2398                 rtnl_lock();
2399         }
2400
2401         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2402         if (err)
2403                 goto errout;
2404
2405         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2406                                  extack);
2407         if (IS_ERR(block)) {
2408                 err = PTR_ERR(block);
2409                 goto errout;
2410         }
2411
2412         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2413         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2414                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2415                 err = -EINVAL;
2416                 goto errout;
2417         }
2418         chain = tcf_chain_get(block, chain_index, false);
2419         if (!chain) {
2420                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2421                 err = -EINVAL;
2422                 goto errout;
2423         }
2424
2425         mutex_lock(&chain->filter_chain_lock);
2426         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2427                                prio, false);
2428         mutex_unlock(&chain->filter_chain_lock);
2429         if (!tp || IS_ERR(tp)) {
2430                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2431                 err = tp ? PTR_ERR(tp) : -ENOENT;
2432                 goto errout;
2433         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2434                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2435                 err = -EINVAL;
2436                 goto errout;
2437         }
2438
2439         fh = tp->ops->get(tp, t->tcm_handle);
2440
2441         if (!fh) {
2442                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2443                 err = -ENOENT;
2444         } else {
2445                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2446                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2447                 if (err < 0)
2448                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2449         }
2450
2451         tfilter_put(tp, fh);
2452 errout:
2453         if (chain) {
2454                 if (tp && !IS_ERR(tp))
2455                         tcf_proto_put(tp, rtnl_held, NULL);
2456                 tcf_chain_put(chain);
2457         }
2458         tcf_block_release(q, block, rtnl_held);
2459
2460         if (rtnl_held)
2461                 rtnl_unlock();
2462
2463         return err;
2464 }
2465
2466 struct tcf_dump_args {
2467         struct tcf_walker w;
2468         struct sk_buff *skb;
2469         struct netlink_callback *cb;
2470         struct tcf_block *block;
2471         struct Qdisc *q;
2472         u32 parent;
2473         bool terse_dump;
2474 };
2475
2476 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2477 {
2478         struct tcf_dump_args *a = (void *)arg;
2479         struct net *net = sock_net(a->skb->sk);
2480
2481         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2482                              n, NETLINK_CB(a->cb->skb).portid,
2483                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2484                              RTM_NEWTFILTER, a->terse_dump, true);
2485 }
2486
2487 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2488                            struct sk_buff *skb, struct netlink_callback *cb,
2489                            long index_start, long *p_index, bool terse)
2490 {
2491         struct net *net = sock_net(skb->sk);
2492         struct tcf_block *block = chain->block;
2493         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2494         struct tcf_proto *tp, *tp_prev;
2495         struct tcf_dump_args arg;
2496
2497         for (tp = __tcf_get_next_proto(chain, NULL);
2498              tp;
2499              tp_prev = tp,
2500                      tp = __tcf_get_next_proto(chain, tp),
2501                      tcf_proto_put(tp_prev, true, NULL),
2502                      (*p_index)++) {
2503                 if (*p_index < index_start)
2504                         continue;
2505                 if (TC_H_MAJ(tcm->tcm_info) &&
2506                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2507                         continue;
2508                 if (TC_H_MIN(tcm->tcm_info) &&
2509                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2510                         continue;
2511                 if (*p_index > index_start)
2512                         memset(&cb->args[1], 0,
2513                                sizeof(cb->args) - sizeof(cb->args[0]));
2514                 if (cb->args[1] == 0) {
2515                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2516                                           NETLINK_CB(cb->skb).portid,
2517                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2518                                           RTM_NEWTFILTER, false, true) <= 0)
2519                                 goto errout;
2520                         cb->args[1] = 1;
2521                 }
2522                 if (!tp->ops->walk)
2523                         continue;
2524                 arg.w.fn = tcf_node_dump;
2525                 arg.skb = skb;
2526                 arg.cb = cb;
2527                 arg.block = block;
2528                 arg.q = q;
2529                 arg.parent = parent;
2530                 arg.w.stop = 0;
2531                 arg.w.skip = cb->args[1] - 1;
2532                 arg.w.count = 0;
2533                 arg.w.cookie = cb->args[2];
2534                 arg.terse_dump = terse;
2535                 tp->ops->walk(tp, &arg.w, true);
2536                 cb->args[2] = arg.w.cookie;
2537                 cb->args[1] = arg.w.count + 1;
2538                 if (arg.w.stop)
2539                         goto errout;
2540         }
2541         return true;
2542
2543 errout:
2544         tcf_proto_put(tp, true, NULL);
2545         return false;
2546 }
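/* Editorial note on dump resumption: across netlink dump calls,
 * cb->args[0] holds the flat proto-tcf index (managed by
 * tc_dump_tfilter() below), cb->args[1] is 1 + the number of filters
 * already dumped from the current proto-tcf (0 meaning its header has
 * not been emitted yet), and cb->args[2] is an opaque cookie for
 * tp->ops->walk(). args[1] onwards are zeroed whenever the walk above
 * advances past the proto-tcf where the previous dump call stopped.
 */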
2547
2548 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2549         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2550 };
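/* Editorial sketch (assumed user-space snippet; libmnl used purely for
 * illustration): a dumper opts into terse mode by attaching
 * TCA_DUMP_FLAGS as an NLA_BITFIELD32 with value and selector set:
 *
 *	struct nla_bitfield32 flags = {
 *		.value    = TCA_DUMP_FLAGS_TERSE,
 *		.selector = TCA_DUMP_FLAGS_TERSE,
 *	};
 *	mnl_attr_put(nlh, TCA_DUMP_FLAGS, sizeof(flags), &flags);
 *
 * tcf_fill_node() then calls tp->ops->terse_dump() instead of dump(),
 * and bails out for classifiers that do not implement it.
 */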
2551
2552 /* called with RTNL */
2553 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2554 {
2555         struct tcf_chain *chain, *chain_prev;
2556         struct net *net = sock_net(skb->sk);
2557         struct nlattr *tca[TCA_MAX + 1];
2558         struct Qdisc *q = NULL;
2559         struct tcf_block *block;
2560         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2561         bool terse_dump = false;
2562         long index_start;
2563         long index;
2564         u32 parent;
2565         int err;
2566
2567         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2568                 return skb->len;
2569
2570         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2571                                      tcf_tfilter_dump_policy, cb->extack);
2572         if (err)
2573                 return err;
2574
2575         if (tca[TCA_DUMP_FLAGS]) {
2576                 struct nla_bitfield32 flags =
2577                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2578
2579                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2580         }
2581
2582         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2583                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2584                 if (!block)
2585                         goto out;
2586                 /* If we work with a block index, q is NULL and the parent
2587                  * value is never used in the following code. The check
2588                  * in tcf_fill_node prevents it. However, the compiler does
2589                  * not see that far, so set parent to zero to silence the
2590                  * warning about parent being uninitialized.
2591                  */
2592                 parent = 0;
2593         } else {
2594                 const struct Qdisc_class_ops *cops;
2595                 struct net_device *dev;
2596                 unsigned long cl = 0;
2597
2598                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2599                 if (!dev)
2600                         return skb->len;
2601
2602                 parent = tcm->tcm_parent;
2603                 if (!parent)
2604                         q = rtnl_dereference(dev->qdisc);
2605                 else
2606                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2607                 if (!q)
2608                         goto out;
2609                 cops = q->ops->cl_ops;
2610                 if (!cops)
2611                         goto out;
2612                 if (!cops->tcf_block)
2613                         goto out;
2614                 if (TC_H_MIN(tcm->tcm_parent)) {
2615                         cl = cops->find(q, tcm->tcm_parent);
2616                         if (cl == 0)
2617                                 goto out;
2618                 }
2619                 block = cops->tcf_block(q, cl, NULL);
2620                 if (!block)
2621                         goto out;
2622                 parent = block->classid;
2623                 if (tcf_block_shared(block))
2624                         q = NULL;
2625         }
2626
2627         index_start = cb->args[0];
2628         index = 0;
2629
2630         for (chain = __tcf_get_next_chain(block, NULL);
2631              chain;
2632              chain_prev = chain,
2633                      chain = __tcf_get_next_chain(block, chain),
2634                      tcf_chain_put(chain_prev)) {
2635                 if (tca[TCA_CHAIN] &&
2636                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2637                         continue;
2638                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2639                                     index_start, &index, terse_dump)) {
2640                         tcf_chain_put(chain);
2641                         err = -EMSGSIZE;
2642                         break;
2643                 }
2644         }
2645
2646         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2647                 tcf_block_refcnt_put(block, true);
2648         cb->args[0] = index;
2649
2650 out:
2651         /* If we made no progress, the error (EMSGSIZE) is real */
2652         if (skb->len == 0 && err)
2653                 return err;
2654         return skb->len;
2655 }
2656
2657 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2658                               void *tmplt_priv, u32 chain_index,
2659                               struct net *net, struct sk_buff *skb,
2660                               struct tcf_block *block,
2661                               u32 portid, u32 seq, u16 flags, int event)
2662 {
2663         unsigned char *b = skb_tail_pointer(skb);
2664         const struct tcf_proto_ops *ops;
2665         struct nlmsghdr *nlh;
2666         struct tcmsg *tcm;
2667         void *priv;
2668
2669         ops = tmplt_ops;
2670         priv = tmplt_priv;
2671
2672         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2673         if (!nlh)
2674                 goto out_nlmsg_trim;
2675         tcm = nlmsg_data(nlh);
2676         tcm->tcm_family = AF_UNSPEC;
2677         tcm->tcm__pad1 = 0;
2678         tcm->tcm__pad2 = 0;
2679         tcm->tcm_handle = 0;
2680         if (block->q) {
2681                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2682                 tcm->tcm_parent = block->q->handle;
2683         } else {
2684                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2685                 tcm->tcm_block_index = block->index;
2686         }
2687
2688         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2689                 goto nla_put_failure;
2690
2691         if (ops) {
2692                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2693                         goto nla_put_failure;
2694                 if (ops->tmplt_dump(skb, net, priv) < 0)
2695                         goto nla_put_failure;
2696         }
2697
2698         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2699         return skb->len;
2700
2701 out_nlmsg_trim:
2702 nla_put_failure:
2703         nlmsg_trim(skb, b);
2704         return -EMSGSIZE;
2705 }
2706
2707 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2708                            u32 seq, u16 flags, int event, bool unicast)
2709 {
2710         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2711         struct tcf_block *block = chain->block;
2712         struct net *net = block->net;
2713         struct sk_buff *skb;
2714         int err = 0;
2715
2716         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2717         if (!skb)
2718                 return -ENOBUFS;
2719
2720         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2721                                chain->index, net, skb, block, portid,
2722                                seq, flags, event) <= 0) {
2723                 kfree_skb(skb);
2724                 return -EINVAL;
2725         }
2726
2727         if (unicast)
2728                 err = rtnl_unicast(skb, net, portid);
2729         else
2730                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2731                                      flags & NLM_F_ECHO);
2732
2733         return err;
2734 }
2735
2736 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2737                                   void *tmplt_priv, u32 chain_index,
2738                                   struct tcf_block *block, struct sk_buff *oskb,
2739                                   u32 seq, u16 flags, bool unicast)
2740 {
2741         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2742         struct net *net = block->net;
2743         struct sk_buff *skb;
2744
2745         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2746         if (!skb)
2747                 return -ENOBUFS;
2748
2749         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2750                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2751                 kfree_skb(skb);
2752                 return -EINVAL;
2753         }
2754
2755         if (unicast)
2756                 return rtnl_unicast(skb, net, portid);
2757
2758         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2759 }
2760
2761 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2762                               struct nlattr **tca,
2763                               struct netlink_ext_ack *extack)
2764 {
2765         const struct tcf_proto_ops *ops;
2766         char name[IFNAMSIZ];
2767         void *tmplt_priv;
2768
2769         /* If kind is not set, the user did not specify a template. */
2770         if (!tca[TCA_KIND])
2771                 return 0;
2772
2773         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2774                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2775                 return -EINVAL;
2776         }
2777
2778         ops = tcf_proto_lookup_ops(name, true, extack);
2779         if (IS_ERR(ops))
2780                 return PTR_ERR(ops);
2781         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2782                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2783                 return -EOPNOTSUPP;
2784         }
2785
2786         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2787         if (IS_ERR(tmplt_priv)) {
2788                 module_put(ops->owner);
2789                 return PTR_ERR(tmplt_priv);
2790         }
2791         chain->tmplt_ops = ops;
2792         chain->tmplt_priv = tmplt_priv;
2793         return 0;
2794 }
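/* Editorial example (user-space view): chain templates are created by
 * naming a classifier kind, with a match template but no actual filter,
 * on a chain; with iproute2 this looks something like:
 *
 *	tc chain add dev eth0 chain 1 protocol ip \
 *		flower dst_ip 192.0.2.0/24
 *
 * which reaches tc_chain_tmplt_add() with TCA_KIND = "flower". The
 * classifier's tmplt_create() op parses the template so that filters
 * later added to chain 1 can be validated against it.
 */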
2795
2796 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2797                                void *tmplt_priv)
2798 {
2799         /* If template ops are not set, there is no work to do. */
2800         if (!tmplt_ops)
2801                 return;
2802
2803         tmplt_ops->tmplt_destroy(tmplt_priv);
2804         module_put(tmplt_ops->owner);
2805 }
2806
2807 /* Add/delete/get a chain */
2808
2809 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2810                         struct netlink_ext_ack *extack)
2811 {
2812         struct net *net = sock_net(skb->sk);
2813         struct nlattr *tca[TCA_MAX + 1];
2814         struct tcmsg *t;
2815         u32 parent;
2816         u32 chain_index;
2817         struct Qdisc *q;
2818         struct tcf_chain *chain;
2819         struct tcf_block *block;
2820         unsigned long cl;
2821         int err;
2822
2823 replay:
2824         q = NULL;
2825         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2826                                      rtm_tca_policy, extack);
2827         if (err < 0)
2828                 return err;
2829
2830         t = nlmsg_data(n);
2831         parent = t->tcm_parent;
2832         cl = 0;
2833
2834         block = tcf_block_find(net, &q, &parent, &cl,
2835                                t->tcm_ifindex, t->tcm_block_index, extack);
2836         if (IS_ERR(block))
2837                 return PTR_ERR(block);
2838
2839         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2840         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2841                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2842                 err = -EINVAL;
2843                 goto errout_block;
2844         }
2845
2846         mutex_lock(&block->lock);
2847         chain = tcf_chain_lookup(block, chain_index);
2848         if (n->nlmsg_type == RTM_NEWCHAIN) {
2849                 if (chain) {
2850                         if (tcf_chain_held_by_acts_only(chain)) {
2851                                 /* The chain exists only because there is
2852                                  * some action referencing it.
2853                                  */
2854                                 tcf_chain_hold(chain);
2855                         } else {
2856                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2857                                 err = -EEXIST;
2858                                 goto errout_block_locked;
2859                         }
2860                 } else {
2861                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2862                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2863                                 err = -ENOENT;
2864                                 goto errout_block_locked;
2865                         }
2866                         chain = tcf_chain_create(block, chain_index);
2867                         if (!chain) {
2868                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2869                                 err = -ENOMEM;
2870                                 goto errout_block_locked;
2871                         }
2872                 }
2873         } else {
2874                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2875                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2876                         err = -EINVAL;
2877                         goto errout_block_locked;
2878                 }
2879                 tcf_chain_hold(chain);
2880         }
2881
2882         if (n->nlmsg_type == RTM_NEWCHAIN) {
2883                 /* Modifying chain requires holding parent block lock. In case
2884                  * the chain was successfully added, take a reference to the
2885                  * chain. This ensures that an empty chain does not disappear at
2886                  * the end of this function.
2887                  */
2888                 tcf_chain_hold(chain);
2889                 chain->explicitly_created = true;
2890         }
2891         mutex_unlock(&block->lock);
2892
2893         switch (n->nlmsg_type) {
2894         case RTM_NEWCHAIN:
2895                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2896                 if (err) {
2897                         tcf_chain_put_explicitly_created(chain);
2898                         goto errout;
2899                 }
2900
2901                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2902                                 RTM_NEWCHAIN, false);
2903                 break;
2904         case RTM_DELCHAIN:
2905                 tfilter_notify_chain(net, skb, block, q, parent, n,
2906                                      chain, RTM_DELTFILTER);
2907                 /* Flush the chain first as the user requested chain removal. */
2908                 tcf_chain_flush(chain, true);
2909                 /* In case the chain was successfully deleted, put a reference
2910                  * to the chain previously taken during addition.
2911                  */
2912                 tcf_chain_put_explicitly_created(chain);
2913                 break;
2914         case RTM_GETCHAIN:
2915                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2916                                       n->nlmsg_flags, n->nlmsg_type, true);
2917                 if (err < 0)
2918                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2919                 break;
2920         default:
2921                 err = -EOPNOTSUPP;
2922                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2923                 goto errout;
2924         }
2925
2926 errout:
2927         tcf_chain_put(chain);
2928 errout_block:
2929         tcf_block_release(q, block, true);
2930         if (err == -EAGAIN)
2931                 /* Replay the request. */
2932                 goto replay;
2933         return err;
2934
2935 errout_block_locked:
2936         mutex_unlock(&block->lock);
2937         goto errout_block;
2938 }
2939
2940 /* called with RTNL */
2941 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2942 {
2943         struct net *net = sock_net(skb->sk);
2944         struct nlattr *tca[TCA_MAX + 1];
2945         struct Qdisc *q = NULL;
2946         struct tcf_block *block;
2947         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2948         struct tcf_chain *chain;
2949         long index_start;
2950         long index;
2951         int err;
2952
2953         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2954                 return skb->len;
2955
2956         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2957                                      rtm_tca_policy, cb->extack);
2958         if (err)
2959                 return err;
2960
2961         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2962                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2963                 if (!block)
2964                         goto out;
2965         } else {
2966                 const struct Qdisc_class_ops *cops;
2967                 struct net_device *dev;
2968                 unsigned long cl = 0;
2969
2970                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2971                 if (!dev)
2972                         return skb->len;
2973
2974                 if (!tcm->tcm_parent)
2975                         q = rtnl_dereference(dev->qdisc);
2976                 else
2977                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2978
2979                 if (!q)
2980                         goto out;
2981                 cops = q->ops->cl_ops;
2982                 if (!cops)
2983                         goto out;
2984                 if (!cops->tcf_block)
2985                         goto out;
2986                 if (TC_H_MIN(tcm->tcm_parent)) {
2987                         cl = cops->find(q, tcm->tcm_parent);
2988                         if (cl == 0)
2989                                 goto out;
2990                 }
2991                 block = cops->tcf_block(q, cl, NULL);
2992                 if (!block)
2993                         goto out;
2994                 if (tcf_block_shared(block))
2995                         q = NULL;
2996         }
2997
2998         index_start = cb->args[0];
2999         index = 0;
3000
3001         mutex_lock(&block->lock);
3002         list_for_each_entry(chain, &block->chain_list, list) {
3003                 if (tca[TCA_CHAIN] &&
3004                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3005                         continue;
3006                 if (index < index_start) {
3007                         index++;
3008                         continue;
3009                 }
3010                 if (tcf_chain_held_by_acts_only(chain))
3011                         continue;
3012                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3013                                          chain->index, net, skb, block,
3014                                          NETLINK_CB(cb->skb).portid,
3015                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3016                                          RTM_NEWCHAIN);
3017                 if (err <= 0)
3018                         break;
3019                 index++;
3020         }
3021         mutex_unlock(&block->lock);
3022
3023         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3024                 tcf_block_refcnt_put(block, true);
3025         cb->args[0] = index;
3026
3027 out:
3028         /* If we made no progress, the error (EMSGSIZE) is real */
3029         if (skb->len == 0 && err)
3030                 return err;
3031         return skb->len;
3032 }
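
/* Illustrative sketch (not part of this file): tc_dump_chain() above follows
 * the standard netlink dump-resume pattern. The position reached is saved in
 * cb->args[0] and skipped on the next invocation; "my_obj_list" and
 * "my_fill" below are hypothetical:
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		long index_start = cb->args[0];
 *		long index = 0;
 *		struct my_obj *obj;
 *
 *		list_for_each_entry(obj, &my_obj_list, list) {
 *			if (index < index_start) {
 *				index++;
 *				continue;
 *			}
 *			if (my_fill(skb, obj) <= 0)
 *				break;
 *			index++;
 *		}
 *		cb->args[0] = index;
 *		return skb->len;
 *	}
 *
 * A break means the skb filled up; the saved index makes the next dump
 * call resume from the first object that did not fit.
 */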
3033
3034 void tcf_exts_destroy(struct tcf_exts *exts)
3035 {
3036 #ifdef CONFIG_NET_CLS_ACT
3037         if (exts->actions) {
3038                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3039                 kfree(exts->actions);
3040         }
3041         exts->nr_actions = 0;
3042 #endif
3043 }
3044 EXPORT_SYMBOL(tcf_exts_destroy);
3045
3046 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3047                          struct nlattr *rate_tlv, struct tcf_exts *exts,
3048                          u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3049 {
3050 #ifdef CONFIG_NET_CLS_ACT
3051         {
3052                 int init_res[TCA_ACT_MAX_PRIO] = {};
3053                 struct tc_action *act;
3054                 size_t attr_size = 0;
3055
3056                 if (exts->police && tb[exts->police]) {
3057                         struct tc_action_ops *a_o;
3058
3059                         a_o = tc_action_load_ops(tb[exts->police], true,
3060                                                  !(flags & TCA_ACT_FLAGS_NO_RTNL),
3061                                                  extack);
3062                         if (IS_ERR(a_o))
3063                                 return PTR_ERR(a_o);
3064                         flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3065                         act = tcf_action_init_1(net, tp, tb[exts->police],
3066                                                 rate_tlv, a_o, init_res, flags,
3067                                                 extack);
3068                         module_put(a_o->owner);
3069                         if (IS_ERR(act))
3070                                 return PTR_ERR(act);
3071
3072                         act->type = exts->type = TCA_OLD_COMPAT;
3073                         exts->actions[0] = act;
3074                         exts->nr_actions = 1;
3075                         tcf_idr_insert_many(exts->actions);
3076                 } else if (exts->action && tb[exts->action]) {
3077                         int err;
3078
3079                         flags |= TCA_ACT_FLAGS_BIND;
3080                         err = tcf_action_init(net, tp, tb[exts->action],
3081                                               rate_tlv, exts->actions, init_res,
3082                                               &attr_size, flags, fl_flags,
3083                                               extack);
3084                         if (err < 0)
3085                                 return err;
3086                         exts->nr_actions = err;
3087                 }
3088         }
3089 #else
3090         if ((exts->action && tb[exts->action]) ||
3091             (exts->police && tb[exts->police])) {
3092                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3093                 return -EOPNOTSUPP;
3094         }
3095 #endif
3096
3097         return 0;
3098 }
3099 EXPORT_SYMBOL(tcf_exts_validate_ex);
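
/* Illustrative sketch (not part of this file): a classifier's ->change()
 * typically initializes its tcf_exts with its own action/police attribute
 * ids and validates the request through the helper above; TCA_MYCLS_* and
 * "f" are hypothetical:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	if (err < 0)
 *		goto errout;
 *
 * where the errout path ends in tcf_exts_destroy(&f->exts).
 */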
3100
3101 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3102                       struct nlattr *rate_tlv, struct tcf_exts *exts,
3103                       u32 flags, struct netlink_ext_ack *extack)
3104 {
3105         return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3106                                     flags, 0, extack);
3107 }
3108 EXPORT_SYMBOL(tcf_exts_validate);
3109
3110 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3111 {
3112 #ifdef CONFIG_NET_CLS_ACT
3113         struct tcf_exts old = *dst;
3114
3115         *dst = *src;
3116         tcf_exts_destroy(&old);
3117 #endif
3118 }
3119 EXPORT_SYMBOL(tcf_exts_change);
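
/* Illustrative sketch (not part of this file): tcf_exts_change() is for
 * replace paths. The caller validates the new actions into a temporary
 * tcf_exts and only then commits, so the old actions are released after
 * the new ones are in place ("new" is hypothetical):
 *
 *	struct tcf_exts new;
 *
 *	...tcf_exts_init() and tcf_exts_validate() on &new...
 *	tcf_exts_change(&f->exts, &new);
 */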
3120
3121 #ifdef CONFIG_NET_CLS_ACT
3122 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3123 {
3124         if (exts->nr_actions == 0)
3125                 return NULL;
3126         else
3127                 return exts->actions[0];
3128 }
3129 #endif
3130
3131 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3132 {
3133 #ifdef CONFIG_NET_CLS_ACT
3134         struct nlattr *nest;
3135
3136         if (exts->action && tcf_exts_has_actions(exts)) {
3137                 /*
3138                  * Again, for backward-compatibility mode: we want to
3139                  * work with both old and new modes of entering tc
3140                  * data even if iproute2 is newer. - jhs
3141                  */
3142                 if (exts->type != TCA_OLD_COMPAT) {
3143                         nest = nla_nest_start_noflag(skb, exts->action);
3144                         if (!nest)
3145                                 goto nla_put_failure;
3146
3147                         if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3148                             < 0)
3149                                 goto nla_put_failure;
3150                         nla_nest_end(skb, nest);
3151                 } else if (exts->police) {
3152                         struct tc_action *act = tcf_exts_first_act(exts);
3153                         nest = nla_nest_start_noflag(skb, exts->police);
3154                         if (!nest || !act)
3155                                 goto nla_put_failure;
3156                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3157                                 goto nla_put_failure;
3158                         nla_nest_end(skb, nest);
3159                 }
3160         }
3161         return 0;
3162
3163 nla_put_failure:
3164         nla_nest_cancel(skb, nest);
3165         return -1;
3166 #else
3167         return 0;
3168 #endif
3169 }
3170 EXPORT_SYMBOL(tcf_exts_dump);
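
/* Illustrative sketch (not part of this file): a classifier's ->dump()
 * usually emits its own attributes, then appends the extensions with the
 * helper above, and emits action stats separately ("f" is hypothetical):
 *
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	...
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */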
3171
3172 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3173 {
3174 #ifdef CONFIG_NET_CLS_ACT
3175         struct nlattr *nest;
3176
3177         if (!exts->action || !tcf_exts_has_actions(exts))
3178                 return 0;
3179
3180         nest = nla_nest_start_noflag(skb, exts->action);
3181         if (!nest)
3182                 goto nla_put_failure;
3183
3184         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3185                 goto nla_put_failure;
3186         nla_nest_end(skb, nest);
3187         return 0;
3188
3189 nla_put_failure:
3190         nla_nest_cancel(skb, nest);
3191         return -1;
3192 #else
3193         return 0;
3194 #endif
3195 }
3196 EXPORT_SYMBOL(tcf_exts_terse_dump);
3197
3198 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3199 {
3200 #ifdef CONFIG_NET_CLS_ACT
3201         struct tc_action *a = tcf_exts_first_act(exts);
3202         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3203                 return -1;
3204 #endif
3205         return 0;
3206 }
3207 EXPORT_SYMBOL(tcf_exts_dump_stats);
3208
3209 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3210 {
3211         if (*flags & TCA_CLS_FLAGS_IN_HW)
3212                 return;
3213         *flags |= TCA_CLS_FLAGS_IN_HW;
3214         atomic_inc(&block->offloadcnt);
3215 }
3216
3217 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3218 {
3219         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3220                 return;
3221         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3222         atomic_dec(&block->offloadcnt);
3223 }
3224
3225 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3226                                       struct tcf_proto *tp, u32 *cnt,
3227                                       u32 *flags, u32 diff, bool add)
3228 {
3229         lockdep_assert_held(&block->cb_lock);
3230
3231         spin_lock(&tp->lock);
3232         if (add) {
3233                 if (!*cnt)
3234                         tcf_block_offload_inc(block, flags);
3235                 *cnt += diff;
3236         } else {
3237                 *cnt -= diff;
3238                 if (!*cnt)
3239                         tcf_block_offload_dec(block, flags);
3240         }
3241         spin_unlock(&tp->lock);
3242 }
3243
3244 static void
3245 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3246                          u32 *cnt, u32 *flags)
3247 {
3248         lockdep_assert_held(&block->cb_lock);
3249
3250         spin_lock(&tp->lock);
3251         tcf_block_offload_dec(block, flags);
3252         *cnt = 0;
3253         spin_unlock(&tp->lock);
3254 }
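
/* Worked example (illustrative): if a filter is accepted by three block
 * callbacks, tc_cls_offload_cnt_update(..., diff = 3, add = true) moves the
 * per-filter in_hw_count from 0 to 3 and, because it was zero, sets
 * TCA_CLS_FLAGS_IN_HW and bumps block->offloadcnt exactly once. The
 * matching remove with diff = 3 brings the count back to zero and undoes
 * both.
 */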
3255
3256 static int
3257 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3258                    void *type_data, bool err_stop)
3259 {
3260         struct flow_block_cb *block_cb;
3261         int ok_count = 0;
3262         int err;
3263
3264         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3265                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3266                 if (err) {
3267                         if (err_stop)
3268                                 return err;
3269                 } else {
3270                         ok_count++;
3271                 }
3272         }
3273         return ok_count;
3274 }
3275
3276 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3277                      void *type_data, bool err_stop, bool rtnl_held)
3278 {
3279         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3280         int ok_count;
3281
3282 retry:
3283         if (take_rtnl)
3284                 rtnl_lock();
3285         down_read(&block->cb_lock);
3286         /* Need to obtain the rtnl lock if the block is bound to devs that
3287          * require it. In the block bind code, cb_lock is obtained while
3288          * holding rtnl, so we must obtain the locks in the same order here.
3289          */
3290         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3291                 up_read(&block->cb_lock);
3292                 take_rtnl = true;
3293                 goto retry;
3294         }
3295
3296         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3297
3298         up_read(&block->cb_lock);
3299         if (take_rtnl)
3300                 rtnl_unlock();
3301         return ok_count;
3302 }
3303 EXPORT_SYMBOL(tc_setup_cb_call);
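
/* Illustrative sketch (not part of this file): every entry on
 * block->flow_block.cb_list is a driver callback with the flow_setup_cb_t
 * signature; "my_drv_*" names are hypothetical:
 *
 *	static int my_drv_setup_tc_block_cb(enum tc_setup_type type,
 *					    void *type_data, void *cb_priv)
 *	{
 *		struct my_drv_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return my_drv_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */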
3304
3305 /* Non-destructive filter add. If a filter that wasn't already in hardware
3306  * is successfully offloaded, increment the block's offload counter. On
3307  * failure, the previously offloaded filter is considered intact and the
3308  * offload counter is not decremented.
3309  */
3310
3311 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3312                     enum tc_setup_type type, void *type_data, bool err_stop,
3313                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3314 {
3315         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3316         int ok_count;
3317
3318 retry:
3319         if (take_rtnl)
3320                 rtnl_lock();
3321         down_read(&block->cb_lock);
3322         /* Need to obtain the rtnl lock if the block is bound to devs that
3323          * require it. In the block bind code, cb_lock is obtained while
3324          * holding rtnl, so we must obtain the locks in the same order here.
3325          */
3326         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3327                 up_read(&block->cb_lock);
3328                 take_rtnl = true;
3329                 goto retry;
3330         }
3331
3332         /* Make sure all netdevs sharing this block are offload-capable. */
3333         if (block->nooffloaddevcnt && err_stop) {
3334                 ok_count = -EOPNOTSUPP;
3335                 goto err_unlock;
3336         }
3337
3338         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3339         if (ok_count < 0)
3340                 goto err_unlock;
3341
3342         if (tp->ops->hw_add)
3343                 tp->ops->hw_add(tp, type_data);
3344         if (ok_count > 0)
3345                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3346                                           ok_count, true);
3347 err_unlock:
3348         up_read(&block->cb_lock);
3349         if (take_rtnl)
3350                 rtnl_unlock();
3351         return min(ok_count, 0);
3352 }
3353 EXPORT_SYMBOL(tc_setup_cb_add);
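
/* Illustrative sketch (not part of this file): a classifier offloads a new
 * filter roughly the way cls_flower does, passing its per-filter flags and
 * in_hw_count so the helper can maintain the block counters ("f" and
 * "skip_sw" are hypothetical locals):
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	...
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 */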
3354
3355 /* Destructive filter replace. If a filter that wasn't already in hardware
3356  * is successfully offloaded, increment the block's offload counter. On
3357  * failure, the previously offloaded filter is considered destroyed and the
3358  * offload counter is decremented.
3359  */
3360
3361 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3362                         enum tc_setup_type type, void *type_data, bool err_stop,
3363                         u32 *old_flags, unsigned int *old_in_hw_count,
3364                         u32 *new_flags, unsigned int *new_in_hw_count,
3365                         bool rtnl_held)
3366 {
3367         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3368         int ok_count;
3369
3370 retry:
3371         if (take_rtnl)
3372                 rtnl_lock();
3373         down_read(&block->cb_lock);
3374         /* Need to obtain the rtnl lock if the block is bound to devs that
3375          * require it. In the block bind code, cb_lock is obtained while
3376          * holding rtnl, so we must obtain the locks in the same order here.
3377          */
3378         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3379                 up_read(&block->cb_lock);
3380                 take_rtnl = true;
3381                 goto retry;
3382         }
3383
3384         /* Make sure all netdevs sharing this block are offload-capable. */
3385         if (block->nooffloaddevcnt && err_stop) {
3386                 ok_count = -EOPNOTSUPP;
3387                 goto err_unlock;
3388         }
3389
3390         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3391         if (tp->ops->hw_del)
3392                 tp->ops->hw_del(tp, type_data);
3393
3394         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3395         if (ok_count < 0)
3396                 goto err_unlock;
3397
3398         if (tp->ops->hw_add)
3399                 tp->ops->hw_add(tp, type_data);
3400         if (ok_count > 0)
3401                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3402                                           new_flags, ok_count, true);
3403 err_unlock:
3404         up_read(&block->cb_lock);
3405         if (take_rtnl)
3406                 rtnl_unlock();
3407         return min(ok_count, 0);
3408 }
3409 EXPORT_SYMBOL(tc_setup_cb_replace);
3410
3411 /* Destroy the filter and decrement the block's offload counter if the
3412  * filter was previously offloaded.
3413  */
3414
3415 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3416                         enum tc_setup_type type, void *type_data, bool err_stop,
3417                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3418 {
3419         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3420         int ok_count;
3421
3422 retry:
3423         if (take_rtnl)
3424                 rtnl_lock();
3425         down_read(&block->cb_lock);
3426         /* Need to obtain the rtnl lock if the block is bound to devs that
3427          * require it. In the block bind code, cb_lock is obtained while
3428          * holding rtnl, so we must obtain the locks in the same order here.
3429          */
3430         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3431                 up_read(&block->cb_lock);
3432                 take_rtnl = true;
3433                 goto retry;
3434         }
3435
3436         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3437
3438         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3439         if (tp->ops->hw_del)
3440                 tp->ops->hw_del(tp, type_data);
3441
3442         up_read(&block->cb_lock);
3443         if (take_rtnl)
3444                 rtnl_unlock();
3445         return min(ok_count, 0);
3446 }
3447 EXPORT_SYMBOL(tc_setup_cb_destroy);
3448
3449 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3450                           bool add, flow_setup_cb_t *cb,
3451                           enum tc_setup_type type, void *type_data,
3452                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3453 {
3454         int err = cb(type, type_data, cb_priv);
3455
3456         if (err) {
3457                 if (add && tc_skip_sw(*flags))
3458                         return err;
3459         } else {
3460                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3461                                           add);
3462         }
3463
3464         return 0;
3465 }
3466 EXPORT_SYMBOL(tc_setup_cb_reoffload);
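
/* Illustrative sketch (not part of this file): a classifier's ->reoffload()
 * walks its filters and feeds each one to the newly bound callback through
 * the helper above (cls_flower-style; "f" is hypothetical):
 *
 *	list_for_each_entry(f, &head->filters, list) {
 *		...build a struct flow_cls_offload cls_flower for "f"...
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_flower,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */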
3467
3468 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3469                               const struct tc_action *act)
3470 {
3471         struct tc_cookie *cookie;
3472         int err = 0;
3473
3474         rcu_read_lock();
3475         cookie = rcu_dereference(act->act_cookie);
3476         if (cookie) {
3477                 entry->cookie = flow_action_cookie_create(cookie->data,
3478                                                           cookie->len,
3479                                                           GFP_ATOMIC);
3480                 if (!entry->cookie)
3481                         err = -ENOMEM;
3482         }
3483         rcu_read_unlock();
3484         return err;
3485 }
3486
3487 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3488 {
3489         flow_action_cookie_destroy(entry->cookie);
3490 }
3491
3492 void tc_cleanup_offload_action(struct flow_action *flow_action)
3493 {
3494         struct flow_action_entry *entry;
3495         int i;
3496
3497         flow_action_for_each(i, entry, flow_action) {
3498                 tcf_act_put_cookie(entry);
3499                 if (entry->destructor)
3500                         entry->destructor(entry->destructor_priv);
3501         }
3502 }
3503 EXPORT_SYMBOL(tc_cleanup_offload_action);
3504
3505 static int tc_setup_offload_act(struct tc_action *act,
3506                                 struct flow_action_entry *entry,
3507                                 u32 *index_inc,
3508                                 struct netlink_ext_ack *extack)
3509 {
3510 #ifdef CONFIG_NET_CLS_ACT
3511         if (act->ops->offload_act_setup) {
3512                 return act->ops->offload_act_setup(act, entry, index_inc, true,
3513                                                    extack);
3514         } else {
3515                 NL_SET_ERR_MSG(extack, "Action does not support offload");
3516                 return -EOPNOTSUPP;
3517         }
3518 #else
3519         return 0;
3520 #endif
3521 }
3522
3523 int tc_setup_action(struct flow_action *flow_action,
3524                     struct tc_action *actions[],
3525                     struct netlink_ext_ack *extack)
3526 {
3527         int i, j, k, index, err = 0;
3528         struct tc_action *act;
3529
3530         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3531         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3532         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3533
3534         if (!actions)
3535                 return 0;
3536
3537         j = 0;
3538         tcf_act_for_each_action(i, act, actions) {
3539                 struct flow_action_entry *entry;
3540
3541                 entry = &flow_action->entries[j];
3542                 spin_lock_bh(&act->tcfa_lock);
3543                 err = tcf_act_get_cookie(entry, act);
3544                 if (err)
3545                         goto err_out_locked;
3546
3547                 index = 0;
3548                 err = tc_setup_offload_act(act, entry, &index, extack);
3549                 if (err)
3550                         goto err_out_locked;
3551
3552                 for (k = 0; k < index; k++) {
3553                         entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3554                         entry[k].hw_index = act->tcfa_index;
3555                 }
3556
3557                 j += index;
3558
3559                 spin_unlock_bh(&act->tcfa_lock);
3560         }
3561
3562 err_out:
3563         if (err)
3564                 tc_cleanup_offload_action(flow_action);
3565
3566         return err;
3567 err_out_locked:
3568         spin_unlock_bh(&act->tcfa_lock);
3569         goto err_out;
3570 }
3571
3572 int tc_setup_offload_action(struct flow_action *flow_action,
3573                             const struct tcf_exts *exts,
3574                             struct netlink_ext_ack *extack)
3575 {
3576 #ifdef CONFIG_NET_CLS_ACT
3577         if (!exts)
3578                 return 0;
3579
3580         return tc_setup_action(flow_action, exts->actions, extack);
3581 #else
3582         return 0;
3583 #endif
3584 }
3585 EXPORT_SYMBOL(tc_setup_offload_action);
3586
3587 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3588 {
3589         unsigned int num_acts = 0;
3590         struct tc_action *act;
3591         int i;
3592
3593         tcf_exts_for_each_action(i, act, exts) {
3594                 if (is_tcf_pedit(act))
3595                         num_acts += tcf_pedit_nkeys(act);
3596                 else
3597                         num_acts++;
3598         }
3599         return num_acts;
3600 }
3601 EXPORT_SYMBOL(tcf_exts_num_actions);
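
/* Illustrative sketch (not part of this file): offload paths size the
 * flow_action with tcf_exts_num_actions() before translating it, and a
 * driver then walks the translated entries; flow_rule_alloc() is from
 * <net/flow_offload.h>, "f" is hypothetical:
 *
 *	struct flow_action_entry *entry;
 *	struct flow_rule *rule;
 *	int i;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_offload_action(&rule->action, &f->exts, extack);
 *	if (err)
 *		return err;
 *
 *	flow_action_for_each(i, entry, &rule->action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			...
 *		}
 *	}
 */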
3602
3603 #ifdef CONFIG_NET_CLS_ACT
3604 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3605                                         u32 *p_block_index,
3606                                         struct netlink_ext_ack *extack)
3607 {
3608         *p_block_index = nla_get_u32(block_index_attr);
3609         if (!*p_block_index) {
3610                 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3611                 return -EINVAL;
3612         }
3613
3614         return 0;
3615 }
3616
3617 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3618                     enum flow_block_binder_type binder_type,
3619                     struct nlattr *block_index_attr,
3620                     struct netlink_ext_ack *extack)
3621 {
3622         u32 block_index;
3623         int err;
3624
3625         if (!block_index_attr)
3626                 return 0;
3627
3628         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3629         if (err)
3630                 return err;
3631
3632         qe->info.binder_type = binder_type;
3633         qe->info.chain_head_change = tcf_chain_head_change_dflt;
3634         qe->info.chain_head_change_priv = &qe->filter_chain;
3635         qe->info.block_index = block_index;
3636
3637         return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3638 }
3639 EXPORT_SYMBOL(tcf_qevent_init);
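
/* Illustrative sketch (not part of this file): a qdisc embeds a
 * struct tcf_qevent and binds it to a user-supplied block index from its
 * ->init()/->change(), the way sch_red wires its early_drop qevent:
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *
 * with a matching tcf_qevent_destroy() in ->destroy() and a
 * tcf_qevent_validate_change() check on the ->change() path.
 */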
3640
3641 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3642 {
3643         if (qe->info.block_index)
3644                 tcf_block_put_ext(qe->block, sch, &qe->info);
3645 }
3646 EXPORT_SYMBOL(tcf_qevent_destroy);
3647
3648 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3649                                struct netlink_ext_ack *extack)
3650 {
3651         u32 block_index;
3652         int err;
3653
3654         if (!block_index_attr)
3655                 return 0;
3656
3657         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3658         if (err)
3659                 return err;
3660
3661         /* Reject newly configuring a block, or changing the block. */
3662         if (block_index != qe->info.block_index) {
3663                 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3664                 return -EINVAL;
3665         }
3666
3667         return 0;
3668 }
3669 EXPORT_SYMBOL(tcf_qevent_validate_change);
3670
3671 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3672                                   struct sk_buff **to_free, int *ret)
3673 {
3674         struct tcf_result cl_res;
3675         struct tcf_proto *fl;
3676
3677         if (!qe->info.block_index)
3678                 return skb;
3679
3680         fl = rcu_dereference_bh(qe->filter_chain);
3681
3682         switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3683         case TC_ACT_SHOT:
3684                 qdisc_qstats_drop(sch);
3685                 __qdisc_drop(skb, to_free);
3686                 *ret = __NET_XMIT_BYPASS;
3687                 return NULL;
3688         case TC_ACT_STOLEN:
3689         case TC_ACT_QUEUED:
3690         case TC_ACT_TRAP:
3691                 __qdisc_drop(skb, to_free);
3692                 *ret = __NET_XMIT_STOLEN;
3693                 return NULL;
3694         case TC_ACT_REDIRECT:
3695                 skb_do_redirect(skb);
3696                 *ret = __NET_XMIT_STOLEN;
3697                 return NULL;
3698         }
3699
3700         return skb;
3701 }
3702 EXPORT_SYMBOL(tcf_qevent_handle);
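
/* Illustrative sketch (not part of this file): the datapath hook runs at
 * the point where the event fires; when the attached filters consume the
 * skb, the caller returns the verdict instead of enqueueing (sch_red
 * style):
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */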
3703
3704 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3705 {
3706         if (!qe->info.block_index)
3707                 return 0;
3708         return nla_put_u32(skb, attr_name, qe->info.block_index);
3709 }
3710 EXPORT_SYMBOL(tcf_qevent_dump);
3711 #endif
3712
3713 static __net_init int tcf_net_init(struct net *net)
3714 {
3715         struct tcf_net *tn = net_generic(net, tcf_net_id);
3716
3717         spin_lock_init(&tn->idr_lock);
3718         idr_init(&tn->idr);
3719         return 0;
3720 }
3721
3722 static void __net_exit tcf_net_exit(struct net *net)
3723 {
3724         struct tcf_net *tn = net_generic(net, tcf_net_id);
3725
3726         idr_destroy(&tn->idr);
3727 }
3728
3729 static struct pernet_operations tcf_net_ops = {
3730         .init = tcf_net_init,
3731         .exit = tcf_net_exit,
3732         .id   = &tcf_net_id,
3733         .size = sizeof(struct tcf_net),
3734 };
3735
3736 static int __init tc_filter_init(void)
3737 {
3738         int err;
3739
3740         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3741         if (!tc_filter_wq)
3742                 return -ENOMEM;
3743
3744         err = register_pernet_subsys(&tcf_net_ops);
3745         if (err)
3746                 goto err_register_pernet_subsys;
3747
3748         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3749                       RTNL_FLAG_DOIT_UNLOCKED);
3750         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3751                       RTNL_FLAG_DOIT_UNLOCKED);
3752         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3753                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3754         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3755         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3756         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3757                       tc_dump_chain, 0);
3758
3759         return 0;
3760
3761 err_register_pernet_subsys:
3762         destroy_workqueue(tc_filter_wq);
3763         return err;
3764 }
3765
3766 subsys_initcall(tc_filter_init);