1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c  Packet classifier API.
4  *
5  * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/rhashtable.h>
24 #include <linux/jhash.h>
25 #include <linux/rculist.h>
26 #include <net/net_namespace.h>
27 #include <net/sock.h>
28 #include <net/netlink.h>
29 #include <net/pkt_sched.h>
30 #include <net/pkt_cls.h>
31 #include <net/tc_act/tc_pedit.h>
32 #include <net/tc_act/tc_mirred.h>
33 #include <net/tc_act/tc_vlan.h>
34 #include <net/tc_act/tc_tunnel_key.h>
35 #include <net/tc_act/tc_csum.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_police.h>
38 #include <net/tc_act/tc_sample.h>
39 #include <net/tc_act/tc_skbedit.h>
40 #include <net/tc_act/tc_ct.h>
41 #include <net/tc_act/tc_mpls.h>
42 #include <net/tc_act/tc_gate.h>
43 #include <net/flow_offload.h>
44
45 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
46
47 /* The list of all installed classifier types */
48 static LIST_HEAD(tcf_proto_base);
49
50 /* Protects the list of registered TC modules. It is a pure SMP lock. */
51 static DEFINE_RWLOCK(cls_mod_lock);
52
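/* Filters that are being destroyed are tracked in the block's
 * proto_destroy_ht, keyed by (chain index, prio, protocol). This lets the
 * filter insert path detect that a proto with the same identity is still
 * being torn down concurrently and replay the request instead of reusing
 * the half-destroyed instance.
 */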
53 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
54 {
55         return jhash_3words(tp->chain->index, tp->prio,
56                             (__force __u32)tp->protocol, 0);
57 }
58
59 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
60                                         struct tcf_proto *tp)
61 {
62         struct tcf_block *block = chain->block;
63
64         mutex_lock(&block->proto_destroy_lock);
65         hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
66                      destroy_obj_hashfn(tp));
67         mutex_unlock(&block->proto_destroy_lock);
68 }
69
70 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
71                           const struct tcf_proto *tp2)
72 {
73         return tp1->chain->index == tp2->chain->index &&
74                tp1->prio == tp2->prio &&
75                tp1->protocol == tp2->protocol;
76 }
77
78 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
79                                         struct tcf_proto *tp)
80 {
81         u32 hash = destroy_obj_hashfn(tp);
82         struct tcf_proto *iter;
83         bool found = false;
84
85         rcu_read_lock();
86         hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
87                                    destroy_ht_node, hash) {
88                 if (tcf_proto_cmp(tp, iter)) {
89                         found = true;
90                         break;
91                 }
92         }
93         rcu_read_unlock();
94
95         return found;
96 }
97
98 static void
99 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
100 {
101         struct tcf_block *block = chain->block;
102
103         mutex_lock(&block->proto_destroy_lock);
104         if (hash_hashed(&tp->destroy_ht_node))
105                 hash_del_rcu(&tp->destroy_ht_node);
106         mutex_unlock(&block->proto_destroy_lock);
107 }
108
109 /* Find classifier type by string name */
110
111 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
112 {
113         const struct tcf_proto_ops *t, *res = NULL;
114
115         if (kind) {
116                 read_lock(&cls_mod_lock);
117                 list_for_each_entry(t, &tcf_proto_base, head) {
118                         if (strcmp(kind, t->kind) == 0) {
119                                 if (try_module_get(t->owner))
120                                         res = t;
121                                 break;
122                         }
123                 }
124                 read_unlock(&cls_mod_lock);
125         }
126         return res;
127 }
128
129 static const struct tcf_proto_ops *
130 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
131                      struct netlink_ext_ack *extack)
132 {
133         const struct tcf_proto_ops *ops;
134
135         ops = __tcf_proto_lookup_ops(kind);
136         if (ops)
137                 return ops;
138 #ifdef CONFIG_MODULES
139         if (rtnl_held)
140                 rtnl_unlock();
141         request_module("cls_%s", kind);
142         if (rtnl_held)
143                 rtnl_lock();
144         ops = __tcf_proto_lookup_ops(kind);
145         /* We dropped the RTNL semaphore in order to perform
146          * the module load. So, even if we succeeded in loading
147          * the module we have to replay the request. We indicate
148          * this using -EAGAIN.
149          */
150         if (ops) {
151                 module_put(ops->owner);
152                 return ERR_PTR(-EAGAIN);
153         }
154 #endif
155         NL_SET_ERR_MSG(extack, "TC classifier not found");
156         return ERR_PTR(-ENOENT);
157 }
158
159 /* Register(unregister) new classifier type */
160
161 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
162 {
163         struct tcf_proto_ops *t;
164         int rc = -EEXIST;
165
166         write_lock(&cls_mod_lock);
167         list_for_each_entry(t, &tcf_proto_base, head)
168                 if (!strcmp(ops->kind, t->kind))
169                         goto out;
170
171         list_add_tail(&ops->head, &tcf_proto_base);
172         rc = 0;
173 out:
174         write_unlock(&cls_mod_lock);
175         return rc;
176 }
177 EXPORT_SYMBOL(register_tcf_proto_ops);
178
179 static struct workqueue_struct *tc_filter_wq;
180
181 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
182 {
183         struct tcf_proto_ops *t;
184         int rc = -ENOENT;
185
186         /* Wait for outstanding call_rcu()s, if any, from a
187          * tcf_proto_ops's destroy() handler.
188          */
189         rcu_barrier();
190         flush_workqueue(tc_filter_wq);
191
192         write_lock(&cls_mod_lock);
193         list_for_each_entry(t, &tcf_proto_base, head) {
194                 if (t == ops) {
195                         list_del(&t->head);
196                         rc = 0;
197                         break;
198                 }
199         }
200         write_unlock(&cls_mod_lock);
201         return rc;
202 }
203 EXPORT_SYMBOL(unregister_tcf_proto_ops);
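/* A classifier module registers its ops on load and unregisters them on
 * unload. A minimal sketch (the "foo" names are illustrative, not from
 * this file):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * Note that tcf_proto_lookup_ops() auto-loads "cls_<kind>", so the module
 * providing kind "foo" is expected to be named cls_foo.
 */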
204
205 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
206 {
207         INIT_RCU_WORK(rwork, func);
208         return queue_rcu_work(tc_filter_wq, rwork);
209 }
210 EXPORT_SYMBOL(tcf_queue_work);
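/* tcf_queue_work() runs @func on tc_filter_wq only after an RCU grace
 * period has elapsed, so classifiers use it to free objects that readers
 * may still be traversing. A minimal sketch of the usual pattern (names
 * are illustrative, not from this file):
 *
 *	struct my_filter {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	static void my_filter_free_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	// on the delete path, instead of kfree_rcu():
 *	tcf_queue_work(&f->rwork, my_filter_free_work);
 */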
211
212 /* Select a new prio value from the range managed by the kernel. */
213
214 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
215 {
216         u32 first = TC_H_MAKE(0xC0000000U, 0U);
217
218         if (tp)
219                 first = tp->prio - 1;
220
221         return TC_H_MAJ(first);
222 }
223
224 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
225 {
226         if (kind)
227                 return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
228         memset(name, 0, IFNAMSIZ);
229         return false;
230 }
231
232 static bool tcf_proto_is_unlocked(const char *kind)
233 {
234         const struct tcf_proto_ops *ops;
235         bool ret;
236
237         if (strlen(kind) == 0)
238                 return false;
239
240         ops = tcf_proto_lookup_ops(kind, false, NULL);
241         /* On error return false to take rtnl lock. Proto lookup/create
242          * functions will perform lookup again and properly handle errors.
243          */
244         if (IS_ERR(ops))
245                 return false;
246
247         ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
248         module_put(ops->owner);
249         return ret;
250 }
251
252 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
253                                           u32 prio, struct tcf_chain *chain,
254                                           bool rtnl_held,
255                                           struct netlink_ext_ack *extack)
256 {
257         struct tcf_proto *tp;
258         int err;
259
260         tp = kzalloc(sizeof(*tp), GFP_KERNEL);
261         if (!tp)
262                 return ERR_PTR(-ENOBUFS);
263
264         tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
265         if (IS_ERR(tp->ops)) {
266                 err = PTR_ERR(tp->ops);
267                 goto errout;
268         }
269         tp->classify = tp->ops->classify;
270         tp->protocol = protocol;
271         tp->prio = prio;
272         tp->chain = chain;
273         spin_lock_init(&tp->lock);
274         refcount_set(&tp->refcnt, 1);
275
276         err = tp->ops->init(tp);
277         if (err) {
278                 module_put(tp->ops->owner);
279                 goto errout;
280         }
281         return tp;
282
283 errout:
284         kfree(tp);
285         return ERR_PTR(err);
286 }
287
288 static void tcf_proto_get(struct tcf_proto *tp)
289 {
290         refcount_inc(&tp->refcnt);
291 }
292
293 static void tcf_chain_put(struct tcf_chain *chain);
294
295 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
296                               bool sig_destroy, struct netlink_ext_ack *extack)
297 {
298         tp->ops->destroy(tp, rtnl_held, extack);
299         if (sig_destroy)
300                 tcf_proto_signal_destroyed(tp->chain, tp);
301         tcf_chain_put(tp->chain);
302         module_put(tp->ops->owner);
303         kfree_rcu(tp, rcu);
304 }
305
306 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
307                           struct netlink_ext_ack *extack)
308 {
309         if (refcount_dec_and_test(&tp->refcnt))
310                 tcf_proto_destroy(tp, rtnl_held, true, extack);
311 }
312
313 static bool tcf_proto_check_delete(struct tcf_proto *tp)
314 {
315         if (tp->ops->delete_empty)
316                 return tp->ops->delete_empty(tp);
317
318         tp->deleting = true;
319         return tp->deleting;
320 }
321
322 static void tcf_proto_mark_delete(struct tcf_proto *tp)
323 {
324         spin_lock(&tp->lock);
325         tp->deleting = true;
326         spin_unlock(&tp->lock);
327 }
328
329 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
330 {
331         bool deleting;
332
333         spin_lock(&tp->lock);
334         deleting = tp->deleting;
335         spin_unlock(&tp->lock);
336
337         return deleting;
338 }
339
340 #define ASSERT_BLOCK_LOCKED(block)                                      \
341         lockdep_assert_held(&(block)->lock)
342
343 struct tcf_filter_chain_list_item {
344         struct list_head list;
345         tcf_chain_head_change_t *chain_head_change;
346         void *chain_head_change_priv;
347 };
348
349 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
350                                           u32 chain_index)
351 {
352         struct tcf_chain *chain;
353
354         ASSERT_BLOCK_LOCKED(block);
355
356         chain = kzalloc(sizeof(*chain), GFP_KERNEL);
357         if (!chain)
358                 return NULL;
359         list_add_tail_rcu(&chain->list, &block->chain_list);
360         mutex_init(&chain->filter_chain_lock);
361         chain->block = block;
362         chain->index = chain_index;
363         chain->refcnt = 1;
364         if (!chain->index)
365                 block->chain0.chain = chain;
366         return chain;
367 }
368
369 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
370                                        struct tcf_proto *tp_head)
371 {
372         if (item->chain_head_change)
373                 item->chain_head_change(tp_head, item->chain_head_change_priv);
374 }
375
376 static void tcf_chain0_head_change(struct tcf_chain *chain,
377                                    struct tcf_proto *tp_head)
378 {
379         struct tcf_filter_chain_list_item *item;
380         struct tcf_block *block = chain->block;
381
382         if (chain->index)
383                 return;
384
385         mutex_lock(&block->lock);
386         list_for_each_entry(item, &block->chain0.filter_chain_list, list)
387                 tcf_chain_head_change_item(item, tp_head);
388         mutex_unlock(&block->lock);
389 }
390
391 /* Returns true if block can be safely freed. */
392
393 static bool tcf_chain_detach(struct tcf_chain *chain)
394 {
395         struct tcf_block *block = chain->block;
396
397         ASSERT_BLOCK_LOCKED(block);
398
399         list_del_rcu(&chain->list);
400         if (!chain->index)
401                 block->chain0.chain = NULL;
402
403         if (list_empty(&block->chain_list) &&
404             refcount_read(&block->refcnt) == 0)
405                 return true;
406
407         return false;
408 }
409
410 static void tcf_block_destroy(struct tcf_block *block)
411 {
412         mutex_destroy(&block->lock);
413         mutex_destroy(&block->proto_destroy_lock);
414         kfree_rcu(block, rcu);
415 }
416
417 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
418 {
419         struct tcf_block *block = chain->block;
420
421         mutex_destroy(&chain->filter_chain_lock);
422         kfree_rcu(chain, rcu);
423         if (free_block)
424                 tcf_block_destroy(block);
425 }
426
427 static void tcf_chain_hold(struct tcf_chain *chain)
428 {
429         ASSERT_BLOCK_LOCKED(chain->block);
430
431         ++chain->refcnt;
432 }
433
434 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
435 {
436         ASSERT_BLOCK_LOCKED(chain->block);
437
438         /* In case all the references are action references, this
439          * chain should not be shown to the user.
440          */
441         return chain->refcnt == chain->action_refcnt;
442 }
443
444 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
445                                           u32 chain_index)
446 {
447         struct tcf_chain *chain;
448
449         ASSERT_BLOCK_LOCKED(block);
450
451         list_for_each_entry(chain, &block->chain_list, list) {
452                 if (chain->index == chain_index)
453                         return chain;
454         }
455         return NULL;
456 }
457
458 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
459 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
460                                               u32 chain_index)
461 {
462         struct tcf_chain *chain;
463
464         list_for_each_entry_rcu(chain, &block->chain_list, list) {
465                 if (chain->index == chain_index)
466                         return chain;
467         }
468         return NULL;
469 }
470 #endif
471
472 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
473                            u32 seq, u16 flags, int event, bool unicast);
474
475 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
476                                          u32 chain_index, bool create,
477                                          bool by_act)
478 {
479         struct tcf_chain *chain = NULL;
480         bool is_first_reference;
481
482         mutex_lock(&block->lock);
483         chain = tcf_chain_lookup(block, chain_index);
484         if (chain) {
485                 tcf_chain_hold(chain);
486         } else {
487                 if (!create)
488                         goto errout;
489                 chain = tcf_chain_create(block, chain_index);
490                 if (!chain)
491                         goto errout;
492         }
493
494         if (by_act)
495                 ++chain->action_refcnt;
496         is_first_reference = chain->refcnt - chain->action_refcnt == 1;
497         mutex_unlock(&block->lock);
498
499         /* Send notification only in case we got the first
500          * non-action reference. Until then, the chain acts only as
501          * a placeholder for actions pointing to it and user ought
502          * not know about them.
503          */
504         if (is_first_reference && !by_act)
505                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
506                                 RTM_NEWCHAIN, false);
507
508         return chain;
509
510 errout:
511         mutex_unlock(&block->lock);
512         return chain;
513 }
514
515 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
516                                        bool create)
517 {
518         return __tcf_chain_get(block, chain_index, create, false);
519 }
520
521 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
522 {
523         return __tcf_chain_get(block, chain_index, true, true);
524 }
525 EXPORT_SYMBOL(tcf_chain_get_by_act);
526
527 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
528                                void *tmplt_priv);
529 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
530                                   void *tmplt_priv, u32 chain_index,
531                                   struct tcf_block *block, struct sk_buff *oskb,
532                                   u32 seq, u16 flags, bool unicast);
533
534 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
535                             bool explicitly_created)
536 {
537         struct tcf_block *block = chain->block;
538         const struct tcf_proto_ops *tmplt_ops;
539         bool free_block = false;
540         unsigned int refcnt;
541         void *tmplt_priv;
542
543         mutex_lock(&block->lock);
544         if (explicitly_created) {
545                 if (!chain->explicitly_created) {
546                         mutex_unlock(&block->lock);
547                         return;
548                 }
549                 chain->explicitly_created = false;
550         }
551
552         if (by_act)
553                 chain->action_refcnt--;
554
555         /* tc_chain_notify_delete can't be called while holding block lock.
556          * However, when block is unlocked chain can be changed concurrently, so
557          * save these to temporary variables.
558          */
559         refcnt = --chain->refcnt;
560         tmplt_ops = chain->tmplt_ops;
561         tmplt_priv = chain->tmplt_priv;
562
563         /* The last dropped non-action reference will trigger notification. */
564         if (refcnt - chain->action_refcnt == 0 && !by_act) {
565                 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
566                                        block, NULL, 0, 0, false);
567                 /* Last reference to chain, no need to lock. */
568                 chain->flushing = false;
569         }
570
571         if (refcnt == 0)
572                 free_block = tcf_chain_detach(chain);
573         mutex_unlock(&block->lock);
574
575         if (refcnt == 0) {
576                 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
577                 tcf_chain_destroy(chain, free_block);
578         }
579 }
580
581 static void tcf_chain_put(struct tcf_chain *chain)
582 {
583         __tcf_chain_put(chain, false, false);
584 }
585
586 void tcf_chain_put_by_act(struct tcf_chain *chain)
587 {
588         __tcf_chain_put(chain, true, false);
589 }
590 EXPORT_SYMBOL(tcf_chain_put_by_act);
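/* Counterpart of tcf_chain_get_by_act(): actions that reference a chain
 * (for example the "goto chain" control action) take their reference with
 * tcf_chain_get_by_act() and drop it here. Such references are counted in
 * action_refcnt, so a chain held only by actions stays hidden from user
 * space (see tcf_chain_held_by_acts_only()).
 */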
591
592 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
593 {
594         __tcf_chain_put(chain, false, true);
595 }
596
597 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
598 {
599         struct tcf_proto *tp, *tp_next;
600
601         mutex_lock(&chain->filter_chain_lock);
602         tp = tcf_chain_dereference(chain->filter_chain, chain);
603         while (tp) {
604                 tp_next = rcu_dereference_protected(tp->next, 1);
605                 tcf_proto_signal_destroying(chain, tp);
606                 tp = tp_next;
607         }
608         tp = tcf_chain_dereference(chain->filter_chain, chain);
609         RCU_INIT_POINTER(chain->filter_chain, NULL);
610         tcf_chain0_head_change(chain, NULL);
611         chain->flushing = true;
612         mutex_unlock(&chain->filter_chain_lock);
613
614         while (tp) {
615                 tp_next = rcu_dereference_protected(tp->next, 1);
616                 tcf_proto_put(tp, rtnl_held, NULL);
617                 tp = tp_next;
618         }
619 }
620
621 static int tcf_block_setup(struct tcf_block *block,
622                            struct flow_block_offload *bo);
623
624 static void tcf_block_offload_init(struct flow_block_offload *bo,
625                                    struct net_device *dev,
626                                    enum flow_block_command command,
627                                    enum flow_block_binder_type binder_type,
628                                    struct flow_block *flow_block,
629                                    bool shared, struct netlink_ext_ack *extack)
630 {
631         bo->net = dev_net(dev);
632         bo->command = command;
633         bo->binder_type = binder_type;
634         bo->block = flow_block;
635         bo->block_shared = shared;
636         bo->extack = extack;
637         INIT_LIST_HEAD(&bo->cb_list);
638 }
639
640 static void tcf_block_unbind(struct tcf_block *block,
641                              struct flow_block_offload *bo);
642
643 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
644 {
645         struct tcf_block *block = block_cb->indr.data;
646         struct net_device *dev = block_cb->indr.dev;
647         struct netlink_ext_ack extack = {};
648         struct flow_block_offload bo;
649
650         tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
651                                block_cb->indr.binder_type,
652                                &block->flow_block, tcf_block_shared(block),
653                                &extack);
654         down_write(&block->cb_lock);
655         list_move(&block_cb->list, &bo.cb_list);
656         up_write(&block->cb_lock);
657         rtnl_lock();
658         tcf_block_unbind(block, &bo);
659         rtnl_unlock();
660 }
661
662 static bool tcf_block_offload_in_use(struct tcf_block *block)
663 {
664         return atomic_read(&block->offloadcnt);
665 }
666
667 static int tcf_block_offload_cmd(struct tcf_block *block,
668                                  struct net_device *dev,
669                                  struct tcf_block_ext_info *ei,
670                                  enum flow_block_command command,
671                                  struct netlink_ext_ack *extack)
672 {
673         struct flow_block_offload bo = {};
674         int err;
675
676         tcf_block_offload_init(&bo, dev, command, ei->binder_type,
677                                &block->flow_block, tcf_block_shared(block),
678                                extack);
679
680         if (dev->netdev_ops->ndo_setup_tc)
681                 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
682         else
683                 err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
684                                                   &bo, tc_block_indr_cleanup);
685
686         if (err < 0) {
687                 if (err != -EOPNOTSUPP)
688                         NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
689                 return err;
690         }
691
692         return tcf_block_setup(block, &bo);
693 }
694
695 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
696                                   struct tcf_block_ext_info *ei,
697                                   struct netlink_ext_ack *extack)
698 {
699         struct net_device *dev = q->dev_queue->dev;
700         int err;
701
702         down_write(&block->cb_lock);
703
704         /* If the tc offload feature is disabled and the block we try to bind
705          * to already has some offloaded filters, refuse to bind.
706          */
707         if (dev->netdev_ops->ndo_setup_tc &&
708             !tc_can_offload(dev) &&
709             tcf_block_offload_in_use(block)) {
710                 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
711                 err = -EOPNOTSUPP;
712                 goto err_unlock;
713         }
714
715         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
716         if (err == -EOPNOTSUPP)
717                 goto no_offload_dev_inc;
718         if (err)
719                 goto err_unlock;
720
721         up_write(&block->cb_lock);
722         return 0;
723
724 no_offload_dev_inc:
725         if (tcf_block_offload_in_use(block))
726                 goto err_unlock;
727
728         err = 0;
729         block->nooffloaddevcnt++;
730 err_unlock:
731         up_write(&block->cb_lock);
732         return err;
733 }
734
735 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
736                                      struct tcf_block_ext_info *ei)
737 {
738         struct net_device *dev = q->dev_queue->dev;
739         int err;
740
741         down_write(&block->cb_lock);
742         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
743         if (err == -EOPNOTSUPP)
744                 goto no_offload_dev_dec;
745         up_write(&block->cb_lock);
746         return;
747
748 no_offload_dev_dec:
749         WARN_ON(block->nooffloaddevcnt-- == 0);
750         up_write(&block->cb_lock);
751 }
752
753 static int
754 tcf_chain0_head_change_cb_add(struct tcf_block *block,
755                               struct tcf_block_ext_info *ei,
756                               struct netlink_ext_ack *extack)
757 {
758         struct tcf_filter_chain_list_item *item;
759         struct tcf_chain *chain0;
760
761         item = kmalloc(sizeof(*item), GFP_KERNEL);
762         if (!item) {
763                 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
764                 return -ENOMEM;
765         }
766         item->chain_head_change = ei->chain_head_change;
767         item->chain_head_change_priv = ei->chain_head_change_priv;
768
769         mutex_lock(&block->lock);
770         chain0 = block->chain0.chain;
771         if (chain0)
772                 tcf_chain_hold(chain0);
773         else
774                 list_add(&item->list, &block->chain0.filter_chain_list);
775         mutex_unlock(&block->lock);
776
777         if (chain0) {
778                 struct tcf_proto *tp_head;
779
780                 mutex_lock(&chain0->filter_chain_lock);
781
782                 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
783                 if (tp_head)
784                         tcf_chain_head_change_item(item, tp_head);
785
786                 mutex_lock(&block->lock);
787                 list_add(&item->list, &block->chain0.filter_chain_list);
788                 mutex_unlock(&block->lock);
789
790                 mutex_unlock(&chain0->filter_chain_lock);
791                 tcf_chain_put(chain0);
792         }
793
794         return 0;
795 }
796
797 static void
798 tcf_chain0_head_change_cb_del(struct tcf_block *block,
799                               struct tcf_block_ext_info *ei)
800 {
801         struct tcf_filter_chain_list_item *item;
802
803         mutex_lock(&block->lock);
804         list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
805                 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
806                     (item->chain_head_change == ei->chain_head_change &&
807                      item->chain_head_change_priv == ei->chain_head_change_priv)) {
808                         if (block->chain0.chain)
809                                 tcf_chain_head_change_item(item, NULL);
810                         list_del(&item->list);
811                         mutex_unlock(&block->lock);
812
813                         kfree(item);
814                         return;
815                 }
816         }
817         mutex_unlock(&block->lock);
818         WARN_ON(1);
819 }
820
821 struct tcf_net {
822         spinlock_t idr_lock; /* Protects idr */
823         struct idr idr;
824 };
825
826 static unsigned int tcf_net_id;
827
828 static int tcf_block_insert(struct tcf_block *block, struct net *net,
829                             struct netlink_ext_ack *extack)
830 {
831         struct tcf_net *tn = net_generic(net, tcf_net_id);
832         int err;
833
834         idr_preload(GFP_KERNEL);
835         spin_lock(&tn->idr_lock);
836         err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
837                             GFP_NOWAIT);
838         spin_unlock(&tn->idr_lock);
839         idr_preload_end();
840
841         return err;
842 }
843
844 static void tcf_block_remove(struct tcf_block *block, struct net *net)
845 {
846         struct tcf_net *tn = net_generic(net, tcf_net_id);
847
848         spin_lock(&tn->idr_lock);
849         idr_remove(&tn->idr, block->index);
850         spin_unlock(&tn->idr_lock);
851 }
852
853 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
854                                           u32 block_index,
855                                           struct netlink_ext_ack *extack)
856 {
857         struct tcf_block *block;
858
859         block = kzalloc(sizeof(*block), GFP_KERNEL);
860         if (!block) {
861                 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
862                 return ERR_PTR(-ENOMEM);
863         }
864         mutex_init(&block->lock);
865         mutex_init(&block->proto_destroy_lock);
866         init_rwsem(&block->cb_lock);
867         flow_block_init(&block->flow_block);
868         INIT_LIST_HEAD(&block->chain_list);
869         INIT_LIST_HEAD(&block->owner_list);
870         INIT_LIST_HEAD(&block->chain0.filter_chain_list);
871
872         refcount_set(&block->refcnt, 1);
873         block->net = net;
874         block->index = block_index;
875
876         /* Don't store q pointer for blocks which are shared */
877         if (!tcf_block_shared(block))
878                 block->q = q;
879         return block;
880 }
881
882 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
883 {
884         struct tcf_net *tn = net_generic(net, tcf_net_id);
885
886         return idr_find(&tn->idr, block_index);
887 }
888
889 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
890 {
891         struct tcf_block *block;
892
893         rcu_read_lock();
894         block = tcf_block_lookup(net, block_index);
895         if (block && !refcount_inc_not_zero(&block->refcnt))
896                 block = NULL;
897         rcu_read_unlock();
898
899         return block;
900 }
901
902 static struct tcf_chain *
903 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
904 {
905         mutex_lock(&block->lock);
906         if (chain)
907                 chain = list_is_last(&chain->list, &block->chain_list) ?
908                         NULL : list_next_entry(chain, list);
909         else
910                 chain = list_first_entry_or_null(&block->chain_list,
911                                                  struct tcf_chain, list);
912
913         /* skip all action-only chains */
914         while (chain && tcf_chain_held_by_acts_only(chain))
915                 chain = list_is_last(&chain->list, &block->chain_list) ?
916                         NULL : list_next_entry(chain, list);
917
918         if (chain)
919                 tcf_chain_hold(chain);
920         mutex_unlock(&block->lock);
921
922         return chain;
923 }
924
925 /* Function to be used by all clients that want to iterate over all chains on
926  * block. It properly obtains block->lock and takes reference to chain before
927  * returning it. Users of this function must be tolerant to concurrent chain
928  * insertion/deletion or ensure that no concurrent chain modification is
929  * possible. Note that netlink dump callbacks cannot guarantee a consistent
930  * dump because the rtnl lock is released each time the skb is filled with
931  * data and sent to user-space.
932  */
933
934 struct tcf_chain *
935 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
936 {
937         struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
938
939         if (chain)
940                 tcf_chain_put(chain);
941
942         return chain_next;
943 }
944 EXPORT_SYMBOL(tcf_get_next_chain);
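/* The intended iteration idiom, as used by tcf_block_flush_all_chains()
 * below (illustrative sketch):
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// work on chain; the reference taken on it is dropped
 *		// automatically when the next chain is fetched
 *	}
 */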
945
946 static struct tcf_proto *
947 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
948 {
949         u32 prio = 0;
950
951         ASSERT_RTNL();
952         mutex_lock(&chain->filter_chain_lock);
953
954         if (!tp) {
955                 tp = tcf_chain_dereference(chain->filter_chain, chain);
956         } else if (tcf_proto_is_deleting(tp)) {
957                 /* 'deleting' flag is set and chain->filter_chain_lock was
958                  * unlocked, which means next pointer could be invalid. Restart
959                  * search.
960                  */
961                 prio = tp->prio + 1;
962                 tp = tcf_chain_dereference(chain->filter_chain, chain);
963
964                 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
965                         if (!tp->deleting && tp->prio >= prio)
966                                 break;
967         } else {
968                 tp = tcf_chain_dereference(tp->next, chain);
969         }
970
971         if (tp)
972                 tcf_proto_get(tp);
973
974         mutex_unlock(&chain->filter_chain_lock);
975
976         return tp;
977 }
978
979 /* Function to be used by all clients that want to iterate over all tp's on
980  * chain. Users of this function must be tolerant to concurrent tp
981  * insertion/deletion or ensure that no concurrent chain modification is
982  * possible. Note that netlink dump callbacks cannot guarantee a consistent
983  * dump because the rtnl lock is released each time the skb is filled with
984  * data and sent to user-space.
985  */
986
987 struct tcf_proto *
988 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
989                    bool rtnl_held)
990 {
991         struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
992
993         if (tp)
994                 tcf_proto_put(tp, rtnl_held, NULL);
995
996         return tp_next;
997 }
998 EXPORT_SYMBOL(tcf_get_next_proto);
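/* Typically nested inside the chain iteration above, analogous to the
 * internal walk done by tcf_block_playback_offloads() below. Illustrative
 * sketch using the exported helper under rtnl:
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, true);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp, true)) {
 *		// work on tp; the previous reference is dropped when the
 *		// next proto is fetched
 *	}
 */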
999
1000 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1001 {
1002         struct tcf_chain *chain;
1003
1004         /* Last reference to block. At this point chains cannot be added or
1005          * removed concurrently.
1006          */
1007         for (chain = tcf_get_next_chain(block, NULL);
1008              chain;
1009              chain = tcf_get_next_chain(block, chain)) {
1010                 tcf_chain_put_explicitly_created(chain);
1011                 tcf_chain_flush(chain, rtnl_held);
1012         }
1013 }
1014
1015 /* Look up the Qdisc and increment its reference counter.
1016  * Set parent, if necessary.
1017  */
1018
1019 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1020                             u32 *parent, int ifindex, bool rtnl_held,
1021                             struct netlink_ext_ack *extack)
1022 {
1023         const struct Qdisc_class_ops *cops;
1024         struct net_device *dev;
1025         int err = 0;
1026
1027         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1028                 return 0;
1029
1030         rcu_read_lock();
1031
1032         /* Find link */
1033         dev = dev_get_by_index_rcu(net, ifindex);
1034         if (!dev) {
1035                 rcu_read_unlock();
1036                 return -ENODEV;
1037         }
1038
1039         /* Find qdisc */
1040         if (!*parent) {
1041                 *q = dev->qdisc;
1042                 *parent = (*q)->handle;
1043         } else {
1044                 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1045                 if (!*q) {
1046                         NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1047                         err = -EINVAL;
1048                         goto errout_rcu;
1049                 }
1050         }
1051
1052         *q = qdisc_refcount_inc_nz(*q);
1053         if (!*q) {
1054                 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1055                 err = -EINVAL;
1056                 goto errout_rcu;
1057         }
1058
1059         /* Is it classful? */
1060         cops = (*q)->ops->cl_ops;
1061         if (!cops) {
1062                 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1063                 err = -EINVAL;
1064                 goto errout_qdisc;
1065         }
1066
1067         if (!cops->tcf_block) {
1068                 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1069                 err = -EOPNOTSUPP;
1070                 goto errout_qdisc;
1071         }
1072
1073 errout_rcu:
1074         /* At this point we know that qdisc is not noop_qdisc,
1075          * which means that qdisc holds a reference to net_device
1076          * and we hold a reference to qdisc, so it is safe to release
1077          * rcu read lock.
1078          */
1079         rcu_read_unlock();
1080         return err;
1081
1082 errout_qdisc:
1083         rcu_read_unlock();
1084
1085         if (rtnl_held)
1086                 qdisc_put(*q);
1087         else
1088                 qdisc_put_unlocked(*q);
1089         *q = NULL;
1090
1091         return err;
1092 }
1093
1094 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1095                                int ifindex, struct netlink_ext_ack *extack)
1096 {
1097         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1098                 return 0;
1099
1100         /* Are we searching for a filter attached to a class? */
1101         if (TC_H_MIN(parent)) {
1102                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1103
1104                 *cl = cops->find(q, parent);
1105                 if (*cl == 0) {
1106                         NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1107                         return -ENOENT;
1108                 }
1109         }
1110
1111         return 0;
1112 }
1113
1114 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1115                                           unsigned long cl, int ifindex,
1116                                           u32 block_index,
1117                                           struct netlink_ext_ack *extack)
1118 {
1119         struct tcf_block *block;
1120
1121         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1122                 block = tcf_block_refcnt_get(net, block_index);
1123                 if (!block) {
1124                         NL_SET_ERR_MSG(extack, "Block of given index was not found");
1125                         return ERR_PTR(-EINVAL);
1126                 }
1127         } else {
1128                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1129
1130                 block = cops->tcf_block(q, cl, extack);
1131                 if (!block)
1132                         return ERR_PTR(-EINVAL);
1133
1134                 if (tcf_block_shared(block)) {
1135                         NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1136                         return ERR_PTR(-EOPNOTSUPP);
1137                 }
1138
1139                 /* Always take a reference to the block so that the cls API rules
1140                  * update path can run without the rtnl lock. The caller must
1141                  * release the block when it is finished using it. The 'if' branch
1142                  * of this conditional obtains its reference by calling
1143                  * tcf_block_refcnt_get().
1144                  */
1145                 refcount_inc(&block->refcnt);
1146         }
1147
1148         return block;
1149 }
1150
1151 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1152                             struct tcf_block_ext_info *ei, bool rtnl_held)
1153 {
1154         if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1155                 /* Flushing/putting all chains will cause the block to be
1156                  * deallocated when last chain is freed. However, if chain_list
1157                  * is empty, block has to be manually deallocated. After block
1158                  * reference counter reached 0, it is no longer possible to
1159                  * increment it or add new chains to block.
1160                  */
1161                 bool free_block = list_empty(&block->chain_list);
1162
1163                 mutex_unlock(&block->lock);
1164                 if (tcf_block_shared(block))
1165                         tcf_block_remove(block, block->net);
1166
1167                 if (q)
1168                         tcf_block_offload_unbind(block, q, ei);
1169
1170                 if (free_block)
1171                         tcf_block_destroy(block);
1172                 else
1173                         tcf_block_flush_all_chains(block, rtnl_held);
1174         } else if (q) {
1175                 tcf_block_offload_unbind(block, q, ei);
1176         }
1177 }
1178
1179 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1180 {
1181         __tcf_block_put(block, NULL, NULL, rtnl_held);
1182 }
1183
1184 /* Find tcf block.
1185  * Set q, parent, cl when appropriate.
1186  */
1187
1188 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1189                                         u32 *parent, unsigned long *cl,
1190                                         int ifindex, u32 block_index,
1191                                         struct netlink_ext_ack *extack)
1192 {
1193         struct tcf_block *block;
1194         int err = 0;
1195
1196         ASSERT_RTNL();
1197
1198         err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1199         if (err)
1200                 goto errout;
1201
1202         err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1203         if (err)
1204                 goto errout_qdisc;
1205
1206         block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1207         if (IS_ERR(block)) {
1208                 err = PTR_ERR(block);
1209                 goto errout_qdisc;
1210         }
1211
1212         return block;
1213
1214 errout_qdisc:
1215         if (*q)
1216                 qdisc_put(*q);
1217 errout:
1218         *q = NULL;
1219         return ERR_PTR(err);
1220 }
1221
1222 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1223                               bool rtnl_held)
1224 {
1225         if (!IS_ERR_OR_NULL(block))
1226                 tcf_block_refcnt_put(block, rtnl_held);
1227
1228         if (q) {
1229                 if (rtnl_held)
1230                         qdisc_put(q);
1231                 else
1232                         qdisc_put_unlocked(q);
1233         }
1234 }
1235
1236 struct tcf_block_owner_item {
1237         struct list_head list;
1238         struct Qdisc *q;
1239         enum flow_block_binder_type binder_type;
1240 };
1241
1242 static void
1243 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1244                                struct Qdisc *q,
1245                                enum flow_block_binder_type binder_type)
1246 {
1247         if (block->keep_dst &&
1248             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1249             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1250                 netif_keep_dst(qdisc_dev(q));
1251 }
1252
1253 void tcf_block_netif_keep_dst(struct tcf_block *block)
1254 {
1255         struct tcf_block_owner_item *item;
1256
1257         block->keep_dst = true;
1258         list_for_each_entry(item, &block->owner_list, list)
1259                 tcf_block_owner_netif_keep_dst(block, item->q,
1260                                                item->binder_type);
1261 }
1262 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1263
1264 static int tcf_block_owner_add(struct tcf_block *block,
1265                                struct Qdisc *q,
1266                                enum flow_block_binder_type binder_type)
1267 {
1268         struct tcf_block_owner_item *item;
1269
1270         item = kmalloc(sizeof(*item), GFP_KERNEL);
1271         if (!item)
1272                 return -ENOMEM;
1273         item->q = q;
1274         item->binder_type = binder_type;
1275         list_add(&item->list, &block->owner_list);
1276         return 0;
1277 }
1278
1279 static void tcf_block_owner_del(struct tcf_block *block,
1280                                 struct Qdisc *q,
1281                                 enum flow_block_binder_type binder_type)
1282 {
1283         struct tcf_block_owner_item *item;
1284
1285         list_for_each_entry(item, &block->owner_list, list) {
1286                 if (item->q == q && item->binder_type == binder_type) {
1287                         list_del(&item->list);
1288                         kfree(item);
1289                         return;
1290                 }
1291         }
1292         WARN_ON(1);
1293 }
1294
1295 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1296                       struct tcf_block_ext_info *ei,
1297                       struct netlink_ext_ack *extack)
1298 {
1299         struct net *net = qdisc_net(q);
1300         struct tcf_block *block = NULL;
1301         int err;
1302
1303         if (ei->block_index)
1304                 /* block_index not 0 means the shared block is requested */
1305                 block = tcf_block_refcnt_get(net, ei->block_index);
1306
1307         if (!block) {
1308                 block = tcf_block_create(net, q, ei->block_index, extack);
1309                 if (IS_ERR(block))
1310                         return PTR_ERR(block);
1311                 if (tcf_block_shared(block)) {
1312                         err = tcf_block_insert(block, net, extack);
1313                         if (err)
1314                                 goto err_block_insert;
1315                 }
1316         }
1317
1318         err = tcf_block_owner_add(block, q, ei->binder_type);
1319         if (err)
1320                 goto err_block_owner_add;
1321
1322         tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1323
1324         err = tcf_chain0_head_change_cb_add(block, ei, extack);
1325         if (err)
1326                 goto err_chain0_head_change_cb_add;
1327
1328         err = tcf_block_offload_bind(block, q, ei, extack);
1329         if (err)
1330                 goto err_block_offload_bind;
1331
1332         *p_block = block;
1333         return 0;
1334
1335 err_block_offload_bind:
1336         tcf_chain0_head_change_cb_del(block, ei);
1337 err_chain0_head_change_cb_add:
1338         tcf_block_owner_del(block, q, ei->binder_type);
1339 err_block_owner_add:
1340 err_block_insert:
1341         tcf_block_refcnt_put(block, true);
1342         return err;
1343 }
1344 EXPORT_SYMBOL(tcf_block_get_ext);
1345
1346 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1347 {
1348         struct tcf_proto __rcu **p_filter_chain = priv;
1349
1350         rcu_assign_pointer(*p_filter_chain, tp_head);
1351 }
1352
1353 int tcf_block_get(struct tcf_block **p_block,
1354                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1355                   struct netlink_ext_ack *extack)
1356 {
1357         struct tcf_block_ext_info ei = {
1358                 .chain_head_change = tcf_chain_head_change_dflt,
1359                 .chain_head_change_priv = p_filter_chain,
1360         };
1361
1362         WARN_ON(!p_filter_chain);
1363         return tcf_block_get_ext(p_block, q, &ei, extack);
1364 }
1365 EXPORT_SYMBOL(tcf_block_get);
1366
1367 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1368  * actions should be all removed after flushing.
1369  */
1370 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1371                        struct tcf_block_ext_info *ei)
1372 {
1373         if (!block)
1374                 return;
1375         tcf_chain0_head_change_cb_del(block, ei);
1376         tcf_block_owner_del(block, q, ei->binder_type);
1377
1378         __tcf_block_put(block, q, ei, true);
1379 }
1380 EXPORT_SYMBOL(tcf_block_put_ext);
1381
1382 void tcf_block_put(struct tcf_block *block)
1383 {
1384         struct tcf_block_ext_info ei = {0, };
1385
1386         if (!block)
1387                 return;
1388         tcf_block_put_ext(block, block->q, &ei);
1389 }
1390
1391 EXPORT_SYMBOL(tcf_block_put);
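/* A classful qdisc typically pairs these calls: tcf_block_get() from its
 * ->init() and tcf_block_put() from its ->destroy(). Minimal sketch (the
 * "foo" names and priv layout are illustrative):
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */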
1392
1393 static int
1394 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1395                             void *cb_priv, bool add, bool offload_in_use,
1396                             struct netlink_ext_ack *extack)
1397 {
1398         struct tcf_chain *chain, *chain_prev;
1399         struct tcf_proto *tp, *tp_prev;
1400         int err;
1401
1402         lockdep_assert_held(&block->cb_lock);
1403
1404         for (chain = __tcf_get_next_chain(block, NULL);
1405              chain;
1406              chain_prev = chain,
1407                      chain = __tcf_get_next_chain(block, chain),
1408                      tcf_chain_put(chain_prev)) {
1409                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1410                      tp_prev = tp,
1411                              tp = __tcf_get_next_proto(chain, tp),
1412                              tcf_proto_put(tp_prev, true, NULL)) {
1413                         if (tp->ops->reoffload) {
1414                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1415                                                          extack);
1416                                 if (err && add)
1417                                         goto err_playback_remove;
1418                         } else if (add && offload_in_use) {
1419                                 err = -EOPNOTSUPP;
1420                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1421                                 goto err_playback_remove;
1422                         }
1423                 }
1424         }
1425
1426         return 0;
1427
1428 err_playback_remove:
1429         tcf_proto_put(tp, true, NULL);
1430         tcf_chain_put(chain);
1431         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1432                                     extack);
1433         return err;
1434 }
1435
1436 static int tcf_block_bind(struct tcf_block *block,
1437                           struct flow_block_offload *bo)
1438 {
1439         struct flow_block_cb *block_cb, *next;
1440         int err, i = 0;
1441
1442         lockdep_assert_held(&block->cb_lock);
1443
1444         list_for_each_entry(block_cb, &bo->cb_list, list) {
1445                 err = tcf_block_playback_offloads(block, block_cb->cb,
1446                                                   block_cb->cb_priv, true,
1447                                                   tcf_block_offload_in_use(block),
1448                                                   bo->extack);
1449                 if (err)
1450                         goto err_unroll;
1451                 if (!bo->unlocked_driver_cb)
1452                         block->lockeddevcnt++;
1453
1454                 i++;
1455         }
1456         list_splice(&bo->cb_list, &block->flow_block.cb_list);
1457
1458         return 0;
1459
1460 err_unroll:
1461         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1462                 if (i-- > 0) {
1463                         list_del(&block_cb->list);
1464                         tcf_block_playback_offloads(block, block_cb->cb,
1465                                                     block_cb->cb_priv, false,
1466                                                     tcf_block_offload_in_use(block),
1467                                                     NULL);
1468                         if (!bo->unlocked_driver_cb)
1469                                 block->lockeddevcnt--;
1470                 }
1471                 flow_block_cb_free(block_cb);
1472         }
1473
1474         return err;
1475 }
1476
1477 static void tcf_block_unbind(struct tcf_block *block,
1478                              struct flow_block_offload *bo)
1479 {
1480         struct flow_block_cb *block_cb, *next;
1481
1482         lockdep_assert_held(&block->cb_lock);
1483
1484         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1485                 tcf_block_playback_offloads(block, block_cb->cb,
1486                                             block_cb->cb_priv, false,
1487                                             tcf_block_offload_in_use(block),
1488                                             NULL);
1489                 list_del(&block_cb->list);
1490                 flow_block_cb_free(block_cb);
1491                 if (!bo->unlocked_driver_cb)
1492                         block->lockeddevcnt--;
1493         }
1494 }
1495
1496 static int tcf_block_setup(struct tcf_block *block,
1497                            struct flow_block_offload *bo)
1498 {
1499         int err;
1500
1501         switch (bo->command) {
1502         case FLOW_BLOCK_BIND:
1503                 err = tcf_block_bind(block, bo);
1504                 break;
1505         case FLOW_BLOCK_UNBIND:
1506                 err = 0;
1507                 tcf_block_unbind(block, bo);
1508                 break;
1509         default:
1510                 WARN_ON_ONCE(1);
1511                 err = -EOPNOTSUPP;
1512         }
1513
1514         return err;
1515 }
1516
1517 /* Main classifier routine: scans classifier chain attached
1518  * to this qdisc, (optionally) tests for protocol and asks
1519  * specific classifiers.
1520  */
1521 static inline int __tcf_classify(struct sk_buff *skb,
1522                                  const struct tcf_proto *tp,
1523                                  const struct tcf_proto *orig_tp,
1524                                  struct tcf_result *res,
1525                                  bool compat_mode,
1526                                  u32 *last_executed_chain)
1527 {
1528 #ifdef CONFIG_NET_CLS_ACT
1529         const int max_reclassify_loop = 4;
1530         const struct tcf_proto *first_tp;
1531         int limit = 0;
1532
1533 reclassify:
1534 #endif
1535         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1536                 __be16 protocol = tc_skb_protocol(skb);
1537                 int err;
1538
1539                 if (tp->protocol != protocol &&
1540                     tp->protocol != htons(ETH_P_ALL))
1541                         continue;
1542
1543                 err = tp->classify(skb, tp, res);
1544 #ifdef CONFIG_NET_CLS_ACT
1545                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1546                         first_tp = orig_tp;
1547                         *last_executed_chain = first_tp->chain->index;
1548                         goto reset;
1549                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1550                         first_tp = res->goto_tp;
1551                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1552                         goto reset;
1553                 }
1554 #endif
1555                 if (err >= 0)
1556                         return err;
1557         }
1558
1559         return TC_ACT_UNSPEC; /* signal: continue lookup */
1560 #ifdef CONFIG_NET_CLS_ACT
1561 reset:
1562         if (unlikely(limit++ >= max_reclassify_loop)) {
1563                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1564                                        tp->chain->block->index,
1565                                        tp->prio & 0xffff,
1566                                        ntohs(tp->protocol));
1567                 return TC_ACT_SHOT;
1568         }
1569
1570         tp = first_tp;
1571         goto reclassify;
1572 #endif
1573 }
1574
1575 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1576                  struct tcf_result *res, bool compat_mode)
1577 {
1578         u32 last_executed_chain = 0;
1579
1580         return __tcf_classify(skb, tp, tp, res, compat_mode,
1581                               &last_executed_chain);
1582 }
1583 EXPORT_SYMBOL(tcf_classify);
1584
1585 int tcf_classify_ingress(struct sk_buff *skb,
1586                          const struct tcf_block *ingress_block,
1587                          const struct tcf_proto *tp,
1588                          struct tcf_result *res, bool compat_mode)
1589 {
1590 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1591         u32 last_executed_chain = 0;
1592
1593         return __tcf_classify(skb, tp, tp, res, compat_mode,
1594                               &last_executed_chain);
1595 #else
1596         u32 last_executed_chain = tp ? tp->chain->index : 0;
1597         const struct tcf_proto *orig_tp = tp;
1598         struct tc_skb_ext *ext;
1599         int ret;
1600
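        /* A TC_SKB_EXT left on the skb (for example by an earlier
         * classification pass that ended with a chain miss) tells us which
         * chain to resume from instead of starting at the head of the
         * filter list.
         */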
1601         ext = skb_ext_find(skb, TC_SKB_EXT);
1602
1603         if (ext && ext->chain) {
1604                 struct tcf_chain *fchain;
1605
1606                 fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
1607                 if (!fchain)
1608                         return TC_ACT_SHOT;
1609
1610                 /* Consume, so cloned/redirect skbs won't inherit ext */
1611                 skb_ext_del(skb, TC_SKB_EXT);
1612
1613                 tp = rcu_dereference_bh(fchain->filter_chain);
1614                 last_executed_chain = fchain->index;
1615         }
1616
1617         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1618                              &last_executed_chain);
1619
1620         /* No verdict: record the chain so a later pass can resume from it */
1621         if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1622                 ext = skb_ext_add(skb, TC_SKB_EXT);
1623                 if (WARN_ON_ONCE(!ext))
1624                         return TC_ACT_SHOT;
1625                 ext->chain = last_executed_chain;
1626         }
1627
1628         return ret;
1629 #endif
1630 }
1631 EXPORT_SYMBOL(tcf_classify_ingress);
1632
1633 struct tcf_chain_info {
1634         struct tcf_proto __rcu **pprev;
1635         struct tcf_proto __rcu *next;
1636 };
1637
1638 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1639                                            struct tcf_chain_info *chain_info)
1640 {
1641         return tcf_chain_dereference(*chain_info->pprev, chain);
1642 }
1643
1644 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1645                                struct tcf_chain_info *chain_info,
1646                                struct tcf_proto *tp)
1647 {
1648         if (chain->flushing)
1649                 return -EAGAIN;
1650
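        /* When inserting at the head of the chain, let the chain 0
         * head-change callbacks (e.g. the qdisc's cached filter list
         * pointer) observe the new first classifier.
         */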
1651         if (*chain_info->pprev == chain->filter_chain)
1652                 tcf_chain0_head_change(chain, tp);
1653         tcf_proto_get(tp);
1654         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1655         rcu_assign_pointer(*chain_info->pprev, tp);
1656
1657         return 0;
1658 }
1659
1660 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1661                                 struct tcf_chain_info *chain_info,
1662                                 struct tcf_proto *tp)
1663 {
1664         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1665
1666         tcf_proto_mark_delete(tp);
1667         if (tp == chain->filter_chain)
1668                 tcf_chain0_head_change(chain, next);
1669         RCU_INIT_POINTER(*chain_info->pprev, next);
1670 }
1671
1672 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1673                                            struct tcf_chain_info *chain_info,
1674                                            u32 protocol, u32 prio,
1675                                            bool prio_allocate);
1676
1677 /* Try to insert the new proto.
1678  * If a proto with the specified priority already exists, free the new
1679  * proto and return the existing one.
1680  */
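/* A proto still listed in the block's proto_destroy hashtable is in the
 * middle of being destroyed; inserting a matching proto would race with that
 * teardown, so the caller gets -EAGAIN and is expected to replay the request.
 */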
1681
1682 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1683                                                     struct tcf_proto *tp_new,
1684                                                     u32 protocol, u32 prio,
1685                                                     bool rtnl_held)
1686 {
1687         struct tcf_chain_info chain_info;
1688         struct tcf_proto *tp;
1689         int err = 0;
1690
1691         mutex_lock(&chain->filter_chain_lock);
1692
1693         if (tcf_proto_exists_destroying(chain, tp_new)) {
1694                 mutex_unlock(&chain->filter_chain_lock);
1695                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1696                 return ERR_PTR(-EAGAIN);
1697         }
1698
1699         tp = tcf_chain_tp_find(chain, &chain_info,
1700                                protocol, prio, false);
1701         if (!tp)
1702                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1703         mutex_unlock(&chain->filter_chain_lock);
1704
1705         if (tp) {
1706                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1707                 tp_new = tp;
1708         } else if (err) {
1709                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1710                 tp_new = ERR_PTR(err);
1711         }
1712
1713         return tp_new;
1714 }
1715
1716 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1717                                       struct tcf_proto *tp, bool rtnl_held,
1718                                       struct netlink_ext_ack *extack)
1719 {
1720         struct tcf_chain_info chain_info;
1721         struct tcf_proto *tp_iter;
1722         struct tcf_proto **pprev;
1723         struct tcf_proto *next;
1724
1725         mutex_lock(&chain->filter_chain_lock);
1726
1727         /* Atomically find and remove tp from chain. */
1728         for (pprev = &chain->filter_chain;
1729              (tp_iter = tcf_chain_dereference(*pprev, chain));
1730              pprev = &tp_iter->next) {
1731                 if (tp_iter == tp) {
1732                         chain_info.pprev = pprev;
1733                         chain_info.next = tp_iter->next;
1734                         WARN_ON(tp_iter->deleting);
1735                         break;
1736                 }
1737         }
1738         /* Verify that tp still exists and no new filters were inserted
1739          * concurrently.
1740          * Mark tp for deletion if it is empty.
1741          */
1742         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1743                 mutex_unlock(&chain->filter_chain_lock);
1744                 return;
1745         }
1746
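        /* tp is empty and now marked deleting: advertise it in the destroy
         * hashtable so concurrent inserts back off, unlink it from the chain
         * and drop the chain's reference to it.
         */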
1747         tcf_proto_signal_destroying(chain, tp);
1748         next = tcf_chain_dereference(chain_info.next, chain);
1749         if (tp == chain->filter_chain)
1750                 tcf_chain0_head_change(chain, next);
1751         RCU_INIT_POINTER(*chain_info.pprev, next);
1752         mutex_unlock(&chain->filter_chain_lock);
1753
1754         tcf_proto_put(tp, rtnl_held, extack);
1755 }
1756
1757 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1758                                            struct tcf_chain_info *chain_info,
1759                                            u32 protocol, u32 prio,
1760                                            bool prio_allocate)
1761 {
1762         struct tcf_proto **pprev;
1763         struct tcf_proto *tp;
1764
1765         /* Check the chain for an existing tcf_proto with this priority */
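        /* The list is sorted by ascending priority, so the walk can stop at
         * the first entry whose priority is not lower than the requested one.
         */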
1766         for (pprev = &chain->filter_chain;
1767              (tp = tcf_chain_dereference(*pprev, chain));
1768              pprev = &tp->next) {
1769                 if (tp->prio >= prio) {
1770                         if (tp->prio == prio) {
1771                                 if (prio_allocate ||
1772                                     (tp->protocol != protocol && protocol))
1773                                         return ERR_PTR(-EINVAL);
1774                         } else {
1775                                 tp = NULL;
1776                         }
1777                         break;
1778                 }
1779         }
1780         chain_info->pprev = pprev;
1781         if (tp) {
1782                 chain_info->next = tp->next;
1783                 tcf_proto_get(tp);
1784         } else {
1785                 chain_info->next = NULL;
1786         }
1787         return tp;
1788 }
1789
1790 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1791                          struct tcf_proto *tp, struct tcf_block *block,
1792                          struct Qdisc *q, u32 parent, void *fh,
1793                          u32 portid, u32 seq, u16 flags, int event,
1794                          bool terse_dump, bool rtnl_held)
1795 {
1796         struct tcmsg *tcm;
1797         struct nlmsghdr  *nlh;
1798         unsigned char *b = skb_tail_pointer(skb);
1799
1800         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1801         if (!nlh)
1802                 goto out_nlmsg_trim;
1803         tcm = nlmsg_data(nlh);
1804         tcm->tcm_family = AF_UNSPEC;
1805         tcm->tcm__pad1 = 0;
1806         tcm->tcm__pad2 = 0;
1807         if (q) {
1808                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1809                 tcm->tcm_parent = parent;
1810         } else {
1811                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1812                 tcm->tcm_block_index = block->index;
1813         }
1814         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1815         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1816                 goto nla_put_failure;
1817         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1818                 goto nla_put_failure;
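        /* fh == NULL means the message is about the classifier instance
         * itself rather than a particular filter; a terse dump includes only
         * the minimal per-filter attributes the classifier chooses to expose.
         */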
1819         if (!fh) {
1820                 tcm->tcm_handle = 0;
1821         } else if (terse_dump) {
1822                 if (tp->ops->terse_dump) {
1823                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1824                                                 rtnl_held) < 0)
1825                                 goto nla_put_failure;
1826                 } else {
1827                         goto cls_op_not_supp;
1828                 }
1829         } else {
1830                 if (tp->ops->dump &&
1831                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1832                         goto nla_put_failure;
1833         }
1834         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1835         return skb->len;
1836
1837 out_nlmsg_trim:
1838 nla_put_failure:
1839 cls_op_not_supp:
1840         nlmsg_trim(skb, b);
1841         return -1;
1842 }
1843
1844 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1845                           struct nlmsghdr *n, struct tcf_proto *tp,
1846                           struct tcf_block *block, struct Qdisc *q,
1847                           u32 parent, void *fh, int event, bool unicast,
1848                           bool rtnl_held)
1849 {
1850         struct sk_buff *skb;
1851         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1852         int err = 0;
1853
1854         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1855         if (!skb)
1856                 return -ENOBUFS;
1857
1858         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1859                           n->nlmsg_seq, n->nlmsg_flags, event,
1860                           false, rtnl_held) <= 0) {
1861                 kfree_skb(skb);
1862                 return -EINVAL;
1863         }
1864
1865         if (unicast)
1866                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1867         else
1868                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1869                                      n->nlmsg_flags & NLM_F_ECHO);
1870
1871         if (err > 0)
1872                 err = 0;
1873         return err;
1874 }
1875
1876 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1877                               struct nlmsghdr *n, struct tcf_proto *tp,
1878                               struct tcf_block *block, struct Qdisc *q,
1879                               u32 parent, void *fh, bool unicast, bool *last,
1880                               bool rtnl_held, struct netlink_ext_ack *extack)
1881 {
1882         struct sk_buff *skb;
1883         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1884         int err;
1885
1886         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1887         if (!skb)
1888                 return -ENOBUFS;
1889
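        /* Build the notification first, while the filter's attributes can
         * still be dumped; they are gone once ->delete() succeeds.
         */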
1890         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1891                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1892                           false, rtnl_held) <= 0) {
1893                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1894                 kfree_skb(skb);
1895                 return -EINVAL;
1896         }
1897
1898         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1899         if (err) {
1900                 kfree_skb(skb);
1901                 return err;
1902         }
1903
1904         if (unicast)
1905                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1906         else
1907                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1908                                      n->nlmsg_flags & NLM_F_ECHO);
1909         if (err < 0)
1910                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1911
1912         if (err > 0)
1913                 err = 0;
1914         return err;
1915 }
1916
1917 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1918                                  struct tcf_block *block, struct Qdisc *q,
1919                                  u32 parent, struct nlmsghdr *n,
1920                                  struct tcf_chain *chain, int event,
1921                                  bool rtnl_held)
1922 {
1923         struct tcf_proto *tp;
1924
1925         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1926              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1927                 tfilter_notify(net, oskb, n, tp, block,
1928                                q, parent, NULL, event, false, rtnl_held);
1929 }
1930
1931 static void tfilter_put(struct tcf_proto *tp, void *fh)
1932 {
1933         if (tp->ops->put && fh)
1934                 tp->ops->put(tp, fh);
1935 }
1936
1937 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1938                           struct netlink_ext_ack *extack)
1939 {
1940         struct net *net = sock_net(skb->sk);
1941         struct nlattr *tca[TCA_MAX + 1];
1942         char name[IFNAMSIZ];
1943         struct tcmsg *t;
1944         u32 protocol;
1945         u32 prio;
1946         bool prio_allocate;
1947         u32 parent;
1948         u32 chain_index;
1949         struct Qdisc *q = NULL;
1950         struct tcf_chain_info chain_info;
1951         struct tcf_chain *chain = NULL;
1952         struct tcf_block *block;
1953         struct tcf_proto *tp;
1954         unsigned long cl;
1955         void *fh;
1956         int err;
1957         int tp_created;
1958         bool rtnl_held = false;
1959
1960         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1961                 return -EPERM;
1962
1963 replay:
1964         tp_created = 0;
1965
1966         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1967                                      rtm_tca_policy, extack);
1968         if (err < 0)
1969                 return err;
1970
1971         t = nlmsg_data(n);
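        /* tcm_info packs the filter priority into its upper 16 bits and the
         * protocol (ETH_P_*) into its lower 16 bits.
         */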
1972         protocol = TC_H_MIN(t->tcm_info);
1973         prio = TC_H_MAJ(t->tcm_info);
1974         prio_allocate = false;
1975         parent = t->tcm_parent;
1976         tp = NULL;
1977         cl = 0;
1978         block = NULL;
1979
1980         if (prio == 0) {
1981                 /* If no priority is provided by the user,
1982                  * we allocate one.
1983                  */
1984                 if (n->nlmsg_flags & NLM_F_CREATE) {
1985                         prio = TC_H_MAKE(0x80000000U, 0U);
1986                         prio_allocate = true;
1987                 } else {
1988                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1989                         return -ENOENT;
1990                 }
1991         }
1992
1993         /* Find head of filter chain. */
1994
1995         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1996         if (err)
1997                 return err;
1998
1999         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2000                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2001                 err = -EINVAL;
2002                 goto errout;
2003         }
2004
2005         /* Take the rtnl mutex if rtnl_held was set to true on a previous
2006          * iteration, the block is shared (no qdisc found), the qdisc or the
2007          * classifier still requires rtnl, or the classifier type is not specified.
2008          */
2009         if (rtnl_held ||
2010             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2011             !tcf_proto_is_unlocked(name)) {
2012                 rtnl_held = true;
2013                 rtnl_lock();
2014         }
2015
2016         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2017         if (err)
2018                 goto errout;
2019
2020         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2021                                  extack);
2022         if (IS_ERR(block)) {
2023                 err = PTR_ERR(block);
2024                 goto errout;
2025         }
2026         block->classid = parent;
2027
2028         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2029         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2030                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2031                 err = -EINVAL;
2032                 goto errout;
2033         }
2034         chain = tcf_chain_get(block, chain_index, true);
2035         if (!chain) {
2036                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2037                 err = -ENOMEM;
2038                 goto errout;
2039         }
2040
2041         mutex_lock(&chain->filter_chain_lock);
2042         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2043                                prio, prio_allocate);
2044         if (IS_ERR(tp)) {
2045                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2046                 err = PTR_ERR(tp);
2047                 goto errout_locked;
2048         }
2049
2050         if (tp == NULL) {
2051                 struct tcf_proto *tp_new = NULL;
2052
2053                 if (chain->flushing) {
2054                         err = -EAGAIN;
2055                         goto errout_locked;
2056                 }
2057
2058                 /* The requested tcf_proto does not exist, create a new one */
2059
2060                 if (tca[TCA_KIND] == NULL || !protocol) {
2061                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2062                         err = -EINVAL;
2063                         goto errout_locked;
2064                 }
2065
2066                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2067                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2068                         err = -ENOENT;
2069                         goto errout_locked;
2070                 }
2071
2072                 if (prio_allocate)
2073                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2074                                                                &chain_info));
2075
2076                 mutex_unlock(&chain->filter_chain_lock);
2077                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2078                                           rtnl_held, extack);
2079                 if (IS_ERR(tp_new)) {
2080                         err = PTR_ERR(tp_new);
2081                         goto errout_tp;
2082                 }
2083
2084                 tp_created = 1;
2085                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2086                                                 rtnl_held);
2087                 if (IS_ERR(tp)) {
2088                         err = PTR_ERR(tp);
2089                         goto errout_tp;
2090                 }
2091         } else {
2092                 mutex_unlock(&chain->filter_chain_lock);
2093         }
2094
2095         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2096                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2097                 err = -EINVAL;
2098                 goto errout;
2099         }
2100
2101         fh = tp->ops->get(tp, t->tcm_handle);
2102
2103         if (!fh) {
2104                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2105                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2106                         err = -ENOENT;
2107                         goto errout;
2108                 }
2109         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2110                 tfilter_put(tp, fh);
2111                 NL_SET_ERR_MSG(extack, "Filter already exists");
2112                 err = -EEXIST;
2113                 goto errout;
2114         }
2115
2116         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2117                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2118                 err = -EINVAL;
2119                 goto errout;
2120         }
2121
2122         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2123                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2124                               rtnl_held, extack);
2125         if (err == 0) {
2126                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2127                                RTM_NEWTFILTER, false, rtnl_held);
2128                 tfilter_put(tp, fh);
2129                 /* q pointer is NULL for shared blocks */
2130                 if (q)
2131                         q->flags &= ~TCQ_F_CAN_BYPASS;
2132         }
2133
2134 errout:
2135         if (err && tp_created)
2136                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2137 errout_tp:
2138         if (chain) {
2139                 if (tp && !IS_ERR(tp))
2140                         tcf_proto_put(tp, rtnl_held, NULL);
2141                 if (!tp_created)
2142                         tcf_chain_put(chain);
2143         }
2144         tcf_block_release(q, block, rtnl_held);
2145
2146         if (rtnl_held)
2147                 rtnl_unlock();
2148
2149         if (err == -EAGAIN) {
2150                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2151                  * of target chain.
2152                  */
2153                 rtnl_held = true;
2154                 /* Replay the request. */
2155                 goto replay;
2156         }
2157         return err;
2158
2159 errout_locked:
2160         mutex_unlock(&chain->filter_chain_lock);
2161         goto errout;
2162 }
2163
2164 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2165                           struct netlink_ext_ack *extack)
2166 {
2167         struct net *net = sock_net(skb->sk);
2168         struct nlattr *tca[TCA_MAX + 1];
2169         char name[IFNAMSIZ];
2170         struct tcmsg *t;
2171         u32 protocol;
2172         u32 prio;
2173         u32 parent;
2174         u32 chain_index;
2175         struct Qdisc *q = NULL;
2176         struct tcf_chain_info chain_info;
2177         struct tcf_chain *chain = NULL;
2178         struct tcf_block *block = NULL;
2179         struct tcf_proto *tp = NULL;
2180         unsigned long cl = 0;
2181         void *fh = NULL;
2182         int err;
2183         bool rtnl_held = false;
2184
2185         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2186                 return -EPERM;
2187
2188         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2189                                      rtm_tca_policy, extack);
2190         if (err < 0)
2191                 return err;
2192
2193         t = nlmsg_data(n);
2194         protocol = TC_H_MIN(t->tcm_info);
2195         prio = TC_H_MAJ(t->tcm_info);
2196         parent = t->tcm_parent;
2197
2198         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2199                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2200                 return -ENOENT;
2201         }
2202
2203         /* Find head of filter chain. */
2204
2205         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2206         if (err)
2207                 return err;
2208
2209         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2210                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2211                 err = -EINVAL;
2212                 goto errout;
2213         }
2214         /* Take the rtnl mutex if flushing the whole chain, the block is
2215          * shared (no qdisc found), the qdisc or the classifier still
2216          * requires rtnl, or the classifier type is not specified.
2217          */
2218         if (!prio ||
2219             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2220             !tcf_proto_is_unlocked(name)) {
2221                 rtnl_held = true;
2222                 rtnl_lock();
2223         }
2224
2225         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2226         if (err)
2227                 goto errout;
2228
2229         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2230                                  extack);
2231         if (IS_ERR(block)) {
2232                 err = PTR_ERR(block);
2233                 goto errout;
2234         }
2235
2236         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2237         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2238                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2239                 err = -EINVAL;
2240                 goto errout;
2241         }
2242         chain = tcf_chain_get(block, chain_index, false);
2243         if (!chain) {
2244                 /* User requested flush on non-existent chain. Nothing to do,
2245                  * so just return success.
2246                  */
2247                 if (prio == 0) {
2248                         err = 0;
2249                         goto errout;
2250                 }
2251                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2252                 err = -ENOENT;
2253                 goto errout;
2254         }
2255
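        /* prio == 0 requests a flush: notify about and then remove every
         * filter on the chain instead of deleting a single one.
         */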
2256         if (prio == 0) {
2257                 tfilter_notify_chain(net, skb, block, q, parent, n,
2258                                      chain, RTM_DELTFILTER, rtnl_held);
2259                 tcf_chain_flush(chain, rtnl_held);
2260                 err = 0;
2261                 goto errout;
2262         }
2263
2264         mutex_lock(&chain->filter_chain_lock);
2265         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2266                                prio, false);
2267         if (!tp || IS_ERR(tp)) {
2268                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2269                 err = tp ? PTR_ERR(tp) : -ENOENT;
2270                 goto errout_locked;
2271         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2272                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2273                 err = -EINVAL;
2274                 goto errout_locked;
2275         } else if (t->tcm_handle == 0) {
2276                 tcf_proto_signal_destroying(chain, tp);
2277                 tcf_chain_tp_remove(chain, &chain_info, tp);
2278                 mutex_unlock(&chain->filter_chain_lock);
2279
2280                 tcf_proto_put(tp, rtnl_held, NULL);
2281                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2282                                RTM_DELTFILTER, false, rtnl_held);
2283                 err = 0;
2284                 goto errout;
2285         }
2286         mutex_unlock(&chain->filter_chain_lock);
2287
2288         fh = tp->ops->get(tp, t->tcm_handle);
2289
2290         if (!fh) {
2291                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2292                 err = -ENOENT;
2293         } else {
2294                 bool last;
2295
2296                 err = tfilter_del_notify(net, skb, n, tp, block,
2297                                          q, parent, fh, false, &last,
2298                                          rtnl_held, extack);
2299
2300                 if (err)
2301                         goto errout;
2302                 if (last)
2303                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2304         }
2305
2306 errout:
2307         if (chain) {
2308                 if (tp && !IS_ERR(tp))
2309                         tcf_proto_put(tp, rtnl_held, NULL);
2310                 tcf_chain_put(chain);
2311         }
2312         tcf_block_release(q, block, rtnl_held);
2313
2314         if (rtnl_held)
2315                 rtnl_unlock();
2316
2317         return err;
2318
2319 errout_locked:
2320         mutex_unlock(&chain->filter_chain_lock);
2321         goto errout;
2322 }
2323
2324 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2325                           struct netlink_ext_ack *extack)
2326 {
2327         struct net *net = sock_net(skb->sk);
2328         struct nlattr *tca[TCA_MAX + 1];
2329         char name[IFNAMSIZ];
2330         struct tcmsg *t;
2331         u32 protocol;
2332         u32 prio;
2333         u32 parent;
2334         u32 chain_index;
2335         struct Qdisc *q = NULL;
2336         struct tcf_chain_info chain_info;
2337         struct tcf_chain *chain = NULL;
2338         struct tcf_block *block = NULL;
2339         struct tcf_proto *tp = NULL;
2340         unsigned long cl = 0;
2341         void *fh = NULL;
2342         int err;
2343         bool rtnl_held = false;
2344
2345         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2346                                      rtm_tca_policy, extack);
2347         if (err < 0)
2348                 return err;
2349
2350         t = nlmsg_data(n);
2351         protocol = TC_H_MIN(t->tcm_info);
2352         prio = TC_H_MAJ(t->tcm_info);
2353         parent = t->tcm_parent;
2354
2355         if (prio == 0) {
2356                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2357                 return -ENOENT;
2358         }
2359
2360         /* Find head of filter chain. */
2361
2362         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2363         if (err)
2364                 return err;
2365
2366         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2367                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2368                 err = -EINVAL;
2369                 goto errout;
2370         }
2371         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2372          * qdisc or the classifier still requires rtnl, or the classifier
2373          * type is not specified.
2374          */
2375         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2376             !tcf_proto_is_unlocked(name)) {
2377                 rtnl_held = true;
2378                 rtnl_lock();
2379         }
2380
2381         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2382         if (err)
2383                 goto errout;
2384
2385         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2386                                  extack);
2387         if (IS_ERR(block)) {
2388                 err = PTR_ERR(block);
2389                 goto errout;
2390         }
2391
2392         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2393         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2394                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2395                 err = -EINVAL;
2396                 goto errout;
2397         }
2398         chain = tcf_chain_get(block, chain_index, false);
2399         if (!chain) {
2400                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2401                 err = -EINVAL;
2402                 goto errout;
2403         }
2404
2405         mutex_lock(&chain->filter_chain_lock);
2406         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2407                                prio, false);
2408         mutex_unlock(&chain->filter_chain_lock);
2409         if (!tp || IS_ERR(tp)) {
2410                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2411                 err = tp ? PTR_ERR(tp) : -ENOENT;
2412                 goto errout;
2413         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2414                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2415                 err = -EINVAL;
2416                 goto errout;
2417         }
2418
2419         fh = tp->ops->get(tp, t->tcm_handle);
2420
2421         if (!fh) {
2422                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2423                 err = -ENOENT;
2424         } else {
2425                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2426                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2427                 if (err < 0)
2428                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2429         }
2430
2431         tfilter_put(tp, fh);
2432 errout:
2433         if (chain) {
2434                 if (tp && !IS_ERR(tp))
2435                         tcf_proto_put(tp, rtnl_held, NULL);
2436                 tcf_chain_put(chain);
2437         }
2438         tcf_block_release(q, block, rtnl_held);
2439
2440         if (rtnl_held)
2441                 rtnl_unlock();
2442
2443         return err;
2444 }
2445
2446 struct tcf_dump_args {
2447         struct tcf_walker w;
2448         struct sk_buff *skb;
2449         struct netlink_callback *cb;
2450         struct tcf_block *block;
2451         struct Qdisc *q;
2452         u32 parent;
2453         bool terse_dump;
2454 };
2455
2456 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2457 {
2458         struct tcf_dump_args *a = (void *)arg;
2459         struct net *net = sock_net(a->skb->sk);
2460
2461         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2462                              n, NETLINK_CB(a->cb->skb).portid,
2463                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2464                              RTM_NEWTFILTER, a->terse_dump, true);
2465 }
2466
2467 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2468                            struct sk_buff *skb, struct netlink_callback *cb,
2469                            long index_start, long *p_index, bool terse)
2470 {
2471         struct net *net = sock_net(skb->sk);
2472         struct tcf_block *block = chain->block;
2473         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2474         struct tcf_proto *tp, *tp_prev;
2475         struct tcf_dump_args arg;
2476
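        /* Hold a reference on the proto being dumped; the previous proto is
         * released only after the next one has been taken, which keeps the
         * walk safe against concurrent removal.
         */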
2477         for (tp = __tcf_get_next_proto(chain, NULL);
2478              tp;
2479              tp_prev = tp,
2480                      tp = __tcf_get_next_proto(chain, tp),
2481                      tcf_proto_put(tp_prev, true, NULL),
2482                      (*p_index)++) {
2483                 if (*p_index < index_start)
2484                         continue;
2485                 if (TC_H_MAJ(tcm->tcm_info) &&
2486                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2487                         continue;
2488                 if (TC_H_MIN(tcm->tcm_info) &&
2489                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2490                         continue;
2491                 if (*p_index > index_start)
2492                         memset(&cb->args[1], 0,
2493                                sizeof(cb->args) - sizeof(cb->args[0]));
2494                 if (cb->args[1] == 0) {
2495                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2496                                           NETLINK_CB(cb->skb).portid,
2497                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2498                                           RTM_NEWTFILTER, false, true) <= 0)
2499                                 goto errout;
2500                         cb->args[1] = 1;
2501                 }
2502                 if (!tp->ops->walk)
2503                         continue;
2504                 arg.w.fn = tcf_node_dump;
2505                 arg.skb = skb;
2506                 arg.cb = cb;
2507                 arg.block = block;
2508                 arg.q = q;
2509                 arg.parent = parent;
2510                 arg.w.stop = 0;
2511                 arg.w.skip = cb->args[1] - 1;
2512                 arg.w.count = 0;
2513                 arg.w.cookie = cb->args[2];
2514                 arg.terse_dump = terse;
2515                 tp->ops->walk(tp, &arg.w, true);
2516                 cb->args[2] = arg.w.cookie;
2517                 cb->args[1] = arg.w.count + 1;
2518                 if (arg.w.stop)
2519                         goto errout;
2520         }
2521         return true;
2522
2523 errout:
2524         tcf_proto_put(tp, true, NULL);
2525         return false;
2526 }
2527
2528 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2529         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2530 };
2531
2532 /* called with RTNL */
2533 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2534 {
2535         struct tcf_chain *chain, *chain_prev;
2536         struct net *net = sock_net(skb->sk);
2537         struct nlattr *tca[TCA_MAX + 1];
2538         struct Qdisc *q = NULL;
2539         struct tcf_block *block;
2540         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2541         bool terse_dump = false;
2542         long index_start;
2543         long index;
2544         u32 parent;
2545         int err;
2546
2547         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2548                 return skb->len;
2549
2550         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2551                                      tcf_tfilter_dump_policy, cb->extack);
2552         if (err)
2553                 return err;
2554
2555         if (tca[TCA_DUMP_FLAGS]) {
2556                 struct nla_bitfield32 flags =
2557                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2558
2559                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2560         }
2561
2562         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2563                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2564                 if (!block)
2565                         goto out;
2566                 /* If we work with block index, q is NULL and parent value
2567                  * will never be used in the following code. The check
2568                  * in tcf_fill_node prevents it. However, the compiler does not
2569                  * see that far, so set parent to zero to silence the warning
2570                  * about parent being uninitialized.
2571                  */
2572                 parent = 0;
2573         } else {
2574                 const struct Qdisc_class_ops *cops;
2575                 struct net_device *dev;
2576                 unsigned long cl = 0;
2577
2578                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2579                 if (!dev)
2580                         return skb->len;
2581
2582                 parent = tcm->tcm_parent;
2583                 if (!parent)
2584                         q = dev->qdisc;
2585                 else
2586                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2587                 if (!q)
2588                         goto out;
2589                 cops = q->ops->cl_ops;
2590                 if (!cops)
2591                         goto out;
2592                 if (!cops->tcf_block)
2593                         goto out;
2594                 if (TC_H_MIN(tcm->tcm_parent)) {
2595                         cl = cops->find(q, tcm->tcm_parent);
2596                         if (cl == 0)
2597                                 goto out;
2598                 }
2599                 block = cops->tcf_block(q, cl, NULL);
2600                 if (!block)
2601                         goto out;
2602                 parent = block->classid;
2603                 if (tcf_block_shared(block))
2604                         q = NULL;
2605         }
2606
2607         index_start = cb->args[0];
2608         index = 0;
2609
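        /* Keep a reference on the chain currently being dumped so it cannot
         * vanish while its filters are walked; the previous chain is released
         * only after the next one has been taken.
         */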
2610         for (chain = __tcf_get_next_chain(block, NULL);
2611              chain;
2612              chain_prev = chain,
2613                      chain = __tcf_get_next_chain(block, chain),
2614                      tcf_chain_put(chain_prev)) {
2615                 if (tca[TCA_CHAIN] &&
2616                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2617                         continue;
2618                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2619                                     index_start, &index, terse_dump)) {
2620                         tcf_chain_put(chain);
2621                         err = -EMSGSIZE;
2622                         break;
2623                 }
2624         }
2625
2626         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2627                 tcf_block_refcnt_put(block, true);
2628         cb->args[0] = index;
2629
2630 out:
2631         /* If we made no progress, the error (EMSGSIZE) is real */
2632         if (skb->len == 0 && err)
2633                 return err;
2634         return skb->len;
2635 }
2636
2637 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2638                               void *tmplt_priv, u32 chain_index,
2639                               struct net *net, struct sk_buff *skb,
2640                               struct tcf_block *block,
2641                               u32 portid, u32 seq, u16 flags, int event)
2642 {
2643         unsigned char *b = skb_tail_pointer(skb);
2644         const struct tcf_proto_ops *ops;
2645         struct nlmsghdr *nlh;
2646         struct tcmsg *tcm;
2647         void *priv;
2648
2649         ops = tmplt_ops;
2650         priv = tmplt_priv;
2651
2652         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2653         if (!nlh)
2654                 goto out_nlmsg_trim;
2655         tcm = nlmsg_data(nlh);
2656         tcm->tcm_family = AF_UNSPEC;
2657         tcm->tcm__pad1 = 0;
2658         tcm->tcm__pad2 = 0;
2659         tcm->tcm_handle = 0;
2660         if (block->q) {
2661                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2662                 tcm->tcm_parent = block->q->handle;
2663         } else {
2664                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2665                 tcm->tcm_block_index = block->index;
2666         }
2667
2668         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2669                 goto nla_put_failure;
2670
2671         if (ops) {
2672                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2673                         goto nla_put_failure;
2674                 if (ops->tmplt_dump(skb, net, priv) < 0)
2675                         goto nla_put_failure;
2676         }
2677
2678         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2679         return skb->len;
2680
2681 out_nlmsg_trim:
2682 nla_put_failure:
2683         nlmsg_trim(skb, b);
2684         return -EMSGSIZE;
2685 }
2686
2687 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2688                            u32 seq, u16 flags, int event, bool unicast)
2689 {
2690         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2691         struct tcf_block *block = chain->block;
2692         struct net *net = block->net;
2693         struct sk_buff *skb;
2694         int err = 0;
2695
2696         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2697         if (!skb)
2698                 return -ENOBUFS;
2699
2700         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2701                                chain->index, net, skb, block, portid,
2702                                seq, flags, event) <= 0) {
2703                 kfree_skb(skb);
2704                 return -EINVAL;
2705         }
2706
2707         if (unicast)
2708                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2709         else
2710                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2711                                      flags & NLM_F_ECHO);
2712
2713         if (err > 0)
2714                 err = 0;
2715         return err;
2716 }
2717
2718 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2719                                   void *tmplt_priv, u32 chain_index,
2720                                   struct tcf_block *block, struct sk_buff *oskb,
2721                                   u32 seq, u16 flags, bool unicast)
2722 {
2723         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2724         struct net *net = block->net;
2725         struct sk_buff *skb;
2726
2727         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2728         if (!skb)
2729                 return -ENOBUFS;
2730
2731         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2732                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2733                 kfree_skb(skb);
2734                 return -EINVAL;
2735         }
2736
2737         if (unicast)
2738                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2739
2740         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2741 }
2742
2743 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2744                               struct nlattr **tca,
2745                               struct netlink_ext_ack *extack)
2746 {
2747         const struct tcf_proto_ops *ops;
2748         char name[IFNAMSIZ];
2749         void *tmplt_priv;
2750
2751         /* If kind is not set, the user did not specify a template. */
2752         if (!tca[TCA_KIND])
2753                 return 0;
2754
2755         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2756                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2757                 return -EINVAL;
2758         }
2759
2760         ops = tcf_proto_lookup_ops(name, true, extack);
2761         if (IS_ERR(ops))
2762                 return PTR_ERR(ops);
2763         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2764                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                module_put(ops->owner);
2765                 return -EOPNOTSUPP;
2766         }
2767
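        /* tcf_proto_lookup_ops() pinned the classifier module; the reference
         * is kept for the lifetime of the template and dropped again in
         * tc_chain_tmplt_del().
         */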
2768         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2769         if (IS_ERR(tmplt_priv)) {
2770                 module_put(ops->owner);
2771                 return PTR_ERR(tmplt_priv);
2772         }
2773         chain->tmplt_ops = ops;
2774         chain->tmplt_priv = tmplt_priv;
2775         return 0;
2776 }
2777
2778 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2779                                void *tmplt_priv)
2780 {
2781         /* If template ops are not set, there is no template to destroy. */
2782         if (!tmplt_ops)
2783                 return;
2784
2785         tmplt_ops->tmplt_destroy(tmplt_priv);
2786         module_put(tmplt_ops->owner);
2787 }
2788
2789 /* Add/delete/get a chain */
2790
2791 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2792                         struct netlink_ext_ack *extack)
2793 {
2794         struct net *net = sock_net(skb->sk);
2795         struct nlattr *tca[TCA_MAX + 1];
2796         struct tcmsg *t;
2797         u32 parent;
2798         u32 chain_index;
2799         struct Qdisc *q = NULL;
2800         struct tcf_chain *chain = NULL;
2801         struct tcf_block *block;
2802         unsigned long cl;
2803         int err;
2804
2805         if (n->nlmsg_type != RTM_GETCHAIN &&
2806             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2807                 return -EPERM;
2808
2809 replay:
2810         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2811                                      rtm_tca_policy, extack);
2812         if (err < 0)
2813                 return err;
2814
2815         t = nlmsg_data(n);
2816         parent = t->tcm_parent;
2817         cl = 0;
2818
2819         block = tcf_block_find(net, &q, &parent, &cl,
2820                                t->tcm_ifindex, t->tcm_block_index, extack);
2821         if (IS_ERR(block))
2822                 return PTR_ERR(block);
2823
2824         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2825         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2826                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2827                 err = -EINVAL;
2828                 goto errout_block;
2829         }
2830
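        /* Chain lookup, creation and the reference counting below must all
         * happen under the block lock so the chain cannot be removed
         * concurrently.
         */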
2831         mutex_lock(&block->lock);
2832         chain = tcf_chain_lookup(block, chain_index);
2833         if (n->nlmsg_type == RTM_NEWCHAIN) {
2834                 if (chain) {
2835                         if (tcf_chain_held_by_acts_only(chain)) {
2836                                 /* The chain exists only because there is
2837                                  * some action referencing it.
2838                                  */
2839                                 tcf_chain_hold(chain);
2840                         } else {
2841                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2842                                 err = -EEXIST;
2843                                 goto errout_block_locked;
2844                         }
2845                 } else {
2846                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2847                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2848                                 err = -ENOENT;
2849                                 goto errout_block_locked;
2850                         }
2851                         chain = tcf_chain_create(block, chain_index);
2852                         if (!chain) {
2853                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2854                                 err = -ENOMEM;
2855                                 goto errout_block_locked;
2856                         }
2857                 }
2858         } else {
2859                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2860                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2861                         err = -EINVAL;
2862                         goto errout_block_locked;
2863                 }
2864                 tcf_chain_hold(chain);
2865         }
2866
2867         if (n->nlmsg_type == RTM_NEWCHAIN) {
2868                 /* Modifying chain requires holding parent block lock. In case
2869                  * the chain was successfully added, take a reference to the
2870                  * chain. This ensures that an empty chain does not disappear at
2871                  * the end of this function.
2872                  */
2873                 tcf_chain_hold(chain);
2874                 chain->explicitly_created = true;
2875         }
2876         mutex_unlock(&block->lock);
2877
2878         switch (n->nlmsg_type) {
2879         case RTM_NEWCHAIN:
2880                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2881                 if (err) {
2882                         tcf_chain_put_explicitly_created(chain);
2883                         goto errout;
2884                 }
2885
2886                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2887                                 RTM_NEWCHAIN, false);
2888                 break;
2889         case RTM_DELCHAIN:
2890                 tfilter_notify_chain(net, skb, block, q, parent, n,
2891                                      chain, RTM_DELTFILTER, true);
2892                 /* Flush the chain first as the user requested chain removal. */
2893                 tcf_chain_flush(chain, true);
2894                 /* In case the chain was successfully deleted, put a reference
2895                  * to the chain previously taken during addition.
2896                  */
2897                 tcf_chain_put_explicitly_created(chain);
2898                 break;
2899         case RTM_GETCHAIN:
2900                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2901                                       n->nlmsg_seq, n->nlmsg_type, true);
2902                 if (err < 0)
2903                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2904                 break;
2905         default:
2906                 err = -EOPNOTSUPP;
2907                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2908                 goto errout;
2909         }
2910
2911 errout:
2912         tcf_chain_put(chain);
2913 errout_block:
2914         tcf_block_release(q, block, true);
2915         if (err == -EAGAIN)
2916                 /* Replay the request. */
2917                 goto replay;
2918         return err;
2919
2920 errout_block_locked:
2921         mutex_unlock(&block->lock);
2922         goto errout_block;
2923 }
2924
2925 /* called with RTNL */
2926 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2927 {
2928         struct net *net = sock_net(skb->sk);
2929         struct nlattr *tca[TCA_MAX + 1];
2930         struct Qdisc *q = NULL;
2931         struct tcf_block *block;
2932         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2933         struct tcf_chain *chain;
2934         long index_start;
2935         long index;
2936         u32 parent;
2937         int err;
2938
2939         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2940                 return skb->len;
2941
2942         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2943                                      rtm_tca_policy, cb->extack);
2944         if (err)
2945                 return err;
2946
2947         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2948                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2949                 if (!block)
2950                         goto out;
2951                 /* If we work with block index, q is NULL and parent value
2952                  * will never be used in the following code. The check
2953                  * in tcf_fill_node prevents it. However, the compiler does not
2954                  * see that far, so set parent to zero to silence the warning
2955                  * about parent being uninitialized.
2956                  */
2957                 parent = 0;
2958         } else {
2959                 const struct Qdisc_class_ops *cops;
2960                 struct net_device *dev;
2961                 unsigned long cl = 0;
2962
2963                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2964                 if (!dev)
2965                         return skb->len;
2966
2967                 parent = tcm->tcm_parent;
2968                 if (!parent) {
2969                         q = dev->qdisc;
2970                         parent = q->handle;
2971                 } else {
2972                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2973                 }
2974                 if (!q)
2975                         goto out;
2976                 cops = q->ops->cl_ops;
2977                 if (!cops)
2978                         goto out;
2979                 if (!cops->tcf_block)
2980                         goto out;
2981                 if (TC_H_MIN(tcm->tcm_parent)) {
2982                         cl = cops->find(q, tcm->tcm_parent);
2983                         if (cl == 0)
2984                                 goto out;
2985                 }
2986                 block = cops->tcf_block(q, cl, NULL);
2987                 if (!block)
2988                         goto out;
2989                 if (tcf_block_shared(block))
2990                         q = NULL;
2991         }
2992
2993         index_start = cb->args[0];
2994         index = 0;
2995
2996         mutex_lock(&block->lock);
2997         list_for_each_entry(chain, &block->chain_list, list) {
2998                 if (tca[TCA_CHAIN] &&
2999                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3000                         continue;
3001                 if (index < index_start) {
3002                         index++;
3003                         continue;
3004                 }
3005                 if (tcf_chain_held_by_acts_only(chain))
3006                         continue;
3007                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3008                                          chain->index, net, skb, block,
3009                                          NETLINK_CB(cb->skb).portid,
3010                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3011                                          RTM_NEWCHAIN);
3012                 if (err <= 0)
3013                         break;
3014                 index++;
3015         }
3016         mutex_unlock(&block->lock);
3017
3018         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3019                 tcf_block_refcnt_put(block, true);
3020         cb->args[0] = index;
3021
3022 out:
3023         /* If we made no progress, the error (EMSGSIZE) is real */
3024         if (skb->len == 0 && err)
3025                 return err;
3026         return skb->len;
3027 }
3028
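/* Free all actions attached to @exts and reset its action count.
 * Without CONFIG_NET_CLS_ACT this is effectively a no-op.
 */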
3029 void tcf_exts_destroy(struct tcf_exts *exts)
3030 {
3031 #ifdef CONFIG_NET_CLS_ACT
3032         if (exts->actions) {
3033                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3034                 kfree(exts->actions);
3035         }
3036         exts->nr_actions = 0;
3037 #endif
3038 }
3039 EXPORT_SYMBOL(tcf_exts_destroy);
3040
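/* Parse the action attributes of a filter change request into @exts.
 * A legacy single "police" attribute is bound as a TCA_OLD_COMPAT action;
 * otherwise the nested action list is initialized via tcf_action_init().
 * Without CONFIG_NET_CLS_ACT any action attribute is rejected with
 * -EOPNOTSUPP.
 */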
3041 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3042                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3043                       bool rtnl_held, struct netlink_ext_ack *extack)
3044 {
3045 #ifdef CONFIG_NET_CLS_ACT
3046         {
3047                 struct tc_action *act;
3048                 size_t attr_size = 0;
3049
3050                 if (exts->police && tb[exts->police]) {
3051                         act = tcf_action_init_1(net, tp, tb[exts->police],
3052                                                 rate_tlv, "police", ovr,
3053                                                 TCA_ACT_BIND, rtnl_held,
3054                                                 extack);
3055                         if (IS_ERR(act))
3056                                 return PTR_ERR(act);
3057
3058                         act->type = exts->type = TCA_OLD_COMPAT;
3059                         exts->actions[0] = act;
3060                         exts->nr_actions = 1;
3061                 } else if (exts->action && tb[exts->action]) {
3062                         int err;
3063
3064                         err = tcf_action_init(net, tp, tb[exts->action],
3065                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
3066                                               exts->actions, &attr_size,
3067                                               rtnl_held, extack);
3068                         if (err < 0)
3069                                 return err;
3070                         exts->nr_actions = err;
3071                 }
3072         }
3073 #else
3074         if ((exts->action && tb[exts->action]) ||
3075             (exts->police && tb[exts->police])) {
3076                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3077                 return -EOPNOTSUPP;
3078         }
3079 #endif
3080
3081         return 0;
3082 }
3083 EXPORT_SYMBOL(tcf_exts_validate);
3084
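/* Swap the actions of @dst for those of @src and free the old set. */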
3085 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3086 {
3087 #ifdef CONFIG_NET_CLS_ACT
3088         struct tcf_exts old = *dst;
3089
3090         *dst = *src;
3091         tcf_exts_destroy(&old);
3092 #endif
3093 }
3094 EXPORT_SYMBOL(tcf_exts_change);
3095
3096 #ifdef CONFIG_NET_CLS_ACT
3097 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3098 {
3099         if (exts->nr_actions == 0)
3100                 return NULL;
3101         else
3102                 return exts->actions[0];
3103 }
3104 #endif
3105
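/* Dump the filter's actions into a netlink message, using either the
 * nested action list or, for TCA_OLD_COMPAT filters, the legacy single
 * police attribute.
 */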
3106 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3107 {
3108 #ifdef CONFIG_NET_CLS_ACT
3109         struct nlattr *nest;
3110
3111         if (exts->action && tcf_exts_has_actions(exts)) {
3112                 /*
3113                  * Again, for backward-compatible mode: we want to work
3114                  * with both old and new modes of entering tc data even
3115                  * if iproute2 is newer. - jhs
3116                  */
3117                 if (exts->type != TCA_OLD_COMPAT) {
3118                         nest = nla_nest_start_noflag(skb, exts->action);
3119                         if (nest == NULL)
3120                                 goto nla_put_failure;
3121
3122                         if (tcf_action_dump(skb, exts->actions, 0, 0,
3123                                             false) < 0)
3124                                 goto nla_put_failure;
3125                         nla_nest_end(skb, nest);
3126                 } else if (exts->police) {
3127                         struct tc_action *act = tcf_exts_first_act(exts);
3128                         nest = nla_nest_start_noflag(skb, exts->police);
3129                         if (nest == NULL || !act)
3130                                 goto nla_put_failure;
3131                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3132                                 goto nla_put_failure;
3133                         nla_nest_end(skb, nest);
3134                 }
3135         }
3136         return 0;
3137
3138 nla_put_failure:
3139         nla_nest_cancel(skb, nest);
3140         return -1;
3141 #else
3142         return 0;
3143 #endif
3144 }
3145 EXPORT_SYMBOL(tcf_exts_dump);
3146
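/* Like tcf_exts_dump(), but requests a terse action dump that carries
 * only the essential action details.
 */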
3147 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3148 {
3149 #ifdef CONFIG_NET_CLS_ACT
3150         struct nlattr *nest;
3151
3152         if (!exts->action || !tcf_exts_has_actions(exts))
3153                 return 0;
3154
3155         nest = nla_nest_start_noflag(skb, exts->action);
3156         if (!nest)
3157                 goto nla_put_failure;
3158
3159         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3160                 goto nla_put_failure;
3161         nla_nest_end(skb, nest);
3162         return 0;
3163
3164 nla_put_failure:
3165         nla_nest_cancel(skb, nest);
3166         return -1;
3167 #else
3168         return 0;
3169 #endif
3170 }
3171 EXPORT_SYMBOL(tcf_exts_terse_dump);
3172
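/* Dump the statistics of the first action bound to @exts, if any. */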
3173 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3174 {
3175 #ifdef CONFIG_NET_CLS_ACT
3176         struct tc_action *a = tcf_exts_first_act(exts);
3177         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3178                 return -1;
3179 #endif
3180         return 0;
3181 }
3182 EXPORT_SYMBOL(tcf_exts_dump_stats);
3183
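/* Mark a filter as present in hardware and account for it in the block's
 * offload counter. tcf_block_offload_dec() is the inverse; both are no-ops
 * when the TCA_CLS_FLAGS_IN_HW state does not change.
 */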
3184 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3185 {
3186         if (*flags & TCA_CLS_FLAGS_IN_HW)
3187                 return;
3188         *flags |= TCA_CLS_FLAGS_IN_HW;
3189         atomic_inc(&block->offloadcnt);
3190 }
3191
3192 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3193 {
3194         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3195                 return;
3196         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3197         atomic_dec(&block->offloadcnt);
3198 }
3199
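/* Adjust the filter's in_hw count by @diff under tp->lock and update the
 * block offload counter when the filter transitions into or out of
 * hardware.
 */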
3200 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3201                                       struct tcf_proto *tp, u32 *cnt,
3202                                       u32 *flags, u32 diff, bool add)
3203 {
3204         lockdep_assert_held(&block->cb_lock);
3205
3206         spin_lock(&tp->lock);
3207         if (add) {
3208                 if (!*cnt)
3209                         tcf_block_offload_inc(block, flags);
3210                 *cnt += diff;
3211         } else {
3212                 *cnt -= diff;
3213                 if (!*cnt)
3214                         tcf_block_offload_dec(block, flags);
3215         }
3216         spin_unlock(&tp->lock);
3217 }
3218
3219 static void
3220 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3221                          u32 *cnt, u32 *flags)
3222 {
3223         lockdep_assert_held(&block->cb_lock);
3224
3225         spin_lock(&tp->lock);
3226         tcf_block_offload_dec(block, flags);
3227         *cnt = 0;
3228         spin_unlock(&tp->lock);
3229 }
3230
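/* Invoke every callback registered on the block with @type/@type_data.
 * Returns the number of callbacks that succeeded, or the first error
 * when @err_stop is set.
 */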
3231 static int
3232 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3233                    void *type_data, bool err_stop)
3234 {
3235         struct flow_block_cb *block_cb;
3236         int ok_count = 0;
3237         int err;
3238
3239         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3240                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3241                 if (err) {
3242                         if (err_stop)
3243                                 return err;
3244                 } else {
3245                         ok_count++;
3246                 }
3247         }
3248         return ok_count;
3249 }
3250
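/* Call the block callbacks without touching offload counters. Takes the
 * rtnl lock when a bound device requires it and the caller does not
 * already hold it.
 */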
3251 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3252                      void *type_data, bool err_stop, bool rtnl_held)
3253 {
3254         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3255         int ok_count;
3256
3257 retry:
3258         if (take_rtnl)
3259                 rtnl_lock();
3260         down_read(&block->cb_lock);
3261         /* Need to obtain the rtnl lock if the block is bound to devs that
3262          * require it. In the block bind code cb_lock is obtained while
3263          * holding rtnl, so we must obtain the locks in the same order here.
3264          */
3265         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3266                 up_read(&block->cb_lock);
3267                 take_rtnl = true;
3268                 goto retry;
3269         }
3270
3271         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3272
3273         up_read(&block->cb_lock);
3274         if (take_rtnl)
3275                 rtnl_unlock();
3276         return ok_count;
3277 }
3278 EXPORT_SYMBOL(tc_setup_cb_call);
3279
3280 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3281  * successfully offloaded, increment the block offload counter. On failure, a
3282  * previously offloaded filter is considered to be intact and the offload
3283  * counter is not decremented.
3284  */
3285
3286 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3287                     enum tc_setup_type type, void *type_data, bool err_stop,
3288                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3289 {
3290         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3291         int ok_count;
3292
3293 retry:
3294         if (take_rtnl)
3295                 rtnl_lock();
3296         down_read(&block->cb_lock);
3297         /* Need to obtain the rtnl lock if the block is bound to devs that
3298          * require it. In the block bind code cb_lock is obtained while
3299          * holding rtnl, so we must obtain the locks in the same order here.
3300          */
3301         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3302                 up_read(&block->cb_lock);
3303                 take_rtnl = true;
3304                 goto retry;
3305         }
3306
3307         /* Make sure all netdevs sharing this block are offload-capable. */
3308         if (block->nooffloaddevcnt && err_stop) {
3309                 ok_count = -EOPNOTSUPP;
3310                 goto err_unlock;
3311         }
3312
3313         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3314         if (ok_count < 0)
3315                 goto err_unlock;
3316
3317         if (tp->ops->hw_add)
3318                 tp->ops->hw_add(tp, type_data);
3319         if (ok_count > 0)
3320                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3321                                           ok_count, true);
3322 err_unlock:
3323         up_read(&block->cb_lock);
3324         if (take_rtnl)
3325                 rtnl_unlock();
3326         return ok_count < 0 ? ok_count : 0;
3327 }
3328 EXPORT_SYMBOL(tc_setup_cb_add);
3329
3330 /* Destructive filter replace. If a filter that wasn't already in hardware is
3331  * successfully offloaded, increment the block offload counter. On failure, a
3332  * previously offloaded filter is considered to be destroyed and the offload
3333  * counter is decremented.
3334  */
3335
3336 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3337                         enum tc_setup_type type, void *type_data, bool err_stop,
3338                         u32 *old_flags, unsigned int *old_in_hw_count,
3339                         u32 *new_flags, unsigned int *new_in_hw_count,
3340                         bool rtnl_held)
3341 {
3342         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3343         int ok_count;
3344
3345 retry:
3346         if (take_rtnl)
3347                 rtnl_lock();
3348         down_read(&block->cb_lock);
3349         /* Need to obtain the rtnl lock if the block is bound to devs that
3350          * require it. In the block bind code cb_lock is obtained while
3351          * holding rtnl, so we must obtain the locks in the same order here.
3352          */
3353         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3354                 up_read(&block->cb_lock);
3355                 take_rtnl = true;
3356                 goto retry;
3357         }
3358
3359         /* Make sure all netdevs sharing this block are offload-capable. */
3360         if (block->nooffloaddevcnt && err_stop) {
3361                 ok_count = -EOPNOTSUPP;
3362                 goto err_unlock;
3363         }
3364
3365         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3366         if (tp->ops->hw_del)
3367                 tp->ops->hw_del(tp, type_data);
3368
3369         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3370         if (ok_count < 0)
3371                 goto err_unlock;
3372
3373         if (tp->ops->hw_add)
3374                 tp->ops->hw_add(tp, type_data);
3375         if (ok_count > 0)
3376                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3377                                           new_flags, ok_count, true);
3378 err_unlock:
3379         up_read(&block->cb_lock);
3380         if (take_rtnl)
3381                 rtnl_unlock();
3382         return ok_count < 0 ? ok_count : 0;
3383 }
3384 EXPORT_SYMBOL(tc_setup_cb_replace);
3385
3386 /* Destroy the filter and decrement the block offload counter if the filter
3387  * was previously offloaded.
3388  */
3389
3390 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3391                         enum tc_setup_type type, void *type_data, bool err_stop,
3392                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3393 {
3394         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3395         int ok_count;
3396
3397 retry:
3398         if (take_rtnl)
3399                 rtnl_lock();
3400         down_read(&block->cb_lock);
3401         /* Need to obtain the rtnl lock if the block is bound to devs that
3402          * require it. In the block bind code cb_lock is obtained while
3403          * holding rtnl, so we must obtain the locks in the same order here.
3404          */
3405         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3406                 up_read(&block->cb_lock);
3407                 take_rtnl = true;
3408                 goto retry;
3409         }
3410
3411         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3412
3413         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3414         if (tp->ops->hw_del)
3415                 tp->ops->hw_del(tp, type_data);
3416
3417         up_read(&block->cb_lock);
3418         if (take_rtnl)
3419                 rtnl_unlock();
3420         return ok_count < 0 ? ok_count : 0;
3421 }
3422 EXPORT_SYMBOL(tc_setup_cb_destroy);
3423
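/* Replay a single filter to one callback, e.g. when a callback is being
 * registered on or removed from the block, and adjust the filter's in_hw
 * accounting. A failure is only propagated when adding a skip_sw filter.
 */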
3424 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3425                           bool add, flow_setup_cb_t *cb,
3426                           enum tc_setup_type type, void *type_data,
3427                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3428 {
3429         int err = cb(type, type_data, cb_priv);
3430
3431         if (err) {
3432                 if (add && tc_skip_sw(*flags))
3433                         return err;
3434         } else {
3435                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3436                                           add);
3437         }
3438
3439         return 0;
3440 }
3441 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3442
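/* Copy the action's user cookie, if one is set, into the flow_action
 * entry. Done under RCU because the cookie may be replaced concurrently.
 */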
3443 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3444                               const struct tc_action *act)
3445 {
3446         struct tc_cookie *cookie;
3447         int err = 0;
3448
3449         rcu_read_lock();
3450         cookie = rcu_dereference(act->act_cookie);
3451         if (cookie) {
3452                 entry->cookie = flow_action_cookie_create(cookie->data,
3453                                                           cookie->len,
3454                                                           GFP_ATOMIC);
3455                 if (!entry->cookie)
3456                         err = -ENOMEM;
3457         }
3458         rcu_read_unlock();
3459         return err;
3460 }
3461
3462 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3463 {
3464         flow_action_cookie_destroy(entry->cookie);
3465 }
3466
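/* Release per-entry resources (cookies and destructor-managed references
 * such as devices, tunnel info or gate entries) taken while building a
 * flow_action table.
 */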
3467 void tc_cleanup_flow_action(struct flow_action *flow_action)
3468 {
3469         struct flow_action_entry *entry;
3470         int i;
3471
3472         flow_action_for_each(i, entry, flow_action) {
3473                 tcf_act_put_cookie(entry);
3474                 if (entry->destructor)
3475                         entry->destructor(entry->destructor_priv);
3476         }
3477 }
3478 EXPORT_SYMBOL(tc_cleanup_flow_action);
3479
3480 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3481                                const struct tc_action *act)
3482 {
3483 #ifdef CONFIG_NET_CLS_ACT
3484         entry->dev = act->ops->get_dev(act, &entry->destructor);
3485         if (!entry->dev)
3486                 return;
3487         entry->destructor_priv = entry->dev;
3488 #endif
3489 }
3490
3491 static void tcf_tunnel_encap_put_tunnel(void *priv)
3492 {
3493         struct ip_tunnel_info *tunnel = priv;
3494
3495         kfree(tunnel);
3496 }
3497
3498 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3499                                        const struct tc_action *act)
3500 {
3501         entry->tunnel = tcf_tunnel_info_copy(act);
3502         if (!entry->tunnel)
3503                 return -ENOMEM;
3504         entry->destructor = tcf_tunnel_encap_put_tunnel;
3505         entry->destructor_priv = entry->tunnel;
3506         return 0;
3507 }
3508
3509 static void tcf_sample_get_group(struct flow_action_entry *entry,
3510                                  const struct tc_action *act)
3511 {
3512 #ifdef CONFIG_NET_CLS_ACT
3513         entry->sample.psample_group =
3514                 act->ops->get_psample_group(act, &entry->destructor);
3515         entry->destructor_priv = entry->sample.psample_group;
3516 #endif
3517 }
3518
3519 static void tcf_gate_entry_destructor(void *priv)
3520 {
3521         struct action_gate_entry *oe = priv;
3522
3523         kfree(oe);
3524 }
3525
3526 static int tcf_gate_get_entries(struct flow_action_entry *entry,
3527                                 const struct tc_action *act)
3528 {
3529         entry->gate.entries = tcf_gate_get_list(act);
3530
3531         if (!entry->gate.entries)
3532                 return -EINVAL;
3533
3534         entry->destructor = tcf_gate_entry_destructor;
3535         entry->destructor_priv = entry->gate.entries;
3536
3537         return 0;
3538 }
3539
3540 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3541 {
3542         if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3543                 return FLOW_ACTION_HW_STATS_DONT_CARE;
3544         else if (!hw_stats)
3545                 return FLOW_ACTION_HW_STATS_DISABLED;
3546
3547         return hw_stats;
3548 }
3549
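/* Translate the actions bound to @exts into flow_action entries for
 * hardware offload. @flow_action must already provide one entry per
 * action, with pedit expanded to one entry per key; see
 * tcf_exts_num_actions() below. A rough sketch of typical usage in a
 * classifier's offload path (error handling trimmed):
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	err = tc_setup_flow_action(&rule->action, exts);
 *
 * On error, entries initialized so far are released with
 * tc_cleanup_flow_action() before returning.
 */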
3550 int tc_setup_flow_action(struct flow_action *flow_action,
3551                          const struct tcf_exts *exts)
3552 {
3553         struct tc_action *act;
3554         int i, j, k, err = 0;
3555
3556         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3557         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3558         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3559
3560         if (!exts)
3561                 return 0;
3562
3563         j = 0;
3564         tcf_exts_for_each_action(i, act, exts) {
3565                 struct flow_action_entry *entry;
3566
3567                 entry = &flow_action->entries[j];
3568                 spin_lock_bh(&act->tcfa_lock);
3569                 err = tcf_act_get_cookie(entry, act);
3570                 if (err)
3571                         goto err_out_locked;
3572
3573                 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3574
3575                 if (is_tcf_gact_ok(act)) {
3576                         entry->id = FLOW_ACTION_ACCEPT;
3577                 } else if (is_tcf_gact_shot(act)) {
3578                         entry->id = FLOW_ACTION_DROP;
3579                 } else if (is_tcf_gact_trap(act)) {
3580                         entry->id = FLOW_ACTION_TRAP;
3581                 } else if (is_tcf_gact_goto_chain(act)) {
3582                         entry->id = FLOW_ACTION_GOTO;
3583                         entry->chain_index = tcf_gact_goto_chain_index(act);
3584                 } else if (is_tcf_mirred_egress_redirect(act)) {
3585                         entry->id = FLOW_ACTION_REDIRECT;
3586                         tcf_mirred_get_dev(entry, act);
3587                 } else if (is_tcf_mirred_egress_mirror(act)) {
3588                         entry->id = FLOW_ACTION_MIRRED;
3589                         tcf_mirred_get_dev(entry, act);
3590                 } else if (is_tcf_mirred_ingress_redirect(act)) {
3591                         entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3592                         tcf_mirred_get_dev(entry, act);
3593                 } else if (is_tcf_mirred_ingress_mirror(act)) {
3594                         entry->id = FLOW_ACTION_MIRRED_INGRESS;
3595                         tcf_mirred_get_dev(entry, act);
3596                 } else if (is_tcf_vlan(act)) {
3597                         switch (tcf_vlan_action(act)) {
3598                         case TCA_VLAN_ACT_PUSH:
3599                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3600                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3601                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3602                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3603                                 break;
3604                         case TCA_VLAN_ACT_POP:
3605                                 entry->id = FLOW_ACTION_VLAN_POP;
3606                                 break;
3607                         case TCA_VLAN_ACT_MODIFY:
3608                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3609                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3610                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3611                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3612                                 break;
3613                         default:
3614                                 err = -EOPNOTSUPP;
3615                                 goto err_out_locked;
3616                         }
3617                 } else if (is_tcf_tunnel_set(act)) {
3618                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3619                         err = tcf_tunnel_encap_get_tunnel(entry, act);
3620                         if (err)
3621                                 goto err_out_locked;
3622                 } else if (is_tcf_tunnel_release(act)) {
3623                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3624                 } else if (is_tcf_pedit(act)) {
3625                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3626                                 switch (tcf_pedit_cmd(act, k)) {
3627                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3628                                         entry->id = FLOW_ACTION_MANGLE;
3629                                         break;
3630                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3631                                         entry->id = FLOW_ACTION_ADD;
3632                                         break;
3633                                 default:
3634                                         err = -EOPNOTSUPP;
3635                                         goto err_out_locked;
3636                                 }
3637                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3638                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3639                                 entry->mangle.val = tcf_pedit_val(act, k);
3640                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3641                                 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3642                                 entry = &flow_action->entries[++j];
3643                         }
3644                 } else if (is_tcf_csum(act)) {
3645                         entry->id = FLOW_ACTION_CSUM;
3646                         entry->csum_flags = tcf_csum_update_flags(act);
3647                 } else if (is_tcf_skbedit_mark(act)) {
3648                         entry->id = FLOW_ACTION_MARK;
3649                         entry->mark = tcf_skbedit_mark(act);
3650                 } else if (is_tcf_sample(act)) {
3651                         entry->id = FLOW_ACTION_SAMPLE;
3652                         entry->sample.trunc_size = tcf_sample_trunc_size(act);
3653                         entry->sample.truncate = tcf_sample_truncate(act);
3654                         entry->sample.rate = tcf_sample_rate(act);
3655                         tcf_sample_get_group(entry, act);
3656                 } else if (is_tcf_police(act)) {
3657                         entry->id = FLOW_ACTION_POLICE;
3658                         entry->police.burst = tcf_police_tcfp_burst(act);
3659                         entry->police.rate_bytes_ps =
3660                                 tcf_police_rate_bytes_ps(act);
3661                 } else if (is_tcf_ct(act)) {
3662                         entry->id = FLOW_ACTION_CT;
3663                         entry->ct.action = tcf_ct_action(act);
3664                         entry->ct.zone = tcf_ct_zone(act);
3665                         entry->ct.flow_table = tcf_ct_ft(act);
3666                 } else if (is_tcf_mpls(act)) {
3667                         switch (tcf_mpls_action(act)) {
3668                         case TCA_MPLS_ACT_PUSH:
3669                                 entry->id = FLOW_ACTION_MPLS_PUSH;
3670                                 entry->mpls_push.proto = tcf_mpls_proto(act);
3671                                 entry->mpls_push.label = tcf_mpls_label(act);
3672                                 entry->mpls_push.tc = tcf_mpls_tc(act);
3673                                 entry->mpls_push.bos = tcf_mpls_bos(act);
3674                                 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3675                                 break;
3676                         case TCA_MPLS_ACT_POP:
3677                                 entry->id = FLOW_ACTION_MPLS_POP;
3678                                 entry->mpls_pop.proto = tcf_mpls_proto(act);
3679                                 break;
3680                         case TCA_MPLS_ACT_MODIFY:
3681                                 entry->id = FLOW_ACTION_MPLS_MANGLE;
3682                                 entry->mpls_mangle.label = tcf_mpls_label(act);
3683                                 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3684                                 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3685                                 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3686                                 break;
3687                         default:
3688                                 err = -EOPNOTSUPP;
                                     goto err_out_locked;
3689                         }
3690                 } else if (is_tcf_skbedit_ptype(act)) {
3691                         entry->id = FLOW_ACTION_PTYPE;
3692                         entry->ptype = tcf_skbedit_ptype(act);
3693                 } else if (is_tcf_skbedit_priority(act)) {
3694                         entry->id = FLOW_ACTION_PRIORITY;
3695                         entry->priority = tcf_skbedit_priority(act);
3696                 } else if (is_tcf_gate(act)) {
3697                         entry->id = FLOW_ACTION_GATE;
3698                         entry->gate.index = tcf_gate_index(act);
3699                         entry->gate.prio = tcf_gate_prio(act);
3700                         entry->gate.basetime = tcf_gate_basetime(act);
3701                         entry->gate.cycletime = tcf_gate_cycletime(act);
3702                         entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3703                         entry->gate.num_entries = tcf_gate_num_entries(act);
3704                         err = tcf_gate_get_entries(entry, act);
3705                         if (err)
3706                                 goto err_out;
3707                 } else {
3708                         err = -EOPNOTSUPP;
3709                         goto err_out_locked;
3710                 }
3711                 spin_unlock_bh(&act->tcfa_lock);
3712
3713                 if (!is_tcf_pedit(act))
3714                         j++;
3715         }
3716
3717 err_out:
3718         if (err)
3719                 tc_cleanup_flow_action(flow_action);
3720
3721         return err;
3722 err_out_locked:
3723         spin_unlock_bh(&act->tcfa_lock);
3724         goto err_out;
3725 }
3726 EXPORT_SYMBOL(tc_setup_flow_action);
3727
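/* Count the flow_action entries needed to represent @exts: one per
 * action, except pedit, which contributes one entry per key.
 */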
3728 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3729 {
3730         unsigned int num_acts = 0;
3731         struct tc_action *act;
3732         int i;
3733
3734         tcf_exts_for_each_action(i, act, exts) {
3735                 if (is_tcf_pedit(act))
3736                         num_acts += tcf_pedit_nkeys(act);
3737                 else
3738                         num_acts++;
3739         }
3740         return num_acts;
3741 }
3742 EXPORT_SYMBOL(tcf_exts_num_actions);
3743
3744 static __net_init int tcf_net_init(struct net *net)
3745 {
3746         struct tcf_net *tn = net_generic(net, tcf_net_id);
3747
3748         spin_lock_init(&tn->idr_lock);
3749         idr_init(&tn->idr);
3750         return 0;
3751 }
3752
3753 static void __net_exit tcf_net_exit(struct net *net)
3754 {
3755         struct tcf_net *tn = net_generic(net, tcf_net_id);
3756
3757         idr_destroy(&tn->idr);
3758 }
3759
3760 static struct pernet_operations tcf_net_ops = {
3761         .init = tcf_net_init,
3762         .exit = tcf_net_exit,
3763         .id   = &tcf_net_id,
3764         .size = sizeof(struct tcf_net),
3765 };
3766
3767 static int __init tc_filter_init(void)
3768 {
3769         int err;
3770
3771         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3772         if (!tc_filter_wq)
3773                 return -ENOMEM;
3774
3775         err = register_pernet_subsys(&tcf_net_ops);
3776         if (err)
3777                 goto err_register_pernet_subsys;
3778
3779         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3780                       RTNL_FLAG_DOIT_UNLOCKED);
3781         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3782                       RTNL_FLAG_DOIT_UNLOCKED);
3783         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3784                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3785         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3786         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3787         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3788                       tc_dump_chain, 0);
3789
3790         return 0;
3791
3792 err_register_pernet_subsys:
3793         destroy_workqueue(tc_filter_wq);
3794         return err;
3795 }
3796
3797 subsys_initcall(tc_filter_init);