// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
                                       const void *data);

struct switchdev_deferred_item {
        struct list_head list;
        struct net_device *dev;
        netdevice_tracker dev_tracker;
        switchdev_deferred_func_t *func;
        unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
        struct switchdev_deferred_item *dfitem;

        spin_lock_bh(&deferred_lock);
        if (list_empty(&deferred)) {
                dfitem = NULL;
                goto unlock;
        }
        dfitem = list_first_entry(&deferred,
                                  struct switchdev_deferred_item, list);
        list_del(&dfitem->list);
unlock:
        spin_unlock_bh(&deferred_lock);
        return dfitem;
}

/**
 *      switchdev_deferred_process - Process ops in deferred queue
 *
 *      Called to flush the ops currently queued in the deferred ops queue.
 *      rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
        struct switchdev_deferred_item *dfitem;

        ASSERT_RTNL();

        while ((dfitem = switchdev_deferred_dequeue())) {
                dfitem->func(dfitem->dev, dfitem->data);
                netdev_put(dfitem->dev, &dfitem->dev_tracker);
                kfree(dfitem);
        }
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
        rtnl_lock();
        switchdev_deferred_process();
        rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

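/* Queue up a deferred operation. The item carries a copy of @data and a
 * tracked reference on @dev; both are released after @func has run from the
 * deferred work item under rtnl_lock.
 */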
static int switchdev_deferred_enqueue(struct net_device *dev,
                                      const void *data, size_t data_len,
                                      switchdev_deferred_func_t *func)
{
        struct switchdev_deferred_item *dfitem;

        dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
        if (!dfitem)
                return -ENOMEM;
        dfitem->dev = dev;
        dfitem->func = func;
        memcpy(dfitem->data, data, data_len);
        netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
        spin_lock_bh(&deferred_lock);
        list_add_tail(&dfitem->list, &deferred);
        spin_unlock_bh(&deferred_lock);
        schedule_work(&deferred_process_work);
        return 0;
}

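/* Notify the blocking notifier chain about a port attribute operation and
 * translate the result: an error from a handler is returned as-is, while a
 * notification that no handler claimed is reported as -EOPNOTSUPP.
 */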
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
                                      struct net_device *dev,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack)
{
        int err;
        int rc;

        struct switchdev_notifier_port_attr_info attr_info = {
                .attr = attr,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev,
                                               &attr_info.info, extack);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!attr_info.handled);
                return err;
        }

        if (!attr_info.handled)
                return -EOPNOTSUPP;

        return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
                                       const struct switchdev_attr *attr,
                                       struct netlink_ext_ack *extack)
{
        return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
                                          extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
                                             const void *data)
{
        const struct switchdev_attr *attr = data;
        int err;

        err = switchdev_port_attr_set_now(dev, attr, NULL);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
        if (attr->complete)
                attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
                                         const struct switchdev_attr *attr)
{
        return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
                                          switchdev_port_attr_set_deferred);
}

/**
 *      switchdev_port_attr_set - Set port attribute
 *
 *      @dev: port device
 *      @attr: attribute to set
 *      @extack: netlink extended ack, for error message propagation
 *
 *      rtnl_lock must be held and the caller must not be in an atomic
 *      section when the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
                            const struct switchdev_attr *attr,
                            struct netlink_ext_ack *extack)
{
        if (attr->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_attr_set_defer(dev, attr);
        ASSERT_RTNL();
        return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

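/* Size of the concrete object behind a struct switchdev_obj, used to make a
 * deep copy when the operation is deferred.
 */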
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                return sizeof(struct switchdev_obj_port_vlan);
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                return sizeof(struct switchdev_obj_port_mdb);
        default:
                BUG();
        }
        return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
                                     struct net_device *dev,
                                     const struct switchdev_obj *obj,
                                     struct netlink_ext_ack *extack)
{
        int rc;
        int err;

        struct switchdev_notifier_port_obj_info obj_info = {
                .obj = obj,
                .handled = false,
        };

        rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
        err = notifier_to_errno(rc);
        if (err) {
                WARN_ON(!obj_info.handled);
                return err;
        }
        if (!obj_info.handled)
                return -EOPNOTSUPP;
        return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        ASSERT_RTNL();
        err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                        dev, obj, NULL);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_add_deferred);
}

/**
 *      switchdev_port_obj_add - Add port object
 *
 *      @dev: port device
 *      @obj: object to add
 *      @extack: netlink extended ack
 *
 *      rtnl_lock must be held and the caller must not be in an atomic
 *      section when the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
                           const struct switchdev_obj *obj,
                           struct netlink_ext_ack *extack)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_add_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
                                         dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

static int switchdev_port_obj_del_now(struct net_device *dev,
                                      const struct switchdev_obj *obj)
{
        return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
                                         dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
                                            const void *data)
{
        const struct switchdev_obj *obj = data;
        int err;

        err = switchdev_port_obj_del_now(dev, obj);
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
        if (obj->complete)
                obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
                                        const struct switchdev_obj *obj)
{
        return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
                                          switchdev_port_obj_del_deferred);
}

/**
 *      switchdev_port_obj_del - Delete port object
 *
 *      @dev: port device
 *      @obj: object to delete
 *
 *      rtnl_lock must be held and the caller must not be in an atomic
 *      section when the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
                           const struct switchdev_obj *obj)
{
        if (obj->flags & SWITCHDEV_F_DEFER)
                return switchdev_port_obj_del_defer(dev, obj);
        ASSERT_RTNL();
        return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *      register_switchdev_notifier - Register notifier
 *      @nb: notifier_block
 *
 *      Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *      unregister_switchdev_notifier - Unregister notifier
 *      @nb: notifier_block
 *
 *      Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *      call_switchdev_notifiers - Call notifiers
 *      @val: value passed unmodified to notifier function
 *      @dev: port device
 *      @info: notifier information data
 *      @extack: netlink extended ack
 *
 *      Call all switchdev notifier blocks registered on the atomic chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info,
                             struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

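/**
 *      register_switchdev_blocking_notifier - Register blocking notifier
 *      @nb: notifier_block
 *
 *      Register switch device notifier on the blocking notifier chain,
 *      for handlers that need to be able to sleep.
 */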
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

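/**
 *      unregister_switchdev_blocking_notifier - Unregister blocking notifier
 *      @nb: notifier_block
 *
 *      Unregister switch device notifier from the blocking notifier chain.
 */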
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
        struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

        return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

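/**
 *      call_switchdev_blocking_notifiers - Call blocking notifiers
 *      @val: value passed unmodified to notifier function
 *      @dev: port device
 *      @info: notifier information data
 *      @extack: netlink extended ack
 *
 *      Call all switchdev notifier blocks registered on the blocking chain.
 */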
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
                                      struct switchdev_notifier_info *info,
                                      struct netlink_ext_ack *extack)
{
        info->dev = dev;
        info->extack = extack;
        return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
                                            val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

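/* State passed through netdev_walk_all_lower_dev(_rcu) when looking for a
 * switchdev lower interface of @dev; @lower_dev holds the match, if any.
 */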
struct switchdev_nested_priv {
        bool (*check_cb)(const struct net_device *dev);
        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                     const struct net_device *foreign_dev);
        const struct net_device *dev;
        struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
                                    struct netdev_nested_priv *priv)
{
        struct switchdev_nested_priv *switchdev_priv = priv->data;
        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                     const struct net_device *foreign_dev);
        bool (*check_cb)(const struct net_device *dev);
        const struct net_device *dev;

        check_cb = switchdev_priv->check_cb;
        foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
        dev = switchdev_priv->dev;

        if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
                switchdev_priv->lower_dev = lower_dev;
                return 1;
        }

        return 0;
}

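/* Find a lower interface of @dev that passes @check_cb and is not considered
 * foreign to @dev by @foreign_dev_check_cb. The _rcu variant is meant for
 * callers in RCU read-side context.
 */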
static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
                             bool (*check_cb)(const struct net_device *dev),
                             bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                          const struct net_device *foreign_dev))
{
        struct switchdev_nested_priv switchdev_priv = {
                .check_cb = check_cb,
                .foreign_dev_check_cb = foreign_dev_check_cb,
                .dev = dev,
                .lower_dev = NULL,
        };
        struct netdev_nested_priv priv = {
                .data = &switchdev_priv,
        };

        netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

        return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
                         bool (*check_cb)(const struct net_device *dev),
                         bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                      const struct net_device *foreign_dev))
{
        struct switchdev_nested_priv switchdev_priv = {
                .check_cb = check_cb,
                .foreign_dev_check_cb = foreign_dev_check_cb,
                .dev = dev,
                .lower_dev = NULL,
        };
        struct netdev_nested_priv priv = {
                .data = &switchdev_priv,
        };

        netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

        return switchdev_priv.lower_dev;
}

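/* Deliver an FDB event to the switchdev port(s) backing @dev, recursing
 * through bridge and LAG interfaces as needed. @check_cb selects our own
 * ports, @foreign_dev_check_cb identifies ports of other switches, and
 * @mod_cb performs the actual FDB modification.
 */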
static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
                struct net_device *orig_dev, unsigned long event,
                const struct switchdev_notifier_fdb_info *fdb_info,
                bool (*check_cb)(const struct net_device *dev),
                bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                             const struct net_device *foreign_dev),
                int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                              unsigned long event, const void *ctx,
                              const struct switchdev_notifier_fdb_info *fdb_info))
{
        const struct switchdev_notifier_info *info = &fdb_info->info;
        struct net_device *br, *lower_dev, *switchdev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev))
                return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

        /* Recurse through lower interfaces in case the FDB entry is pointing
         * towards a bridge or a LAG device.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                /* Do not propagate FDB entries across bridges */
                if (netif_is_bridge_master(lower_dev))
                        continue;

                /* Bridge ports might be either us, or LAG interfaces
                 * that we offload.
                 */
                if (!check_cb(lower_dev) &&
                    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
                                                  foreign_dev_check_cb))
                        continue;

                err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
                                                             event, fdb_info, check_cb,
                                                             foreign_dev_check_cb,
                                                             mod_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        /* Event is neither on a bridge nor a LAG. Check whether it is on an
         * interface that is in a bridge with us.
         */
        br = netdev_master_upper_dev_get_rcu(dev);
        if (!br || !netif_is_bridge_master(br))
                return 0;

        switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
        if (!switchdev)
                return 0;

        if (!foreign_dev_check_cb(switchdev, dev))
                return err;

        return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
                                                      check_cb, foreign_dev_check_cb,
                                                      mod_cb);
}

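/* Dispatch an FDB add/delete event towards the switchdev ports that should
 * offload it, replicating it through bridges and LAGs. -EOPNOTSUPP from the
 * recursion means nobody offloads this entry and is not treated as an error.
 */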
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
                const struct switchdev_notifier_fdb_info *fdb_info,
                bool (*check_cb)(const struct net_device *dev),
                bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                             const struct net_device *foreign_dev),
                int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
                              unsigned long event, const void *ctx,
                              const struct switchdev_notifier_fdb_info *fdb_info))
{
        int err;

        err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
                                                     check_cb, foreign_dev_check_cb,
                                                     mod_cb);
        if (err == -EOPNOTSUPP)
                err = 0;

        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                     const struct net_device *foreign_dev),
                        int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
{
        struct switchdev_notifier_info *info = &port_obj_info->info;
        struct net_device *br, *lower_dev, *switchdev;
        struct netlink_ext_ack *extack;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        extack = switchdev_notifier_info_to_extack(info);

        if (check_cb(dev)) {
                err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                /* When searching for switchdev interfaces that are neighbors
                 * of foreign ones, and @dev is a bridge, do not recurse on the
                 * foreign interface again, it was already visited.
                 */
                if (foreign_dev_check_cb && !check_cb(lower_dev) &&
                    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
                        continue;

                err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
                                                      check_cb, foreign_dev_check_cb,
                                                      add_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        /* Event is neither on a bridge nor a LAG. Check whether it is on an
         * interface that is in a bridge with us.
         */
        if (!foreign_dev_check_cb)
                return err;

        br = netdev_master_upper_dev_get(dev);
        if (!br || !netif_is_bridge_master(br))
                return err;

        switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
        if (!switchdev)
                return err;

        if (!foreign_dev_check_cb(switchdev, dev))
                return err;

        return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
                                               foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
                                              NULL, add_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                     const struct net_device *foreign_dev),
                        int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
                                              foreign_dev_check_cb, add_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                     const struct net_device *foreign_dev),
                        int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
{
        struct switchdev_notifier_info *info = &port_obj_info->info;
        struct net_device *br, *lower_dev, *switchdev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        if (check_cb(dev)) {
                err = del_cb(dev, info->ctx, port_obj_info->obj);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                /* When searching for switchdev interfaces that are neighbors
                 * of foreign ones, and @dev is a bridge, do not recurse on the
                 * foreign interface again, it was already visited.
                 */
                if (foreign_dev_check_cb && !check_cb(lower_dev) &&
                    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
                        continue;

                err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
                                                      check_cb, foreign_dev_check_cb,
                                                      del_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        /* Event is neither on a bridge nor a LAG. Check whether it is on an
         * interface that is in a bridge with us.
         */
        if (!foreign_dev_check_cb)
                return err;

        br = netdev_master_upper_dev_get(dev);
        if (!br || !netif_is_bridge_master(br))
                return err;

        switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
        if (!switchdev)
                return err;

        if (!foreign_dev_check_cb(switchdev, dev))
                return err;

        return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
                                               foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
{
        int err;

        err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
                                              NULL, del_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
                        bool (*foreign_dev_check_cb)(const struct net_device *dev,
                                                     const struct net_device *foreign_dev),
                        int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
{
        int err;

        err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
                                              foreign_dev_check_cb, del_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
{
        struct switchdev_notifier_info *info = &port_attr_info->info;
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;

        extack = switchdev_notifier_info_to_extack(info);

        if (check_cb(dev)) {
                err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
                if (err != -EOPNOTSUPP)
                        port_attr_info->handled = true;
                return err;
        }

        /* Switch ports might be stacked under e.g. a LAG. Ignore the
         * unsupported devices, another driver might be able to handle them. But
         * propagate to the callers any hard errors.
         *
         * If the driver does its own bookkeeping of stacked ports, it's not
         * necessary to go through this helper.
         */
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                if (netif_is_bridge_master(lower_dev))
                        continue;

                err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
                                                       check_cb, set_cb);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return err;
}

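/* Pass through a port attribute set, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */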
int switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
                        int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
{
        int err;

        err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
                                               set_cb);
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

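/**
 *      switchdev_bridge_port_offload - Notify bridge of an offloaded port
 *      @brport_dev: bridge port device being offloaded
 *      @dev: switchdev port doing the offloading
 *      @ctx: context passed back to the driver's notifier blocks
 *      @atomic_nb: driver notifier block for the atomic chain
 *      @blocking_nb: driver notifier block for the blocking chain
 *      @tx_fwd_offload: whether the port supports TX forwarding offload
 *      @extack: netlink extended ack
 *
 *      Emits SWITCHDEV_BRPORT_OFFLOADED on the blocking notifier chain.
 *      rtnl_lock must be held.
 */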
int switchdev_bridge_port_offload(struct net_device *brport_dev,
                                  struct net_device *dev, const void *ctx,
                                  struct notifier_block *atomic_nb,
                                  struct notifier_block *blocking_nb,
                                  bool tx_fwd_offload,
                                  struct netlink_ext_ack *extack)
{
        struct switchdev_notifier_brport_info brport_info = {
                .brport = {
                        .dev = dev,
                        .ctx = ctx,
                        .atomic_nb = atomic_nb,
                        .blocking_nb = blocking_nb,
                        .tx_fwd_offload = tx_fwd_offload,
                },
        };
        int err;

        ASSERT_RTNL();

        err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
                                                brport_dev, &brport_info.info,
                                                extack);
        return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

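/**
 *      switchdev_bridge_port_unoffload - Notify bridge of an unoffloaded port
 *      @brport_dev: bridge port device no longer being offloaded
 *      @ctx: context previously passed to switchdev_bridge_port_offload()
 *      @atomic_nb: driver notifier block for the atomic chain
 *      @blocking_nb: driver notifier block for the blocking chain
 *
 *      Emits SWITCHDEV_BRPORT_UNOFFLOADED on the blocking notifier chain.
 *      rtnl_lock must be held.
 */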
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
                                     const void *ctx,
                                     struct notifier_block *atomic_nb,
                                     struct notifier_block *blocking_nb)
{
        struct switchdev_notifier_brport_info brport_info = {
                .brport = {
                        .ctx = ctx,
                        .atomic_nb = atomic_nb,
                        .blocking_nb = blocking_nb,
                },
        };

        ASSERT_RTNL();

        call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
                                          brport_dev, &brport_info.info,
                                          NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);