/* net/bridge/br_switchdev.c — from linux-starfive, merged via tag 'mips_5.16_1' */
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/list.h>
4 #include <linux/netdevice.h>
5 #include <linux/rtnetlink.h>
6 #include <linux/skbuff.h>
7 #include <net/ip.h>
8 #include <net/switchdev.h>
9
10 #include "br_private.h"
11
/* Static branch enabled (via static_branch_inc() in nbp_switchdev_add())
 * while at least one bridge port has BR_TX_FWD_OFFLOAD set, so the common
 * non-offloaded case costs nothing in the forwarding fast path.
 */
static struct static_key_false br_switchdev_tx_fwd_offload;
13
14 static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
15                                              const struct sk_buff *skb)
16 {
17         if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
18                 return false;
19
20         return (p->flags & BR_TX_FWD_OFFLOAD) &&
21                (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
22 }
23
24 bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
25 {
26         if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
27                 return false;
28
29         return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
30 }
31
/* Propagate the bridge's TX forwarding offload decision for this frame
 * into skb->offload_fwd_mark.
 */
void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}
36
37 /* Mark the frame for TX forwarding offload if this egress port supports it */
38 void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
39                                              struct sk_buff *skb)
40 {
41         if (nbp_switchdev_can_offload_tx_fwd(p, skb))
42                 BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
43 }
44
45 /* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
46  * that the skb has been already forwarded to, to avoid further cloning to
47  * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
48  * return false.
49  */
50 void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
51                                               struct sk_buff *skb)
52 {
53         if (nbp_switchdev_can_offload_tx_fwd(p, skb))
54                 set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
55 }
56
57 void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
58                               struct sk_buff *skb)
59 {
60         if (p->hwdom)
61                 BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
62 }
63
64 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
65                                   const struct sk_buff *skb)
66 {
67         struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);
68
69         return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
70                 (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
71 }
72
/* Flags that can be offloaded to hardware; anything outside this set is
 * masked off before consulting switchdev drivers.
 */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD)
76
/* Offload a bridge port flag change to the switchdev driver backing p->dev.
 *
 * Two-phase protocol:
 *  1. PRE_BRIDGE_FLAGS is sent on the atomic notifier chain so a driver can
 *     veto flags it cannot offload before the bridge commits to them.
 *  2. BRIDGE_FLAGS is then set as a deferred attr to actually program the
 *     hardware.
 *
 * Returns 0 when there is nothing to offload or no driver listens
 * (-EOPNOTSUPP from phase 1 means "no offloader"), -EOPNOTSUPP when the
 * driver rejects the flags, or the error from the deferred attr set.
 */
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	/* Only consult drivers about flags that are offloadable at all */
	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		/* Keep any more specific message the driver already set */
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "error setting offload flag on port");
		return err;
	}

	return 0;
}
125
/* Fill a switchdev FDB notification from a bridge FDB entry.
 *
 * fdb->dst is read with READ_ONCE() because it may be updated concurrently.
 * Entries without a destination port, and local (host) entries, are
 * reported against the bridge device itself rather than a port device.
 */
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}
141
142 void
143 br_switchdev_fdb_notify(struct net_bridge *br,
144                         const struct net_bridge_fdb_entry *fdb, int type)
145 {
146         struct switchdev_notifier_fdb_info item;
147
148         br_switchdev_fdb_populate(br, &item, fdb, NULL);
149
150         switch (type) {
151         case RTM_DELNEIGH:
152                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
153                                          item.info.dev, &item.info, NULL);
154                 break;
155         case RTM_NEWNEIGH:
156                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
157                                          item.info.dev, &item.info, NULL);
158                 break;
159         }
160 }
161
162 int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
163                                struct netlink_ext_ack *extack)
164 {
165         struct switchdev_obj_port_vlan v = {
166                 .obj.orig_dev = dev,
167                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
168                 .flags = flags,
169                 .vid = vid,
170         };
171
172         return switchdev_port_obj_add(dev, &v.obj, extack);
173 }
174
175 int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
176 {
177         struct switchdev_obj_port_vlan v = {
178                 .obj.orig_dev = dev,
179                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
180                 .vid = vid,
181         };
182
183         return switchdev_port_obj_del(dev, &v.obj);
184 }
185
/* Assign a hardware domain to a port being offloaded.
 *
 * Ports backed by the same physical switch (same port parent ID) share one
 * hwdom. If no existing port matches, allocate a fresh domain from the
 * bridge-wide bitmap; the search starts at bit 1 because hwdom 0 denotes
 * "not offloaded". Returns -EBUSY once all BR_HWDOM_MAX domains are taken.
 */
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}
208
/* Release a leaving port's hardware domain back to the bridge bitmap,
 * but only if no remaining port still uses the same domain.
 */
static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}
222
/* Register @p as offloaded by the switch identified by @ppid, keeping a
 * reference count so stacked devices (bond/team over switch ports) may be
 * offloaded once per lower device.
 */
static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	/* NOTE(review): if hwdom allocation fails, offload_count stays at 1
	 * even though the caller treats the offload as failed — confirm this
	 * is intentional.
	 */
	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		/* Enable the TX forwarding offload fast-path static branch */
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}
264
/* Drop one offload reference on @p; on the last one, release its hardware
 * domain and disable the TX forwarding offload static branch if this port
 * had it enabled.
 */
static void nbp_switchdev_del(struct net_bridge_port *p)
{
	/* Unbalanced unoffload — nothing to release */
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}
283
284 static int
285 br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
286                             const struct net_bridge_fdb_entry *fdb,
287                             unsigned long action, const void *ctx)
288 {
289         struct switchdev_notifier_fdb_info item;
290         int err;
291
292         br_switchdev_fdb_populate(br, &item, fdb, ctx);
293
294         err = nb->notifier_call(nb, action, &item);
295         return notifier_to_errno(err);
296 }
297
/* Replay all current FDB entries of @br_dev towards @nb, as ADD or DEL
 * events depending on @adding, so a driver can catch up with (or flush)
 * the bridge's existing state. Walks the FDB under RCU and stops at the
 * first notifier error.
 */
static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}
332
333 static int
334 br_switchdev_vlan_replay_one(struct notifier_block *nb,
335                              struct net_device *dev,
336                              struct switchdev_obj_port_vlan *vlan,
337                              const void *ctx, unsigned long action,
338                              struct netlink_ext_ack *extack)
339 {
340         struct switchdev_notifier_port_obj_info obj_info = {
341                 .info = {
342                         .dev = dev,
343                         .extack = extack,
344                         .ctx = ctx,
345                 },
346                 .obj = &vlan->obj,
347         };
348         int err;
349
350         err = nb->notifier_call(nb, action, &obj_info);
351         return notifier_to_errno(err);
352 }
353
/* Replay the VLAN configuration of @dev — either the bridge device itself
 * or one of its ports — towards @nb as port object ADD/DEL events,
 * depending on @adding. Runs under RTNL. Returns the first notifier
 * error, or 0.
 */
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    struct net_device *dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;
	u16 pvid;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		/* Replay the bridge device's own VLAN group */
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		p = NULL;
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		/* Skip entries that should not be used on this device */
		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return err;
}
420
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* Context for the deferred MDB add completion callback, identifying which
 * port/group pair was programmed. Freed by br_switchdev_mdb_complete().
 */
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;	/* egress port of the MDB entry */
	struct br_ip ip;		/* group address used for re-lookup */
};
426
/* Completion callback for a deferred SWITCHDEV_OBJ_ID_PORT_MDB add.
 * On success, re-look up the MDB entry (it may have been deleted in the
 * meantime) and mark the matching port group as offloaded, under the
 * bridge multicast lock. Always frees @priv (the complete_info allocated
 * by br_switchdev_mdb_notify()).
 */
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
454
/* Translate an MDB entry's group address into the switchdev object:
 * IPv4/IPv6 groups are mapped to their multicast MAC address, anything
 * else is already an L2 address and is copied verbatim. The VID is
 * carried over as-is.
 */
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}
469
470 static void br_switchdev_host_mdb_one(struct net_device *dev,
471                                       struct net_device *lower_dev,
472                                       struct net_bridge_mdb_entry *mp,
473                                       int type)
474 {
475         struct switchdev_obj_port_mdb mdb = {
476                 .obj = {
477                         .id = SWITCHDEV_OBJ_ID_HOST_MDB,
478                         .flags = SWITCHDEV_F_DEFER,
479                         .orig_dev = dev,
480                 },
481         };
482
483         br_switchdev_mdb_populate(&mdb, mp);
484
485         switch (type) {
486         case RTM_NEWMDB:
487                 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
488                 break;
489         case RTM_DELMDB:
490                 switchdev_port_obj_del(lower_dev, &mdb.obj);
491                 break;
492         }
493 }
494
/* Propagate a host-joined MDB entry to every device stacked under the
 * bridge device, one HOST_MDB object per lower device.
 */
static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}
504
505 static int
506 br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
507                             const struct switchdev_obj_port_mdb *mdb,
508                             unsigned long action, const void *ctx,
509                             struct netlink_ext_ack *extack)
510 {
511         struct switchdev_notifier_port_obj_info obj_info = {
512                 .info = {
513                         .dev = dev,
514                         .extack = extack,
515                         .ctx = ctx,
516                 },
517                 .obj = &mdb->obj,
518         };
519         int err;
520
521         err = nb->notifier_call(nb, action, &obj_info);
522         return notifier_to_errno(err);
523 }
524
525 static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
526                                       enum switchdev_obj_id id,
527                                       const struct net_bridge_mdb_entry *mp,
528                                       struct net_device *orig_dev)
529 {
530         struct switchdev_obj_port_mdb *mdb;
531
532         mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
533         if (!mdb)
534                 return -ENOMEM;
535
536         mdb->obj.id = id;
537         mdb->obj.orig_dev = orig_dev;
538         br_switchdev_mdb_populate(mdb, mp);
539         list_add_tail(&mdb->obj.list, mdb_list);
540
541         return 0;
542 }
543
/* Notify switchdev of a new or deleted MDB entry.
 *
 * Without a port group (@pg == NULL) the entry is host-joined and is
 * offloaded as HOST_MDB objects on all lower devices. Otherwise a deferred
 * PORT_MDB object is sent to the member port's device; on RTM_NEWMDB a
 * completion context is attached so the port group can be flagged
 * MDB_PG_FLAGS_OFFLOAD once the deferred add completes.
 */
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		/* GFP_ATOMIC: this may run in atomic context */
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		/* If the add fails synchronously the completion callback
		 * never runs, so free the context here.
		 */
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif
581
/* Replay the MDB state relevant to @dev towards @nb, as ADD or DEL port
 * object events depending on @adding: host-joined groups as HOST_MDB on
 * the bridge device, and this port's group memberships as PORT_MDB.
 * Compiles to "return 0" without CONFIG_BRIDGE_IGMP_SNOOPING.
 */
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	/* Nothing to replay when multicast snooping is disabled */
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		/* Queue only the memberships belonging to this port */
		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* Now in blocking context: deliver the queued objects */
	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	/* Free the queued objects whether or not the replay succeeded */
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}
674
/* Bring a newly offloaded port's driver up to date with the bridge's
 * already-configured VLANs, MDB entries and FDB entries. -EOPNOTSUPP from
 * any replay is tolerated so drivers may implement only a subset.
 */
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, dev, ctx, true, blocking_nb,
				       extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}
700
/* Mirror of nbp_switchdev_sync_objs(): replay deletion events so the
 * driver flushes the bridge state it had previously been told about.
 * Errors are ignored on this teardown path.
 */
static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_vlan_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
}
715
/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 *
 * Identifies the physical switch via the port parent ID, registers the
 * offload (with refcounting for stacked devices), then replays existing
 * bridge state to the driver. On replay failure the registration is
 * rolled back.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}
748
/* Undo br_switchdev_port_offload(): replay deletions to the driver, then
 * drop the port's offload reference.
 */
void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}