drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

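/* FDB updates arriving on the atomic switchdev notifier chain are copied into
 * this work item and processed later on the bridge offloads workqueue.
 */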
struct mlx5_bridge_switchdev_fdb_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
        struct mlx5_esw_bridge_offloads *br_offloads;
        bool add;
};

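/* A netdev is on the same eswitch when its core device points at @esw; it is
 * on the same hardware when both devices report the same NIC system image
 * GUID (e.g. the two PFs of a dual-port device).
 */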
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return esw == priv->mdev->priv.eswitch;
}

static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev, *esw_mdev;
        u64 system_guid, esw_system_guid;

        mdev = priv->mdev;
        esw_mdev = esw->dev;

        system_guid = mlx5_query_nic_system_image_guid(mdev);
        esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

        return system_guid == esw_system_guid;
}

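/* For a LAG (bond) device, return the lower mlx5 eswitch representor that
 * belongs to @esw, but only when the LAG runs in shared-FDB mode.
 */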
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
                        return lower;
        }

        return NULL;
}

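/* Resolve a (possibly bonded) representor netdev to its vport number and
 * owning eswitch vhca id. Returns NULL if @dev is not a representor on the
 * same hardware as @esw or if bridge offloads are not initialized for it.
 */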
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                          u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *priv;

        if (netif_is_lag_master(dev))
                dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

        if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
                return NULL;

        priv = netdev_priv(dev);

        if (!priv->mdev->priv.eswitch->br_offloads)
                return NULL;

        rpriv = priv->ppriv;
        *vport_num = rpriv->rep->vport;
        *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
        return dev;
}

static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                                u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
                return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
                                                                 esw_owner_vhca_id);

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                struct net_device *rep;

                if (netif_is_bridge_master(lower_dev))
                        continue;

                rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
                                                                      esw_owner_vhca_id);
                if (rep)
                        return rep;
        }

        return NULL;
}

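/* A port is "local" when its representor belongs to @esw; for a bond only the
 * shared-FDB LAG master device is treated as local.
 */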
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
                                     struct mlx5_eswitch *esw)
{
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;

        if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
                return false;

        priv = netdev_priv(rep);
        mdev = priv->mdev;
        if (netif_is_lag_master(dev))
                return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
        return true;
}

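/* NETDEV_CHANGEUPPER: a representor (or bond of representors) was attached to
 * or detached from a bridge. Local ports are linked/unlinked directly; ports
 * that share the hardware but belong to another eswitch go through the peer
 * link/unlink helpers.
 */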
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    netdev_nb);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev, *rep;
        struct mlx5_eswitch *esw = br_offloads->esw;
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int err = 0;

        if (!netif_is_bridge_master(upper))
                return 0;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        if (mlx5_esw_bridge_is_local(dev, rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
                                                   br_offloads, extack) :
                        mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
                                                     br_offloads, extack);
        else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
                                                        br_offloads, extack) :
                        mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
                                                          br_offloads, extack);

        return err;
}

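/* NETDEV_PRECHANGEUPPER: enslaving a bond of mlx5 representors to a bridge is
 * only allowed once LAG is active (-EAGAIN until then) and only when the LAG
 * uses a shared FDB (-EOPNOTSUPP otherwise).
 */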
static int
mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev;
        struct net_device *lower;
        struct list_head *iter;

        if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
                return 0;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (!mlx5_lag_is_active(mdev))
                        return -EAGAIN;
                if (!mlx5_lag_is_shared_fdb(mdev))
                        return -EOPNOTSUPP;
        }

        return 0;
}

static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
                                                unsigned long event, void *ptr)
{
        int err = 0;

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
                break;

        case NETDEV_CHANGEUPPER:
                err = mlx5_esw_bridge_port_changeupper(nb, ptr);
                break;
        }

        return notifier_from_errno(err);
}

static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        const struct switchdev_obj_port_mdb *mdb;
        u16 vport_num, esw_owner_vhca_id;
        int err;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
                                                    vlan->flags, br_offloads, extack);
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
                err = mlx5_esw_bridge_port_mdb_add(dev, vport_num, esw_owner_vhca_id, mdb->addr,
                                                   mdb->vid, br_offloads, extack);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return err;
}

static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        const struct switchdev_obj_port_mdb *mdb;
        u16 vport_num, esw_owner_vhca_id;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
                mlx5_esw_bridge_port_mdb_del(dev, vport_num, esw_owner_vhca_id, mdb->addr, mdb->vid,
                                             br_offloads);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
                                  struct switchdev_notifier_port_attr_info *port_attr_info,
                                  struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
        int err = 0;

        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
                return 0;

        port_attr_info->handled = true;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
                if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
                        NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
                        err = -EINVAL;
                }
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
                                                      attr->u.ageing_time, br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
                                                         attr->u.vlan_filtering, br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
                err = mlx5_esw_bridge_vlan_proto_set(vport_num,
                                                     esw_owner_vhca_id,
                                                     attr->u.vlan_protocol,
                                                     br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
                err = mlx5_esw_bridge_mcast_set(vport_num, esw_owner_vhca_id,
                                                !attr->u.mc_disabled, br_offloads);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

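/* Blocking switchdev notifier: dispatches port object add/del (VLAN, MDB) and
 * port attribute changes to the offload implementation above.
 */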
static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
                                          unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb_blk);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
                err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_OBJ_DEL:
                err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_ATTR_SET:
                err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
                break;
        default:
                err = 0;
        }

        return notifier_from_errno(err);
}

static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
        dev_put(fdb_work->dev);
        kfree(fdb_work->fdb_info.addr);
        kfree(fdb_work);
}

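/* Deferred FDB work: runs under rtnl, re-resolves the representor (it may
 * have changed since the notifier fired) and creates or removes the offloaded
 * FDB entry, then releases the work item and its netdev reference.
 */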
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
        struct mlx5_bridge_switchdev_fdb_work *fdb_work =
                container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
        struct switchdev_notifier_fdb_info *fdb_info =
                &fdb_work->fdb_info;
        struct mlx5_esw_bridge_offloads *br_offloads =
                fdb_work->br_offloads;
        struct net_device *dev = fdb_work->dev;
        u16 vport_num, esw_owner_vhca_id;

        rtnl_lock();

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                goto out;

        if (fdb_work->add)
                mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);
        else
                mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);

out:
        rtnl_unlock();
        mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

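/* Called from the atomic switchdev notifier, hence GFP_ATOMIC. The FDB info
 * and MAC address are copied and a reference is taken on the netdev; both are
 * released when the work item completes.
 */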
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
                                        struct switchdev_notifier_fdb_info *fdb_info,
                                        struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_bridge_switchdev_fdb_work *work;
        u8 *addr;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return ERR_PTR(-ENOMEM);

        INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
        memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

        addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (!addr) {
                kfree(work);
                return ERR_PTR(-ENOMEM);
        }
        ether_addr_copy(addr, fdb_info->addr);
        work->fdb_info.addr = addr;

        dev_hold(dev);
        work->dev = dev;
        work->br_offloads = br_offloads;
        work->add = add;
        return work;
}

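/* Atomic switchdev notifier. Attribute changes are handled inline;
 * FDB_ADD_TO_BRIDGE only refreshes the entry's activity state,
 * FDB_DEL_TO_BRIDGE is handled for peer ports only, and add/del-to-device
 * events are deferred to the ordered bridge workqueue.
 */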
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
                                           unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info;
        struct mlx5_bridge_switchdev_fdb_work *work;
        struct mlx5_eswitch *esw = br_offloads->esw;
        struct switchdev_notifier_info *info = ptr;
        u16 vport_num, esw_owner_vhca_id;
        struct net_device *upper, *rep;

        if (event == SWITCHDEV_PORT_ATTR_SET) {
                int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

                return notifier_from_errno(err);
        }

        upper = netdev_master_upper_dev_get_rcu(dev);
        if (!upper)
                return NOTIFY_DONE;
        if (!netif_is_bridge_master(upper))
                return NOTIFY_DONE;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return NOTIFY_DONE;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);
                mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                                fdb_info);
                break;
        case SWITCHDEV_FDB_DEL_TO_BRIDGE:
                /* only handle the event on peers */
                if (mlx5_esw_bridge_is_local(dev, rep, esw))
                        break;
                fallthrough;
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);

                work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
                                                               event == SWITCHDEV_FDB_ADD_TO_DEVICE,
                                                               fdb_info,
                                                               br_offloads);
                if (IS_ERR(work)) {
                        WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
                                  PTR_ERR(work));
                        return notifier_from_errno(PTR_ERR(work));
                }

                queue_work(br_offloads->wq, &work->work);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

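/* Periodic bridge offloads update, run under rtnl and re-armed every
 * MLX5_ESW_BRIDGE_UPDATE_INTERVAL milliseconds.
 */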
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    update_work.work);

        rtnl_lock();
        mlx5_esw_bridge_update(br_offloads);
        rtnl_unlock();

        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

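/* Set up bridge offloads for the representor's eswitch: the ordered
 * workqueue, the atomic and blocking switchdev notifiers, the netdevice
 * notifier and the periodic update work. On failure everything registered so
 * far is unwound and the error is only reported via esw_warn().
 */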
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw =
                mdev->priv.eswitch;
        int err;

        rtnl_lock();
        br_offloads = mlx5_esw_bridge_init(esw);
        rtnl_unlock();
        if (IS_ERR(br_offloads)) {
                esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
                return;
        }

        br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
        if (!br_offloads->wq) {
                esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                goto err_alloc_wq;
        }

        br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->nb);
        if (err) {
                esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
                goto err_register_swdev;
        }

        br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
        err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
        if (err) {
                esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
                goto err_register_swdev_blk;
        }

        br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
        err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        if (err) {
                esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
                         err);
                goto err_register_netdev;
        }
        INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
        return;

err_register_netdev:
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
        unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
        destroy_workqueue(br_offloads->wq);
err_alloc_wq:
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}

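/* Tear down bridge offloads in the reverse order of mlx5e_rep_bridge_init(). */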
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw =
                mdev->priv.eswitch;

        br_offloads = esw->br_offloads;
        if (!br_offloads)
                return;

        cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}