net/dsa/port.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *      Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
        return dsa_tree_notify(dp->ds->dst, e, v);
}

static void dsa_port_notify_bridge_fdb_flush(const struct dsa_port *dp)
{
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        struct switchdev_notifier_fdb_info info = {
                /* flush all VLANs */
                .vid = 0,
        };

        /* When the port becomes standalone it has already left the bridge.
         * Don't notify the bridge in that case.
         */
        if (!brport_dev)
                return;

        call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
                                 brport_dev, &info.info, NULL);
}

static void dsa_port_fast_age(const struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_fast_age)
                return;

        ds->ops->port_fast_age(ds, dp->index);

        dsa_port_notify_bridge_fdb_flush(dp);
}

static bool dsa_port_can_configure_learning(struct dsa_port *dp)
{
        struct switchdev_brport_flags flags = {
                .mask = BR_LEARNING,
        };
        struct dsa_switch *ds = dp->ds;
        int err;

        if (!ds->ops->port_bridge_flags || !ds->ops->port_pre_bridge_flags)
                return false;

        err = ds->ops->port_pre_bridge_flags(ds, dp->index, flags, NULL);
        return !err;
}

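/**
 * dsa_port_set_state - Program a new bridge port (STP) state into the switch
 * @dp: port whose state is changing
 * @state: new bridge port state (BR_STATE_*)
 * @do_fast_age: whether to flush dynamically learned FDB entries when the
 *               port leaves the Learning/Forwarding states
 *
 * Returns -EOPNOTSUPP if the driver does not implement port_stp_state_set,
 * 0 otherwise. If learning is enabled (or cannot be controlled) and the port
 * moves from Learning/Forwarding to Disabled/Blocking/Listening, its FDB is
 * fast-aged so that stale entries do not linger.
 */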
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (!ds->ops->port_stp_state_set)
                return -EOPNOTSUPP;

        ds->ops->port_stp_state_set(ds, port, state);

        if (!dsa_port_can_configure_learning(dp) ||
            (do_fast_age && dp->learning)) {
                /* Fast age FDB entries or flush appropriate forwarding database
                 * for the given port, if we are moving it from Learning or
                 * Forwarding state, to Disabled or Blocking or Listening state.
                 * Ports that were standalone before the STP state change don't
                 * need to fast age the FDB, since address learning is off in
                 * standalone mode.
                 */

                if ((dp->stp_state == BR_STATE_LEARNING ||
                     dp->stp_state == BR_STATE_FORWARDING) &&
                    (state == BR_STATE_DISABLED ||
                     state == BR_STATE_BLOCKING ||
                     state == BR_STATE_LISTENING))
                        dsa_port_fast_age(dp);
        }

        dp->stp_state = state;

        return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
                                   bool do_fast_age)
{
        struct dsa_switch *ds = dp->ds;
        int err;

        err = dsa_port_set_state(dp, state, do_fast_age);
        if (err && err != -EOPNOTSUPP) {
                dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
                        dp->index, state, ERR_PTR(err));
        }
}

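/* Variant of dsa_port_enable() for callers which already hold rtnl_lock */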
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
        int err;

        if (ds->ops->port_enable) {
                err = ds->ops->port_enable(ds, port, phy);
                if (err)
                        return err;
        }

        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

        if (dp->pl)
                phylink_start(dp->pl);

        return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
        int err;

        rtnl_lock();
        err = dsa_port_enable_rt(dp, phy);
        rtnl_unlock();

        return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (dp->pl)
                phylink_stop(dp->pl);

        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

        if (ds->ops->port_disable)
                ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
        rtnl_lock();
        dsa_port_disable_rt(dp);
        rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
                                         struct netlink_ext_ack *extack)
{
        const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
                                   BR_BCAST_FLOOD;
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        int flag, err;

        for_each_set_bit(flag, &mask, 32) {
                struct switchdev_brport_flags flags = {0};

                flags.mask = BIT(flag);

                if (br_port_flag_is_set(brport_dev, BIT(flag)))
                        flags.val = BIT(flag);

                err = dsa_port_bridge_flags(dp, flags, extack);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
        const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
        const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
                                   BR_BCAST_FLOOD;
        int flag, err;

        for_each_set_bit(flag, &mask, 32) {
                struct switchdev_brport_flags flags = {0};

                flags.mask = BIT(flag);
                flags.val = val & BIT(flag);

                err = dsa_port_bridge_flags(dp, flags, NULL);
                if (err && err != -EOPNOTSUPP)
                        dev_err(dp->ds->dev,
                                "failed to clear bridge port flag %lu: %pe\n",
                                flags.val, ERR_PTR(err));
        }
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
                                         struct netlink_ext_ack *extack)
{
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        struct net_device *br = dp->bridge_dev;
        int err;

        err = dsa_port_inherit_brport_flags(dp, extack);
        if (err)
                return err;

        err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
        if (err && err != -EOPNOTSUPP)
                return err;

        return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
        /* Configure the port for standalone mode (no address learning,
         * flood everything).
         * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
         * when the user requests it through netlink or sysfs, but not
         * automatically at port join or leave, so we need to reset the
         * brport flags ourselves. That is actually preferable: otherwise,
         * some setups would never get the notification they need. For
         * example, when a port leaves a LAG that offloads the bridge, it
         * becomes standalone, but as far as the bridge is concerned, no
         * port ever left.
         */
        dsa_port_clear_brport_flags(dp);

        /* The port has left the bridge and was put in BR_STATE_DISABLED by
         * the bridge layer, so put it back into BR_STATE_FORWARDING to keep
         * it functional as a standalone port.
         */
        dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

        /* VLAN filtering is handled by dsa_switch_bridge_leave */

        /* Ageing time may be global to the switch chip, so don't change it
         * here because we have no good reason (or value) to change it to.
         */
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
                                             struct net_device *bridge_dev)
{
        int bridge_num = dp->bridge_num;
        struct dsa_switch *ds = dp->ds;

        /* No bridge TX forwarding offload => do nothing */
        if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
                return;

        dp->bridge_num = -1;

        dsa_bridge_num_put(bridge_dev, bridge_num);

        /* Notify the chips only once the offload has been deactivated, so
         * that they can update their configuration accordingly.
         */
        ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
                                              bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
                                           struct net_device *bridge_dev)
{
        struct dsa_switch *ds = dp->ds;
        int bridge_num, err;

        if (!ds->ops->port_bridge_tx_fwd_offload)
                return false;

        bridge_num = dsa_bridge_num_get(bridge_dev,
                                        ds->num_fwd_offloading_bridges);
        if (bridge_num < 0)
                return false;

        dp->bridge_num = bridge_num;

        /* Notify the driver */
        err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
                                                  bridge_num);
        if (err) {
                dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
                return false;
        }

        return true;
}

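/**
 * dsa_port_bridge_join - Offload bridging for a port
 * @dp: port joining the bridge
 * @br: bridge net device being joined
 * @extack: netlink extended ack for error reporting
 *
 * Notify the whole switch tree through DSA_NOTIFIER_BRIDGE_JOIN, opt into
 * bridge TX forwarding offload when the driver supports it, register the
 * port as offloaded with switchdev, and finally mirror the bridge port
 * attributes (brport flags, STP state, VLAN filtering, ageing time) into
 * the hardware. Each step is rolled back if a later one fails.
 */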
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack)
{
        struct dsa_notifier_bridge_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .br = br,
        };
        struct net_device *dev = dp->slave;
        struct net_device *brport_dev;
        bool tx_fwd_offload;
        int err;

        /* Here the interface is already bridged. Reflect the current
         * configuration so that drivers can program their chips accordingly.
         */
        dp->bridge_dev = br;

        brport_dev = dsa_port_to_bridge_port(dp);

        err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
        if (err)
                goto out_rollback;

        tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);

        err = switchdev_bridge_port_offload(brport_dev, dev, dp,
                                            &dsa_slave_switchdev_notifier,
                                            &dsa_slave_switchdev_blocking_notifier,
                                            tx_fwd_offload, extack);
        if (err)
                goto out_rollback_unbridge;

        err = dsa_port_switchdev_sync_attrs(dp, extack);
        if (err)
                goto out_rollback_unoffload;

        return 0;

out_rollback_unoffload:
        switchdev_bridge_port_unoffload(brport_dev, dp,
                                        &dsa_slave_switchdev_notifier,
                                        &dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
        dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
        dp->bridge_dev = NULL;
        return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

        /* Don't try to unoffload something that is not offloaded */
        if (!brport_dev)
                return;

        switchdev_bridge_port_unoffload(brport_dev, dp,
                                        &dsa_slave_switchdev_notifier,
                                        &dsa_slave_switchdev_blocking_notifier);
}

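/* Undo dsa_port_bridge_join(): drop the TX forwarding offload, notify the
 * switch fabric of the leave and restore standalone port attributes.
 */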
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
        struct dsa_notifier_bridge_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .br = br,
        };
        int err;

        /* Here the port is already unbridged. Reflect the current configuration
         * so that drivers can program their chips accordingly.
         */
        dp->bridge_dev = NULL;

        dsa_port_bridge_tx_fwd_unoffload(dp, br);

        err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
        if (err)
                dev_err(dp->ds->dev,
                        "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
                        dp->index, ERR_PTR(err));

        dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
        };
        bool tx_enabled;

        if (!dp->lag_dev)
                return 0;

        /* On statically configured aggregates (e.g. loadbalance
         * without LACP) ports will always be tx_enabled, even if the
         * link is down. Thus we require both link_up and tx_enabled
         * in order to include it in the tx set.
         */
        tx_enabled = linfo->link_up && linfo->tx_enabled;

        if (tx_enabled == dp->lag_tx_enabled)
                return 0;

        dp->lag_tx_enabled = tx_enabled;

        return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

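/* Offload a LAG join. If the LAG itself is already bridged, also join that
 * bridge on behalf of the port, since the bridge layer only sees the LAG.
 */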
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .lag = lag,
                .info = uinfo,
        };
        struct net_device *bridge_dev;
        int err;

        dsa_lag_map(dp->ds->dst, lag);
        dp->lag_dev = lag;

        err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
        if (err)
                goto err_lag_join;

        bridge_dev = netdev_master_upper_dev_get(lag);
        if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
                return 0;

        err = dsa_port_bridge_join(dp, bridge_dev, extack);
        if (err)
                goto err_bridge_join;

        return 0;

err_bridge_join:
        dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
        dp->lag_dev = NULL;
        dsa_lag_unmap(dp->ds->dst, lag);
        return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
        if (dp->bridge_dev)
                dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .lag = lag,
        };
        int err;

        if (!dp->lag_dev)
                return;

        /* Port might have been part of a LAG that in turn was
         * attached to a bridge.
         */
        if (dp->bridge_dev)
                dsa_port_bridge_leave(dp, dp->bridge_dev);

        dp->lag_tx_enabled = false;
        dp->lag_dev = NULL;

        err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
        if (err)
                dev_err(dp->ds->dev,
                        "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
                        dp->index, ERR_PTR(err));

        dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
                                              bool vlan_filtering,
                                              struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;
        int err, i;

        /* VLAN awareness was off, so the question is "can we turn it on".
         * We may have had 8021q uppers, those need to go. Make sure we don't
         * enter an inconsistent state: deny changing the VLAN awareness state
         * as long as we have 8021q uppers.
         */
        if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
                struct net_device *upper_dev, *slave = dp->slave;
                struct net_device *br = dp->bridge_dev;
                struct list_head *iter;

                netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
                        struct bridge_vlan_info br_info;
                        u16 vid;

                        if (!is_vlan_dev(upper_dev))
                                continue;

                        vid = vlan_dev_vlan_id(upper_dev);

                        /* br_vlan_get_info() returns -EINVAL if the device is
                         * not found and -ENOENT if the VID is not found. A
                         * return of 0 means the VID exists on the bridge,
                         * which is a failure for us here.
                         */
                        err = br_vlan_get_info(br, vid, &br_info);
                        if (err == 0) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Must first remove VLAN uppers having VIDs also present in bridge");
                                return false;
                        }
                }
        }

        if (!ds->vlan_filtering_is_global)
                return true;

        /* For cases where enabling/disabling VLAN awareness is global to the
         * switch, we need to handle the case where multiple bridges span
         * different ports of the same switch device and one of them has a
         * different setting than what is being requested.
         */
        for (i = 0; i < ds->num_ports; i++) {
                struct net_device *other_bridge;

                other_bridge = dsa_to_port(ds, i)->bridge_dev;
                if (!other_bridge)
                        continue;
                /* If it's the same bridge, it also has same
                 * vlan_filtering setting => no need to check
                 */
                if (other_bridge == dp->bridge_dev)
                        continue;
                if (br_vlan_enabled(other_bridge) != vlan_filtering) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "VLAN filtering is a global setting");
                        return false;
                }
        }
        return true;
}

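/* Change the VLAN awareness of a port. On switches where the setting is
 * global to the chip, propagate it to every user port and roll everything
 * back to the old value if any of them fails.
 */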
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack)
{
        bool old_vlan_filtering = dsa_port_is_vlan_filtering(dp);
        struct dsa_switch *ds = dp->ds;
        bool apply;
        int err;

        if (!ds->ops->port_vlan_filtering)
                return -EOPNOTSUPP;

        /* We are called from dsa_slave_switchdev_blocking_event(),
         * which is not under rcu_read_lock(), unlike
         * dsa_slave_switchdev_event().
         */
        rcu_read_lock();
        apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
        rcu_read_unlock();
        if (!apply)
                return -EINVAL;

        if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
                return 0;

        err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
                                           extack);
        if (err)
                return err;

        if (ds->vlan_filtering_is_global) {
                int port;

                ds->vlan_filtering = vlan_filtering;

                for (port = 0; port < ds->num_ports; port++) {
                        struct net_device *slave;

                        if (!dsa_is_user_port(ds, port))
                                continue;

                        /* We might be called in the unbind path, so not
                         * all slave devices might still be registered.
                         */
                        slave = dsa_to_port(ds, port)->slave;
                        if (!slave)
                                continue;

                        err = dsa_slave_manage_vlan_filtering(slave,
                                                              vlan_filtering);
                        if (err)
                                goto restore;
                }
        } else {
                dp->vlan_filtering = vlan_filtering;

                err = dsa_slave_manage_vlan_filtering(dp->slave,
                                                      vlan_filtering);
                if (err)
                        goto restore;
        }

        return 0;

restore:
        ds->ops->port_vlan_filtering(ds, dp->index, old_vlan_filtering, NULL);

        if (ds->vlan_filtering_is_global)
                ds->vlan_filtering = old_vlan_filtering;
        else
                dp->vlan_filtering = old_vlan_filtering;

        return err;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;

        if (!dp->bridge_dev)
                return false;

        return (!ds->configure_vlan_while_not_filtering &&
                !br_vlan_enabled(dp->bridge_dev));
}

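/* Convert the bridge ageing time from clock_t to milliseconds and notify
 * all switches in the tree through DSA_NOTIFIER_AGEING_TIME.
 */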
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
        unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
        struct dsa_notifier_ageing_time_info info;
        int err;

        info.ageing_time = ageing_time;

        err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
        if (err)
                return err;

        dp->ageing_time = ageing_time;

        return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_pre_bridge_flags)
                return -EINVAL;

        return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

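/* Offload bridge port flags. When address learning is being turned off on
 * a port that was in the Learning or Forwarding state, fast-age its FDB so
 * that previously learned entries do not keep steering traffic.
 */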
int dsa_port_bridge_flags(struct dsa_port *dp,
                          struct switchdev_brport_flags flags,
                          struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;
        int err;

        if (!ds->ops->port_bridge_flags)
                return -EOPNOTSUPP;

        err = ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
        if (err)
                return err;

        if (flags.mask & BR_LEARNING) {
                bool learning = flags.val & BR_LEARNING;

                if (learning == dp->learning)
                        return 0;

                if ((dp->learning && !learning) &&
                    (dp->stp_state == BR_STATE_LEARNING ||
                     dp->stp_state == BR_STATE_FORWARDING))
                        dsa_port_fast_age(dp);

                dp->learning = learning;
        }

        return 0;
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match)
{
        struct dsa_notifier_mtu_info info = {
                .sw_index = dp->ds->index,
                .targeted_match = targeted_match,
                .port = dp->index,
                .mtu = new_mtu,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

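/* Host FDB entries are addresses meant to be trapped to the host: add the
 * address to the DSA master's unicast filter and let the switch fabric
 * know through DSA_NOTIFIER_HOST_FDB_ADD.
 */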
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_uc_add(cpu_dp->master, addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_uc_del(cpu_dp->master, addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (!ds->ops->port_fdb_dump)
                return -EOPNOTSUPP;

        return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_mc_add(cpu_dp->master, mdb->addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_mc_del(cpu_dp->master, mdb->addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct netlink_ext_ack *extack)
{
        struct dsa_notifier_vlan_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vlan = vlan,
                .extack = extack,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan)
{
        struct dsa_notifier_vlan_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vlan = vlan,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp)
{
        struct dsa_notifier_mrp_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp)
{
        struct dsa_notifier_mrp_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp)
{
        struct dsa_notifier_mrp_ring_role_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp)
{
        struct dsa_notifier_mrp_ring_role_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops)
{
        cpu_dp->rcv = tag_ops->rcv;
        cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
        struct device_node *phy_dn;
        struct phy_device *phydev;

        phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
        if (!phy_dn)
                return NULL;

        phydev = of_phy_find_device(phy_dn);
        if (!phydev) {
                of_node_put(phy_dn);
                return ERR_PTR(-EPROBE_DEFER);
        }

        of_node_put(phy_dn);
        return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
                                      unsigned long *supported,
                                      struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_validate)
                return;

        ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
                                               struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;
        int err;

        /* Only called for inband modes */
        if (!ds->ops->phylink_mac_link_state) {
                state->link = 0;
                return;
        }

        err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
        if (err < 0) {
                dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
                        dp->index, err);
                state->link = 0;
        }
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
                                        unsigned int mode,
                                        const struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_config)
                return;

        ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_an_restart)
                return;

        ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
                                           unsigned int mode,
                                           phy_interface_t interface)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct phy_device *phydev = NULL;
        struct dsa_switch *ds = dp->ds;

        if (dsa_is_user_port(ds, dp->index))
                phydev = dp->slave->phydev;

        if (!ds->ops->phylink_mac_link_down) {
                if (ds->ops->adjust_link && phydev)
                        ds->ops->adjust_link(ds, dp->index, phydev);
                return;
        }

        ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
                                         struct phy_device *phydev,
                                         unsigned int mode,
                                         phy_interface_t interface,
                                         int speed, int duplex,
                                         bool tx_pause, bool rx_pause)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_link_up) {
                if (ds->ops->adjust_link && phydev)
                        ds->ops->adjust_link(ds, dp->index, phydev);
                return;
        }

        ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
                                     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
        .validate = dsa_port_phylink_validate,
        .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
        .mac_config = dsa_port_phylink_mac_config,
        .mac_an_restart = dsa_port_phylink_mac_an_restart,
        .mac_link_down = dsa_port_phylink_mac_link_down,
        .mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
        struct dsa_switch *ds = dp->ds;
        struct phy_device *phydev;
        int port = dp->index;
        int err = 0;

        phydev = dsa_port_get_phy_device(dp);
        if (!phydev)
                return 0;

        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        if (enable) {
                err = genphy_resume(phydev);
                if (err < 0)
                        goto err_put_dev;

                err = genphy_read_status(phydev);
                if (err < 0)
                        goto err_put_dev;
        } else {
                err = genphy_suspend(phydev);
                if (err < 0)
                        goto err_put_dev;
        }

        if (ds->ops->adjust_link)
                ds->ops->adjust_link(ds, port, phydev);

        dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
        put_device(&phydev->mdio.dev);
        return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
        struct device_node *dn = dp->dn;
        struct dsa_switch *ds = dp->ds;
        struct phy_device *phydev;
        int port = dp->index;
        phy_interface_t mode;
        int err;

        err = of_phy_register_fixed_link(dn);
        if (err) {
                dev_err(ds->dev,
                        "failed to register the fixed PHY of port %d\n",
                        port);
                return err;
        }

        phydev = of_phy_find_device(dn);

        err = of_get_phy_mode(dn, &mode);
        if (err)
                mode = PHY_INTERFACE_MODE_NA;
        phydev->interface = mode;

        genphy_read_status(phydev);

        if (ds->ops->adjust_link)
                ds->ops->adjust_link(ds, port, phydev);

        put_device(&phydev->mdio.dev);

        return 0;
}

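/* Create a phylink instance for the port described by its device tree node
 * and try to connect its PHY; the absence of a PHY (-ENODEV) is not treated
 * as an error.
 */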
static int dsa_port_phylink_register(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct device_node *port_dn = dp->dn;
        phy_interface_t mode;
        int err;

        err = of_get_phy_mode(port_dn, &mode);
        if (err)
                mode = PHY_INTERFACE_MODE_NA;

        dp->pl_config.dev = ds->dev;
        dp->pl_config.type = PHYLINK_DEV;
        dp->pl_config.pcs_poll = ds->pcs_poll;

        dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
                                mode, &dsa_port_phylink_mac_ops);
        if (IS_ERR(dp->pl)) {
                pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
                return PTR_ERR(dp->pl);
        }

        err = phylink_of_phy_connect(dp->pl, port_dn, 0);
        if (err && err != -ENODEV) {
                pr_err("could not attach to PHY: %d\n", err);
                goto err_phy_connect;
        }

        return 0;

err_phy_connect:
        phylink_destroy(dp->pl);
        return err;
}

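/* Register the link of a port described in the device tree. Drivers that
 * still implement the legacy adjust_link() callback get PHYLIB-based
 * fixed-link/PHY setup; everything else goes through phylink.
 */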
int dsa_port_link_register_of(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct device_node *phy_np;
        int port = dp->index;

        if (!ds->ops->adjust_link) {
                phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
                if (of_phy_is_fixed_link(dp->dn) || phy_np) {
                        if (ds->ops->phylink_mac_link_down)
                                ds->ops->phylink_mac_link_down(ds, port,
                                        MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
                        of_node_put(phy_np);
                        return dsa_port_phylink_register(dp);
                }
                of_node_put(phy_np);
                return 0;
        }

        dev_warn(ds->dev,
                 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

        if (of_phy_is_fixed_link(dp->dn))
                return dsa_port_fixed_link_register_of(dp);
        else
                return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->adjust_link && dp->pl) {
                rtnl_lock();
                phylink_disconnect_phy(dp->pl);
                rtnl_unlock();
                phylink_destroy(dp->pl);
                dp->pl = NULL;
                return;
        }

        if (of_phy_is_fixed_link(dp->dn))
                of_phy_deregister_fixed_link(dp->dn);
        else
                dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_strings(phydev, data);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_stats(phydev, NULL, data);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_sset_count(phydev);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
        struct dsa_notifier_hsr_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .hsr = hsr,
        };
        int err;

        dp->hsr_dev = hsr;

        err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
        if (err)
                dp->hsr_dev = NULL;

        return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
        struct dsa_notifier_hsr_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .hsr = hsr,
        };
        int err;

        dp->hsr_dev = NULL;

        err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
        if (err)
                dev_err(dp->ds->dev,
                        "port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
                        dp->index, ERR_PTR(err));
}

int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
{
        struct dsa_notifier_tag_8021q_vlan_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vid = vid,
        };

        if (broadcast)
                return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);

        return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast)
{
        struct dsa_notifier_tag_8021q_vlan_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vid = vid,
        };
        int err;

        if (broadcast)
                err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
        else
                err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
        if (err)
                dev_err(dp->ds->dev,
                        "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n",
                        dp->index, vid, ERR_PTR(err));
}