Merge branch 'net-allow-user-specify-TC-action-HW-stats-type'
[platform/kernel/linux-rpi.git] / net/dsa/slave.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/tc_act/tc_mirred.h>
19 #include <linux/if_bridge.h>
20 #include <linux/netpoll.h>
21 #include <linux/ptp_classify.h>
22
23 #include "dsa_priv.h"
24
25 /* slave mii_bus handling ***************************************************/
26 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
27 {
28         struct dsa_switch *ds = bus->priv;
29
30         if (ds->phys_mii_mask & (1 << addr))
31                 return ds->ops->phy_read(ds, addr, reg);
32
33         return 0xffff;
34 }
35
36 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
37 {
38         struct dsa_switch *ds = bus->priv;
39
40         if (ds->phys_mii_mask & (1 << addr))
41                 return ds->ops->phy_write(ds, addr, reg, val);
42
43         return 0;
44 }
45
46 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
47 {
48         ds->slave_mii_bus->priv = (void *)ds;
49         ds->slave_mii_bus->name = "dsa slave smi";
50         ds->slave_mii_bus->read = dsa_slave_phy_read;
51         ds->slave_mii_bus->write = dsa_slave_phy_write;
52         snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
53                  ds->dst->index, ds->index);
54         ds->slave_mii_bus->parent = ds->dev;
55         ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
56 }
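/* Purely for illustration: a rough sketch of how this helper is consumed.
 * The caller (normally the DSA core during switch setup) allocates the
 * slave MII bus, lets dsa_slave_mii_bus_init() fill in the accessors and
 * then registers the bus. The function name below is made up.
 */
static int example_register_slave_mii_bus(struct dsa_switch *ds)
{
        int err;

        ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
        if (!ds->slave_mii_bus)
                return -ENOMEM;

        dsa_slave_mii_bus_init(ds);

        err = mdiobus_register(ds->slave_mii_bus);
        if (err < 0)
                return err;

        return 0;
}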
57
58
59 /* slave device handling ****************************************************/
60 static int dsa_slave_get_iflink(const struct net_device *dev)
61 {
62         return dsa_slave_to_master(dev)->ifindex;
63 }
64
65 static int dsa_slave_open(struct net_device *dev)
66 {
67         struct net_device *master = dsa_slave_to_master(dev);
68         struct dsa_port *dp = dsa_slave_to_port(dev);
69         int err;
70
71         if (!(master->flags & IFF_UP))
72                 return -ENETDOWN;
73
74         if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
75                 err = dev_uc_add(master, dev->dev_addr);
76                 if (err < 0)
77                         goto out;
78         }
79
80         if (dev->flags & IFF_ALLMULTI) {
81                 err = dev_set_allmulti(master, 1);
82                 if (err < 0)
83                         goto del_unicast;
84         }
85         if (dev->flags & IFF_PROMISC) {
86                 err = dev_set_promiscuity(master, 1);
87                 if (err < 0)
88                         goto clear_allmulti;
89         }
90
91         err = dsa_port_enable(dp, dev->phydev);
92         if (err)
93                 goto clear_promisc;
94
95         phylink_start(dp->pl);
96
97         return 0;
98
99 clear_promisc:
100         if (dev->flags & IFF_PROMISC)
101                 dev_set_promiscuity(master, -1);
102 clear_allmulti:
103         if (dev->flags & IFF_ALLMULTI)
104                 dev_set_allmulti(master, -1);
105 del_unicast:
106         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
107                 dev_uc_del(master, dev->dev_addr);
108 out:
109         return err;
110 }
111
112 static int dsa_slave_close(struct net_device *dev)
113 {
114         struct net_device *master = dsa_slave_to_master(dev);
115         struct dsa_port *dp = dsa_slave_to_port(dev);
116
117         phylink_stop(dp->pl);
118
119         dsa_port_disable(dp);
120
121         dev_mc_unsync(master, dev);
122         dev_uc_unsync(master, dev);
123         if (dev->flags & IFF_ALLMULTI)
124                 dev_set_allmulti(master, -1);
125         if (dev->flags & IFF_PROMISC)
126                 dev_set_promiscuity(master, -1);
127
128         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
129                 dev_uc_del(master, dev->dev_addr);
130
131         return 0;
132 }
133
134 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
135 {
136         struct net_device *master = dsa_slave_to_master(dev);
137         if (dev->flags & IFF_UP) {
138                 if (change & IFF_ALLMULTI)
139                         dev_set_allmulti(master,
140                                          dev->flags & IFF_ALLMULTI ? 1 : -1);
141                 if (change & IFF_PROMISC)
142                         dev_set_promiscuity(master,
143                                             dev->flags & IFF_PROMISC ? 1 : -1);
144         }
145 }
146
147 static void dsa_slave_set_rx_mode(struct net_device *dev)
148 {
149         struct net_device *master = dsa_slave_to_master(dev);
150
151         dev_mc_sync(master, dev);
152         dev_uc_sync(master, dev);
153 }
154
155 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
156 {
157         struct net_device *master = dsa_slave_to_master(dev);
158         struct sockaddr *addr = a;
159         int err;
160
161         if (!is_valid_ether_addr(addr->sa_data))
162                 return -EADDRNOTAVAIL;
163
164         if (!(dev->flags & IFF_UP))
165                 goto out;
166
167         if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
168                 err = dev_uc_add(master, addr->sa_data);
169                 if (err < 0)
170                         return err;
171         }
172
173         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
174                 dev_uc_del(master, dev->dev_addr);
175
176 out:
177         ether_addr_copy(dev->dev_addr, addr->sa_data);
178
179         return 0;
180 }
181
182 struct dsa_slave_dump_ctx {
183         struct net_device *dev;
184         struct sk_buff *skb;
185         struct netlink_callback *cb;
186         int idx;
187 };
188
189 static int
190 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
191                            bool is_static, void *data)
192 {
193         struct dsa_slave_dump_ctx *dump = data;
194         u32 portid = NETLINK_CB(dump->cb->skb).portid;
195         u32 seq = dump->cb->nlh->nlmsg_seq;
196         struct nlmsghdr *nlh;
197         struct ndmsg *ndm;
198
199         if (dump->idx < dump->cb->args[2])
200                 goto skip;
201
202         nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
203                         sizeof(*ndm), NLM_F_MULTI);
204         if (!nlh)
205                 return -EMSGSIZE;
206
207         ndm = nlmsg_data(nlh);
208         ndm->ndm_family  = AF_BRIDGE;
209         ndm->ndm_pad1    = 0;
210         ndm->ndm_pad2    = 0;
211         ndm->ndm_flags   = NTF_SELF;
212         ndm->ndm_type    = 0;
213         ndm->ndm_ifindex = dump->dev->ifindex;
214         ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
215
216         if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
217                 goto nla_put_failure;
218
219         if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
220                 goto nla_put_failure;
221
222         nlmsg_end(dump->skb, nlh);
223
224 skip:
225         dump->idx++;
226         return 0;
227
228 nla_put_failure:
229         nlmsg_cancel(dump->skb, nlh);
230         return -EMSGSIZE;
231 }
232
233 static int
234 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
235                    struct net_device *dev, struct net_device *filter_dev,
236                    int *idx)
237 {
238         struct dsa_port *dp = dsa_slave_to_port(dev);
239         struct dsa_slave_dump_ctx dump = {
240                 .dev = dev,
241                 .skb = skb,
242                 .cb = cb,
243                 .idx = *idx,
244         };
245         int err;
246
247         err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
248         *idx = dump.idx;
249
250         return err;
251 }
252
253 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
254 {
255         struct dsa_slave_priv *p = netdev_priv(dev);
256         struct dsa_switch *ds = p->dp->ds;
257         int port = p->dp->index;
258
259         /* Pass through to switch driver if it supports timestamping */
260         switch (cmd) {
261         case SIOCGHWTSTAMP:
262                 if (ds->ops->port_hwtstamp_get)
263                         return ds->ops->port_hwtstamp_get(ds, port, ifr);
264                 break;
265         case SIOCSHWTSTAMP:
266                 if (ds->ops->port_hwtstamp_set)
267                         return ds->ops->port_hwtstamp_set(ds, port, ifr);
268                 break;
269         }
270
271         return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
272 }
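/* From user space, the SIOCSHWTSTAMP case above is reached through the
 * standard hwtstamp_config ioctl. A minimal, hypothetical user space
 * example (fd is any socket on the interface, e.g. an AF_INET UDP socket;
 * the function name is made up):
 *
 *      #include <linux/net_tstamp.h>
 *      #include <linux/sockios.h>
 *      #include <net/if.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *
 *      static int example_enable_hw_timestamping(int fd, const char *ifname)
 *      {
 *              struct hwtstamp_config cfg = {
 *                      .tx_type   = HWTSTAMP_TX_ON,
 *                      .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *              };
 *              struct ifreq ifr;
 *
 *              memset(&ifr, 0, sizeof(ifr));
 *              strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *              ifr.ifr_data = (void *)&cfg;
 *
 *              return ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *      }
 */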
273
274 static int dsa_slave_port_attr_set(struct net_device *dev,
275                                    const struct switchdev_attr *attr,
276                                    struct switchdev_trans *trans)
277 {
278         struct dsa_port *dp = dsa_slave_to_port(dev);
279         int ret;
280
281         switch (attr->id) {
282         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
283                 ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
284                 break;
285         case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
286                 ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
287                                               trans);
288                 break;
289         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
290                 ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
291                 break;
292         case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
293                 ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
294                                                 trans);
295                 break;
296         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
297                 ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
298                 break;
299         case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
300                 ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
301                 break;
302         default:
303                 ret = -EOPNOTSUPP;
304                 break;
305         }
306
307         return ret;
308 }
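/* Each attribute above ends up in a dsa_switch_ops hook. As a sketch only
 * (name invented, register programming elided), a driver's STP state hook
 * reached via dsa_port_set_state() could look like this:
 */
static void example_port_stp_state_set(struct dsa_switch *ds, int port,
                                       u8 state)
{
        switch (state) {
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
        case BR_STATE_LISTENING:
                /* disable learning and forwarding on this port */
                break;
        case BR_STATE_LEARNING:
                /* enable address learning only */
                break;
        case BR_STATE_FORWARDING:
                /* enable learning and forwarding */
                break;
        }
}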
309
310 static int dsa_slave_vlan_add(struct net_device *dev,
311                               const struct switchdev_obj *obj,
312                               struct switchdev_trans *trans)
313 {
314         struct dsa_port *dp = dsa_slave_to_port(dev);
315         struct switchdev_obj_port_vlan vlan;
316         int err;
317
318         if (obj->orig_dev != dev)
319                 return -EOPNOTSUPP;
320
321         if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
322                 return 0;
323
324         vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
325
326         err = dsa_port_vlan_add(dp, &vlan, trans);
327         if (err)
328                 return err;
329
330         /* We need the dedicated CPU port to be a member of the VLAN as well.
331          * Even though drivers often handle CPU membership in special ways,
332          * it doesn't make sense to program a PVID, so clear this flag.
333          */
334         vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
335
336         err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
337         if (err)
338                 return err;
339
340         return 0;
341 }
342
343 static int dsa_slave_port_obj_add(struct net_device *dev,
344                                   const struct switchdev_obj *obj,
345                                   struct switchdev_trans *trans,
346                                   struct netlink_ext_ack *extack)
347 {
348         struct dsa_port *dp = dsa_slave_to_port(dev);
349         int err;
350
351         /* For the prepare phase, ensure the full set of changes is feasible in
352          * one go in order to signal a failure properly. If an operation is not
353          * supported, return -EOPNOTSUPP.
354          */
355
356         switch (obj->id) {
357         case SWITCHDEV_OBJ_ID_PORT_MDB:
358                 if (obj->orig_dev != dev)
359                         return -EOPNOTSUPP;
360                 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
361                 break;
362         case SWITCHDEV_OBJ_ID_HOST_MDB:
363                 /* DSA can directly translate this to a normal MDB add,
364                  * but on the CPU port.
365                  */
366                 err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
367                                        trans);
368                 break;
369         case SWITCHDEV_OBJ_ID_PORT_VLAN:
370                 err = dsa_slave_vlan_add(dev, obj, trans);
371                 break;
372         default:
373                 err = -EOPNOTSUPP;
374                 break;
375         }
376
377         return err;
378 }
379
380 static int dsa_slave_vlan_del(struct net_device *dev,
381                               const struct switchdev_obj *obj)
382 {
383         struct dsa_port *dp = dsa_slave_to_port(dev);
384
385         if (obj->orig_dev != dev)
386                 return -EOPNOTSUPP;
387
388         if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
389                 return 0;
390
391         /* Do not deprogram the CPU port as it may be shared with other user
392          * ports which can be members of this VLAN as well.
393          */
394         return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
395 }
396
397 static int dsa_slave_port_obj_del(struct net_device *dev,
398                                   const struct switchdev_obj *obj)
399 {
400         struct dsa_port *dp = dsa_slave_to_port(dev);
401         int err;
402
403         switch (obj->id) {
404         case SWITCHDEV_OBJ_ID_PORT_MDB:
405                 if (obj->orig_dev != dev)
406                         return -EOPNOTSUPP;
407                 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
408                 break;
409         case SWITCHDEV_OBJ_ID_HOST_MDB:
410                 /* DSA can directly translate this to a normal MDB
411                  * deletion, but on the CPU port.
412                  */
413                 err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
414                 break;
415         case SWITCHDEV_OBJ_ID_PORT_VLAN:
416                 err = dsa_slave_vlan_del(dev, obj);
417                 break;
418         default:
419                 err = -EOPNOTSUPP;
420                 break;
421         }
422
423         return err;
424 }
425
426 static int dsa_slave_get_port_parent_id(struct net_device *dev,
427                                         struct netdev_phys_item_id *ppid)
428 {
429         struct dsa_port *dp = dsa_slave_to_port(dev);
430         struct dsa_switch *ds = dp->ds;
431         struct dsa_switch_tree *dst = ds->dst;
432
433         /* For non-legacy ports, devlink is used and it takes
434          * care of the name generation. This ndo implementation
435          * should be removed with legacy support.
436          */
437         if (dp->ds->devlink)
438                 return -EOPNOTSUPP;
439
440         ppid->id_len = sizeof(dst->index);
441         memcpy(&ppid->id, &dst->index, ppid->id_len);
442
443         return 0;
444 }
445
446 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
447                                                      struct sk_buff *skb)
448 {
449 #ifdef CONFIG_NET_POLL_CONTROLLER
450         struct dsa_slave_priv *p = netdev_priv(dev);
451
452         if (p->netpoll)
453                 netpoll_send_skb(p->netpoll, skb);
454 #else
455         BUG();
456 #endif
457         return NETDEV_TX_OK;
458 }
459
460 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
461                                  struct sk_buff *skb)
462 {
463         struct dsa_switch *ds = p->dp->ds;
464         struct sk_buff *clone;
465         unsigned int type;
466
467         type = ptp_classify_raw(skb);
468         if (type == PTP_CLASS_NONE)
469                 return;
470
471         if (!ds->ops->port_txtstamp)
472                 return;
473
474         clone = skb_clone_sk(skb);
475         if (!clone)
476                 return;
477
478         DSA_SKB_CB(skb)->clone = clone;
479
480         if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
481                 return;
482
483         kfree_skb(clone);
484 }
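/* The clone handed to ds->ops->port_txtstamp() above is what the driver is
 * expected to complete later. A minimal sketch of such a hook (the name is
 * made up and the hardware-specific deferral is only described in comments):
 */
static bool example_port_txtstamp(struct dsa_switch *ds, int port,
                                  struct sk_buff *clone, unsigned int type)
{
        /* only handle PTPv2 event messages in this sketch */
        if ((type & PTP_CLASS_VMASK) != PTP_CLASS_V2)
                return false;

        /* Stash the clone in driver state (not shown). Once the hardware
         * timestamp has been read back, fill skb_hwtstamps(clone) and call
         * skb_complete_tx_timestamp() on it.
         */
        return true;
}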
485
486 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
487 {
488         /* SKBs for netpoll still need to be mangled with the protocol-specific
489          * tag to be successfully transmitted
490          */
491         if (unlikely(netpoll_tx_running(dev)))
492                 return dsa_slave_netpoll_send_skb(dev, skb);
493
494         /* Queue the SKB for transmission on the parent interface, but
495          * do not modify its EtherType
496          */
497         skb->dev = dsa_slave_to_master(dev);
498         dev_queue_xmit(skb);
499
500         return NETDEV_TX_OK;
501 }
502 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
503
504 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
505 {
506         struct dsa_slave_priv *p = netdev_priv(dev);
507         struct pcpu_sw_netstats *s;
508         struct sk_buff *nskb;
509
510         s = this_cpu_ptr(p->stats64);
511         u64_stats_update_begin(&s->syncp);
512         s->tx_packets++;
513         s->tx_bytes += skb->len;
514         u64_stats_update_end(&s->syncp);
515
516         DSA_SKB_CB(skb)->clone = NULL;
517
518         /* Identify PTP protocol packets, clone them, and pass them to the
519          * switch driver
520          */
521         dsa_skb_tx_timestamp(p, skb);
522
523         /* Transmit function may have to reallocate the original SKB,
524          * in which case it must have freed it. Only free it here on error.
525          */
526         nskb = p->xmit(skb, dev);
527         if (!nskb) {
528                 kfree_skb(skb);
529                 return NETDEV_TX_OK;
530         }
531
532         return dsa_enqueue_skb(nskb, dev);
533 }
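/* p->xmit above points at the tagging protocol's transmit hook. As a rough
 * illustration only (the 4-byte header layout is invented), a header-style
 * tagger prepends its tag and hands the SKB back for dsa_enqueue_skb():
 */
static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u8 *tag;

        /* make sure there is headroom for the hypothetical 4-byte tag */
        if (skb_cow_head(skb, 4) < 0)
                return NULL;

        tag = skb_push(skb, 4);
        memset(tag, 0, 4);
        tag[0] = 0x80;          /* invented "from CPU" opcode */
        tag[3] = dp->index;     /* destination switch port */

        return skb;
}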
534
535 /* ethtool operations *******************************************************/
536
537 static void dsa_slave_get_drvinfo(struct net_device *dev,
538                                   struct ethtool_drvinfo *drvinfo)
539 {
540         strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
541         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
542         strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
543 }
544
545 static int dsa_slave_get_regs_len(struct net_device *dev)
546 {
547         struct dsa_port *dp = dsa_slave_to_port(dev);
548         struct dsa_switch *ds = dp->ds;
549
550         if (ds->ops->get_regs_len)
551                 return ds->ops->get_regs_len(ds, dp->index);
552
553         return -EOPNOTSUPP;
554 }
555
556 static void
557 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
558 {
559         struct dsa_port *dp = dsa_slave_to_port(dev);
560         struct dsa_switch *ds = dp->ds;
561
562         if (ds->ops->get_regs)
563                 ds->ops->get_regs(ds, dp->index, regs, _p);
564 }
565
566 static int dsa_slave_nway_reset(struct net_device *dev)
567 {
568         struct dsa_port *dp = dsa_slave_to_port(dev);
569
570         return phylink_ethtool_nway_reset(dp->pl);
571 }
572
573 static int dsa_slave_get_eeprom_len(struct net_device *dev)
574 {
575         struct dsa_port *dp = dsa_slave_to_port(dev);
576         struct dsa_switch *ds = dp->ds;
577
578         if (ds->cd && ds->cd->eeprom_len)
579                 return ds->cd->eeprom_len;
580
581         if (ds->ops->get_eeprom_len)
582                 return ds->ops->get_eeprom_len(ds);
583
584         return 0;
585 }
586
587 static int dsa_slave_get_eeprom(struct net_device *dev,
588                                 struct ethtool_eeprom *eeprom, u8 *data)
589 {
590         struct dsa_port *dp = dsa_slave_to_port(dev);
591         struct dsa_switch *ds = dp->ds;
592
593         if (ds->ops->get_eeprom)
594                 return ds->ops->get_eeprom(ds, eeprom, data);
595
596         return -EOPNOTSUPP;
597 }
598
599 static int dsa_slave_set_eeprom(struct net_device *dev,
600                                 struct ethtool_eeprom *eeprom, u8 *data)
601 {
602         struct dsa_port *dp = dsa_slave_to_port(dev);
603         struct dsa_switch *ds = dp->ds;
604
605         if (ds->ops->set_eeprom)
606                 return ds->ops->set_eeprom(ds, eeprom, data);
607
608         return -EOPNOTSUPP;
609 }
610
611 static void dsa_slave_get_strings(struct net_device *dev,
612                                   uint32_t stringset, uint8_t *data)
613 {
614         struct dsa_port *dp = dsa_slave_to_port(dev);
615         struct dsa_switch *ds = dp->ds;
616
617         if (stringset == ETH_SS_STATS) {
618                 int len = ETH_GSTRING_LEN;
619
620                 strncpy(data, "tx_packets", len);
621                 strncpy(data + len, "tx_bytes", len);
622                 strncpy(data + 2 * len, "rx_packets", len);
623                 strncpy(data + 3 * len, "rx_bytes", len);
624                 if (ds->ops->get_strings)
625                         ds->ops->get_strings(ds, dp->index, stringset,
626                                              data + 4 * len);
627         }
628 }
629
630 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
631                                         struct ethtool_stats *stats,
632                                         uint64_t *data)
633 {
634         struct dsa_port *dp = dsa_slave_to_port(dev);
635         struct dsa_slave_priv *p = netdev_priv(dev);
636         struct dsa_switch *ds = dp->ds;
637         struct pcpu_sw_netstats *s;
638         unsigned int start;
639         int i;
640
641         for_each_possible_cpu(i) {
642                 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
643
644                 s = per_cpu_ptr(p->stats64, i);
645                 do {
646                         start = u64_stats_fetch_begin_irq(&s->syncp);
647                         tx_packets = s->tx_packets;
648                         tx_bytes = s->tx_bytes;
649                         rx_packets = s->rx_packets;
650                         rx_bytes = s->rx_bytes;
651                 } while (u64_stats_fetch_retry_irq(&s->syncp, start));
652                 data[0] += tx_packets;
653                 data[1] += tx_bytes;
654                 data[2] += rx_packets;
655                 data[3] += rx_bytes;
656         }
657         if (ds->ops->get_ethtool_stats)
658                 ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
659 }
660
661 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
662 {
663         struct dsa_port *dp = dsa_slave_to_port(dev);
664         struct dsa_switch *ds = dp->ds;
665
666         if (sset == ETH_SS_STATS) {
667                 int count;
668
669                 count = 4;
670                 if (ds->ops->get_sset_count)
671                         count += ds->ops->get_sset_count(ds, dp->index, sset);
672
673                 return count;
674         }
675
676         return -EOPNOTSUPP;
677 }
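/* The first four slots are the software counters kept in this file; whatever
 * a driver reports through get_sset_count()/get_strings()/get_ethtool_stats()
 * is appended after them. A made-up driver with two hardware counters would
 * wire that up roughly as follows:
 */
static const char example_stat_strings[][ETH_GSTRING_LEN] = {
        "in_good_octets",
        "out_good_octets",
};

static int example_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
        return sset == ETH_SS_STATS ? ARRAY_SIZE(example_stat_strings) : 0;
}

static void example_get_strings(struct dsa_switch *ds, int port,
                                u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, example_stat_strings,
                       sizeof(example_stat_strings));
}

static void example_get_ethtool_stats(struct dsa_switch *ds, int port,
                                      u64 *data)
{
        /* read the two hypothetical hardware counters for @port */
        data[0] = 0;
        data[1] = 0;
}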
678
679 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
680 {
681         struct dsa_port *dp = dsa_slave_to_port(dev);
682         struct dsa_switch *ds = dp->ds;
683
684         phylink_ethtool_get_wol(dp->pl, w);
685
686         if (ds->ops->get_wol)
687                 ds->ops->get_wol(ds, dp->index, w);
688 }
689
690 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
691 {
692         struct dsa_port *dp = dsa_slave_to_port(dev);
693         struct dsa_switch *ds = dp->ds;
694         int ret = -EOPNOTSUPP;
695
696         phylink_ethtool_set_wol(dp->pl, w);
697
698         if (ds->ops->set_wol)
699                 ret = ds->ops->set_wol(ds, dp->index, w);
700
701         return ret;
702 }
703
704 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
705 {
706         struct dsa_port *dp = dsa_slave_to_port(dev);
707         struct dsa_switch *ds = dp->ds;
708         int ret;
709
710         /* Port's PHY and MAC both need to be EEE capable */
711         if (!dev->phydev || !dp->pl)
712                 return -ENODEV;
713
714         if (!ds->ops->set_mac_eee)
715                 return -EOPNOTSUPP;
716
717         ret = ds->ops->set_mac_eee(ds, dp->index, e);
718         if (ret)
719                 return ret;
720
721         return phylink_ethtool_set_eee(dp->pl, e);
722 }
723
724 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
725 {
726         struct dsa_port *dp = dsa_slave_to_port(dev);
727         struct dsa_switch *ds = dp->ds;
728         int ret;
729
730         /* Port's PHY and MAC both need to be EEE capable */
731         if (!dev->phydev || !dp->pl)
732                 return -ENODEV;
733
734         if (!ds->ops->get_mac_eee)
735                 return -EOPNOTSUPP;
736
737         ret = ds->ops->get_mac_eee(ds, dp->index, e);
738         if (ret)
739                 return ret;
740
741         return phylink_ethtool_get_eee(dp->pl, e);
742 }
743
744 static int dsa_slave_get_link_ksettings(struct net_device *dev,
745                                         struct ethtool_link_ksettings *cmd)
746 {
747         struct dsa_port *dp = dsa_slave_to_port(dev);
748
749         return phylink_ethtool_ksettings_get(dp->pl, cmd);
750 }
751
752 static int dsa_slave_set_link_ksettings(struct net_device *dev,
753                                         const struct ethtool_link_ksettings *cmd)
754 {
755         struct dsa_port *dp = dsa_slave_to_port(dev);
756
757         return phylink_ethtool_ksettings_set(dp->pl, cmd);
758 }
759
760 static void dsa_slave_get_pauseparam(struct net_device *dev,
761                                      struct ethtool_pauseparam *pause)
762 {
763         struct dsa_port *dp = dsa_slave_to_port(dev);
764
765         phylink_ethtool_get_pauseparam(dp->pl, pause);
766 }
767
768 static int dsa_slave_set_pauseparam(struct net_device *dev,
769                                     struct ethtool_pauseparam *pause)
770 {
771         struct dsa_port *dp = dsa_slave_to_port(dev);
772
773         return phylink_ethtool_set_pauseparam(dp->pl, pause);
774 }
775
776 #ifdef CONFIG_NET_POLL_CONTROLLER
777 static int dsa_slave_netpoll_setup(struct net_device *dev,
778                                    struct netpoll_info *ni)
779 {
780         struct net_device *master = dsa_slave_to_master(dev);
781         struct dsa_slave_priv *p = netdev_priv(dev);
782         struct netpoll *netpoll;
783         int err = 0;
784
785         netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
786         if (!netpoll)
787                 return -ENOMEM;
788
789         err = __netpoll_setup(netpoll, master);
790         if (err) {
791                 kfree(netpoll);
792                 goto out;
793         }
794
795         p->netpoll = netpoll;
796 out:
797         return err;
798 }
799
800 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
801 {
802         struct dsa_slave_priv *p = netdev_priv(dev);
803         struct netpoll *netpoll = p->netpoll;
804
805         if (!netpoll)
806                 return;
807
808         p->netpoll = NULL;
809
810         __netpoll_free(netpoll);
811 }
812
813 static void dsa_slave_poll_controller(struct net_device *dev)
814 {
815 }
816 #endif
817
818 static int dsa_slave_get_phys_port_name(struct net_device *dev,
819                                         char *name, size_t len)
820 {
821         struct dsa_port *dp = dsa_slave_to_port(dev);
822
823         /* For non-legacy ports, devlink is used and it takes
824          * care of the name generation. This ndo implementation
825          * should be removed with legacy support.
826          */
827         if (dp->ds->devlink)
828                 return -EOPNOTSUPP;
829
830         if (snprintf(name, len, "p%d", dp->index) >= len)
831                 return -EINVAL;
832
833         return 0;
834 }
835
836 static struct dsa_mall_tc_entry *
837 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
838 {
839         struct dsa_slave_priv *p = netdev_priv(dev);
840         struct dsa_mall_tc_entry *mall_tc_entry;
841
842         list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
843                 if (mall_tc_entry->cookie == cookie)
844                         return mall_tc_entry;
845
846         return NULL;
847 }
848
849 static int dsa_slave_add_cls_matchall(struct net_device *dev,
850                                       struct tc_cls_matchall_offload *cls,
851                                       bool ingress)
852 {
853         struct dsa_port *dp = dsa_slave_to_port(dev);
854         struct dsa_slave_priv *p = netdev_priv(dev);
855         struct dsa_mall_tc_entry *mall_tc_entry;
856         __be16 protocol = cls->common.protocol;
857         struct dsa_switch *ds = dp->ds;
858         struct flow_action_entry *act;
859         struct dsa_port *to_dp;
860         int err = -EOPNOTSUPP;
861
862         if (!ds->ops->port_mirror_add)
863                 return err;
864
865         if (!flow_offload_has_one_action(&cls->rule->action))
866                 return err;
867
868         if (!flow_action_basic_hw_stats_types_check(&cls->rule->action,
869                                                     cls->common.extack))
870                 return err;
871
872         act = &cls->rule->action.entries[0];
873
874         if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
875                 struct dsa_mall_mirror_tc_entry *mirror;
876
877                 if (!act->dev)
878                         return -EINVAL;
879
880                 if (!dsa_slave_dev_check(act->dev))
881                         return -EOPNOTSUPP;
882
883                 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
884                 if (!mall_tc_entry)
885                         return -ENOMEM;
886
887                 mall_tc_entry->cookie = cls->cookie;
888                 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
889                 mirror = &mall_tc_entry->mirror;
890
891                 to_dp = dsa_slave_to_port(act->dev);
892
893                 mirror->to_local_port = to_dp->index;
894                 mirror->ingress = ingress;
895
896                 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
897                 if (err) {
898                         kfree(mall_tc_entry);
899                         return err;
900                 }
901
902                 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
903         }
904
905         return 0;
906 }
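/* The mirror entry built above is handed to the switch driver through
 * ds->ops->port_mirror_add(). A skeletal hook, with the register programming
 * left out and the name invented:
 */
static int example_port_mirror_add(struct dsa_switch *ds, int port,
                                   struct dsa_mall_mirror_tc_entry *mirror,
                                   bool ingress)
{
        /* Program a hypothetical monitor register so that traffic seen on
         * @port (ingress when @ingress is true, egress otherwise) is copied
         * to mirror->to_local_port.
         */
        return 0;
}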
907
908 static void dsa_slave_del_cls_matchall(struct net_device *dev,
909                                        struct tc_cls_matchall_offload *cls)
910 {
911         struct dsa_port *dp = dsa_slave_to_port(dev);
912         struct dsa_mall_tc_entry *mall_tc_entry;
913         struct dsa_switch *ds = dp->ds;
914
915         if (!ds->ops->port_mirror_del)
916                 return;
917
918         mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
919         if (!mall_tc_entry)
920                 return;
921
922         list_del(&mall_tc_entry->list);
923
924         switch (mall_tc_entry->type) {
925         case DSA_PORT_MALL_MIRROR:
926                 ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
927                 break;
928         default:
929                 WARN_ON(1);
930         }
931
932         kfree(mall_tc_entry);
933 }
934
935 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
936                                            struct tc_cls_matchall_offload *cls,
937                                            bool ingress)
938 {
939         if (cls->common.chain_index)
940                 return -EOPNOTSUPP;
941
942         switch (cls->command) {
943         case TC_CLSMATCHALL_REPLACE:
944                 return dsa_slave_add_cls_matchall(dev, cls, ingress);
945         case TC_CLSMATCHALL_DESTROY:
946                 dsa_slave_del_cls_matchall(dev, cls);
947                 return 0;
948         default:
949                 return -EOPNOTSUPP;
950         }
951 }
952
953 static int dsa_slave_add_cls_flower(struct net_device *dev,
954                                     struct flow_cls_offload *cls,
955                                     bool ingress)
956 {
957         struct dsa_port *dp = dsa_slave_to_port(dev);
958         struct dsa_switch *ds = dp->ds;
959         int port = dp->index;
960
961         if (!ds->ops->cls_flower_add)
962                 return -EOPNOTSUPP;
963
964         return ds->ops->cls_flower_add(ds, port, cls, ingress);
965 }
966
967 static int dsa_slave_del_cls_flower(struct net_device *dev,
968                                     struct flow_cls_offload *cls,
969                                     bool ingress)
970 {
971         struct dsa_port *dp = dsa_slave_to_port(dev);
972         struct dsa_switch *ds = dp->ds;
973         int port = dp->index;
974
975         if (!ds->ops->cls_flower_del)
976                 return -EOPNOTSUPP;
977
978         return ds->ops->cls_flower_del(ds, port, cls, ingress);
979 }
980
981 static int dsa_slave_stats_cls_flower(struct net_device *dev,
982                                       struct flow_cls_offload *cls,
983                                       bool ingress)
984 {
985         struct dsa_port *dp = dsa_slave_to_port(dev);
986         struct dsa_switch *ds = dp->ds;
987         int port = dp->index;
988
989         if (!ds->ops->cls_flower_stats)
990                 return -EOPNOTSUPP;
991
992         return ds->ops->cls_flower_stats(ds, port, cls, ingress);
993 }
994
995 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
996                                          struct flow_cls_offload *cls,
997                                          bool ingress)
998 {
999         switch (cls->command) {
1000         case FLOW_CLS_REPLACE:
1001                 return dsa_slave_add_cls_flower(dev, cls, ingress);
1002         case FLOW_CLS_DESTROY:
1003                 return dsa_slave_del_cls_flower(dev, cls, ingress);
1004         case FLOW_CLS_STATS:
1005                 return dsa_slave_stats_cls_flower(dev, cls, ingress);
1006         default:
1007                 return -EOPNOTSUPP;
1008         }
1009 }
1010
1011 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1012                                        void *cb_priv, bool ingress)
1013 {
1014         struct net_device *dev = cb_priv;
1015
1016         if (!tc_can_offload(dev))
1017                 return -EOPNOTSUPP;
1018
1019         switch (type) {
1020         case TC_SETUP_CLSMATCHALL:
1021                 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1022         case TC_SETUP_CLSFLOWER:
1023                 return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1024         default:
1025                 return -EOPNOTSUPP;
1026         }
1027 }
1028
1029 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1030                                           void *type_data, void *cb_priv)
1031 {
1032         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1033 }
1034
1035 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1036                                           void *type_data, void *cb_priv)
1037 {
1038         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1039 }
1040
1041 static LIST_HEAD(dsa_slave_block_cb_list);
1042
1043 static int dsa_slave_setup_tc_block(struct net_device *dev,
1044                                     struct flow_block_offload *f)
1045 {
1046         struct flow_block_cb *block_cb;
1047         flow_setup_cb_t *cb;
1048
1049         if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1050                 cb = dsa_slave_setup_tc_block_cb_ig;
1051         else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1052                 cb = dsa_slave_setup_tc_block_cb_eg;
1053         else
1054                 return -EOPNOTSUPP;
1055
1056         f->driver_block_list = &dsa_slave_block_cb_list;
1057
1058         switch (f->command) {
1059         case FLOW_BLOCK_BIND:
1060                 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1061                         return -EBUSY;
1062
1063                 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1064                 if (IS_ERR(block_cb))
1065                         return PTR_ERR(block_cb);
1066
1067                 flow_block_cb_add(block_cb, f);
1068                 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1069                 return 0;
1070         case FLOW_BLOCK_UNBIND:
1071                 block_cb = flow_block_cb_lookup(f->block, cb, dev);
1072                 if (!block_cb)
1073                         return -ENOENT;
1074
1075                 flow_block_cb_remove(block_cb, f);
1076                 list_del(&block_cb->driver_list);
1077                 return 0;
1078         default:
1079                 return -EOPNOTSUPP;
1080         }
1081 }
1082
1083 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1084                               void *type_data)
1085 {
1086         struct dsa_port *dp = dsa_slave_to_port(dev);
1087         struct dsa_switch *ds = dp->ds;
1088
1089         if (type == TC_SETUP_BLOCK)
1090                 return dsa_slave_setup_tc_block(dev, type_data);
1091
1092         if (!ds->ops->port_setup_tc)
1093                 return -EOPNOTSUPP;
1094
1095         return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1096 }
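/* Anything that is not a block (for example an offloadable qdisc) is passed
 * through to the driver. A minimal port_setup_tc() handling, say, the
 * credit-based shaper might look like this sketch (name invented, hardware
 * programming omitted):
 */
static int example_port_setup_tc(struct dsa_switch *ds, int port,
                                 enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_QDISC_CBS:
                /* type_data points to a struct tc_cbs_qopt_offload;
                 * program idleslope/sendslope/credit limits for @port.
                 */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}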
1097
1098 static void dsa_slave_get_stats64(struct net_device *dev,
1099                                   struct rtnl_link_stats64 *stats)
1100 {
1101         struct dsa_slave_priv *p = netdev_priv(dev);
1102         struct pcpu_sw_netstats *s;
1103         unsigned int start;
1104         int i;
1105
1106         netdev_stats_to_stats64(stats, &dev->stats);
1107         for_each_possible_cpu(i) {
1108                 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
1109
1110                 s = per_cpu_ptr(p->stats64, i);
1111                 do {
1112                         start = u64_stats_fetch_begin_irq(&s->syncp);
1113                         tx_packets = s->tx_packets;
1114                         tx_bytes = s->tx_bytes;
1115                         rx_packets = s->rx_packets;
1116                         rx_bytes = s->rx_bytes;
1117                 } while (u64_stats_fetch_retry_irq(&s->syncp, start));
1118
1119                 stats->tx_packets += tx_packets;
1120                 stats->tx_bytes += tx_bytes;
1121                 stats->rx_packets += rx_packets;
1122                 stats->rx_bytes += rx_bytes;
1123         }
1124 }
1125
1126 static int dsa_slave_get_rxnfc(struct net_device *dev,
1127                                struct ethtool_rxnfc *nfc, u32 *rule_locs)
1128 {
1129         struct dsa_port *dp = dsa_slave_to_port(dev);
1130         struct dsa_switch *ds = dp->ds;
1131
1132         if (!ds->ops->get_rxnfc)
1133                 return -EOPNOTSUPP;
1134
1135         return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1136 }
1137
1138 static int dsa_slave_set_rxnfc(struct net_device *dev,
1139                                struct ethtool_rxnfc *nfc)
1140 {
1141         struct dsa_port *dp = dsa_slave_to_port(dev);
1142         struct dsa_switch *ds = dp->ds;
1143
1144         if (!ds->ops->set_rxnfc)
1145                 return -EOPNOTSUPP;
1146
1147         return ds->ops->set_rxnfc(ds, dp->index, nfc);
1148 }
1149
1150 static int dsa_slave_get_ts_info(struct net_device *dev,
1151                                  struct ethtool_ts_info *ts)
1152 {
1153         struct dsa_slave_priv *p = netdev_priv(dev);
1154         struct dsa_switch *ds = p->dp->ds;
1155
1156         if (!ds->ops->get_ts_info)
1157                 return -EOPNOTSUPP;
1158
1159         return ds->ops->get_ts_info(ds, p->dp->index, ts);
1160 }
1161
1162 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1163                                      u16 vid)
1164 {
1165         struct dsa_port *dp = dsa_slave_to_port(dev);
1166         struct bridge_vlan_info info;
1167         int ret;
1168
1169         /* Check for a possible bridge VLAN entry now since there is no
1170          * need to emulate the switchdev prepare + commit phase.
1171          */
1172         if (dp->bridge_dev) {
1173                 if (!br_vlan_enabled(dp->bridge_dev))
1174                         return 0;
1175
1176                 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1177                  * device or the VID, respectively, is not found. A return
1178                  * value of 0 means the VLAN exists, which is a failure here.
1179                  */
1180                 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1181                 if (ret == 0)
1182                         return -EBUSY;
1183         }
1184
1185         ret = dsa_port_vid_add(dp, vid, 0);
1186         if (ret)
1187                 return ret;
1188
1189         ret = dsa_port_vid_add(dp->cpu_dp, vid, 0);
1190         if (ret)
1191                 return ret;
1192
1193         return 0;
1194 }
1195
1196 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1197                                       u16 vid)
1198 {
1199         struct dsa_port *dp = dsa_slave_to_port(dev);
1200         struct bridge_vlan_info info;
1201         int ret;
1202
1203         /* Check for a possible bridge VLAN entry now since there is no
1204          * need to emulate the switchdev prepare + commit phase.
1205          */
1206         if (dp->bridge_dev) {
1207                 if (!br_vlan_enabled(dp->bridge_dev))
1208                         return 0;
1209
1210                 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1211                  * device or the VID, respectively, is not found. A return
1212                  * value of 0 means the VLAN exists, which is a failure here.
1213                  */
1214                 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1215                 if (ret == 0)
1216                         return -EBUSY;
1217         }
1218
1219         /* Do not deprogram the CPU port as it may be shared with other user
1220          * ports which can be members of this VLAN as well.
1221          */
1222         return dsa_port_vid_del(dp, vid);
1223 }
1224
1225 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1226         .get_drvinfo            = dsa_slave_get_drvinfo,
1227         .get_regs_len           = dsa_slave_get_regs_len,
1228         .get_regs               = dsa_slave_get_regs,
1229         .nway_reset             = dsa_slave_nway_reset,
1230         .get_link               = ethtool_op_get_link,
1231         .get_eeprom_len         = dsa_slave_get_eeprom_len,
1232         .get_eeprom             = dsa_slave_get_eeprom,
1233         .set_eeprom             = dsa_slave_set_eeprom,
1234         .get_strings            = dsa_slave_get_strings,
1235         .get_ethtool_stats      = dsa_slave_get_ethtool_stats,
1236         .get_sset_count         = dsa_slave_get_sset_count,
1237         .set_wol                = dsa_slave_set_wol,
1238         .get_wol                = dsa_slave_get_wol,
1239         .set_eee                = dsa_slave_set_eee,
1240         .get_eee                = dsa_slave_get_eee,
1241         .get_link_ksettings     = dsa_slave_get_link_ksettings,
1242         .set_link_ksettings     = dsa_slave_set_link_ksettings,
1243         .get_pauseparam         = dsa_slave_get_pauseparam,
1244         .set_pauseparam         = dsa_slave_set_pauseparam,
1245         .get_rxnfc              = dsa_slave_get_rxnfc,
1246         .set_rxnfc              = dsa_slave_set_rxnfc,
1247         .get_ts_info            = dsa_slave_get_ts_info,
1248 };
1249
1250 /* legacy way, bypassing the bridge *****************************************/
1251 int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1252                        struct net_device *dev,
1253                        const unsigned char *addr, u16 vid,
1254                        u16 flags,
1255                        struct netlink_ext_ack *extack)
1256 {
1257         struct dsa_port *dp = dsa_slave_to_port(dev);
1258
1259         return dsa_port_fdb_add(dp, addr, vid);
1260 }
1261
1262 int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1263                        struct net_device *dev,
1264                        const unsigned char *addr, u16 vid)
1265 {
1266         struct dsa_port *dp = dsa_slave_to_port(dev);
1267
1268         return dsa_port_fdb_del(dp, addr, vid);
1269 }
1270
1271 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1272 {
1273         struct dsa_port *dp = dsa_slave_to_port(dev);
1274
1275         return dp->ds->devlink ? &dp->devlink_port : NULL;
1276 }
1277
1278 static const struct net_device_ops dsa_slave_netdev_ops = {
1279         .ndo_open               = dsa_slave_open,
1280         .ndo_stop               = dsa_slave_close,
1281         .ndo_start_xmit         = dsa_slave_xmit,
1282         .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
1283         .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
1284         .ndo_set_mac_address    = dsa_slave_set_mac_address,
1285         .ndo_fdb_add            = dsa_legacy_fdb_add,
1286         .ndo_fdb_del            = dsa_legacy_fdb_del,
1287         .ndo_fdb_dump           = dsa_slave_fdb_dump,
1288         .ndo_do_ioctl           = dsa_slave_ioctl,
1289         .ndo_get_iflink         = dsa_slave_get_iflink,
1290 #ifdef CONFIG_NET_POLL_CONTROLLER
1291         .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
1292         .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
1293         .ndo_poll_controller    = dsa_slave_poll_controller,
1294 #endif
1295         .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
1296         .ndo_setup_tc           = dsa_slave_setup_tc,
1297         .ndo_get_stats64        = dsa_slave_get_stats64,
1298         .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
1299         .ndo_vlan_rx_add_vid    = dsa_slave_vlan_rx_add_vid,
1300         .ndo_vlan_rx_kill_vid   = dsa_slave_vlan_rx_kill_vid,
1301         .ndo_get_devlink_port   = dsa_slave_get_devlink_port,
1302 };
1303
1304 static struct device_type dsa_type = {
1305         .name   = "dsa",
1306 };
1307
1308 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1309 {
1310         const struct dsa_port *dp = dsa_to_port(ds, port);
1311
1312         phylink_mac_change(dp->pl, up);
1313 }
1314 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1315
1316 static void dsa_slave_phylink_fixed_state(struct net_device *dev,
1317                                           struct phylink_link_state *state)
1318 {
1319         struct dsa_port *dp = dsa_slave_to_port(dev);
1320         struct dsa_switch *ds = dp->ds;
1321
1322         /* No need to check that this operation is valid, the callback would
1323          * not be called if it was not.
1324          */
1325         ds->ops->phylink_fixed_state(ds, dp->index, state);
1326 }
1327
1328 /* slave device setup *******************************************************/
1329 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1330 {
1331         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1332         struct dsa_switch *ds = dp->ds;
1333
1334         slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1335         if (!slave_dev->phydev) {
1336                 netdev_err(slave_dev, "no phy at %d\n", addr);
1337                 return -ENODEV;
1338         }
1339
1340         return phylink_connect_phy(dp->pl, slave_dev->phydev);
1341 }
1342
1343 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1344 {
1345         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1346         struct device_node *port_dn = dp->dn;
1347         struct dsa_switch *ds = dp->ds;
1348         phy_interface_t mode;
1349         u32 phy_flags = 0;
1350         int ret;
1351
1352         ret = of_get_phy_mode(port_dn, &mode);
1353         if (ret)
1354                 mode = PHY_INTERFACE_MODE_NA;
1355
1356         dp->pl_config.dev = &slave_dev->dev;
1357         dp->pl_config.type = PHYLINK_NETDEV;
1358
1359         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1360                                 &dsa_port_phylink_mac_ops);
1361         if (IS_ERR(dp->pl)) {
1362                 netdev_err(slave_dev,
1363                            "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1364                 return PTR_ERR(dp->pl);
1365         }
1366
1367         /* Register only if the switch provides such a callback, since this
1368          * callback takes precedence over polling the link GPIO in PHYLINK
1369          * (see phylink_get_fixed_state).
1370          */
1371         if (ds->ops->phylink_fixed_state)
1372                 phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);
1373
1374         if (ds->ops->get_phy_flags)
1375                 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1376
1377         ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1378         if (ret == -ENODEV && ds->slave_mii_bus) {
1379                 /* We could not connect to a designated PHY or SFP, so try to
1380                  * use the switch internal MDIO bus instead
1381                  */
1382                 ret = dsa_slave_phy_connect(slave_dev, dp->index);
1383                 if (ret) {
1384                         netdev_err(slave_dev,
1385                                    "failed to connect to port %d: %d\n",
1386                                    dp->index, ret);
1387                         phylink_destroy(dp->pl);
1388                         return ret;
1389                 }
1390         }
1391
1392         return ret;
1393 }
1394
1395 int dsa_slave_suspend(struct net_device *slave_dev)
1396 {
1397         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1398
1399         if (!netif_running(slave_dev))
1400                 return 0;
1401
1402         netif_device_detach(slave_dev);
1403
1404         rtnl_lock();
1405         phylink_stop(dp->pl);
1406         rtnl_unlock();
1407
1408         return 0;
1409 }
1410
1411 int dsa_slave_resume(struct net_device *slave_dev)
1412 {
1413         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1414
1415         if (!netif_running(slave_dev))
1416                 return 0;
1417
1418         netif_device_attach(slave_dev);
1419
1420         rtnl_lock();
1421         phylink_start(dp->pl);
1422         rtnl_unlock();
1423
1424         return 0;
1425 }
1426
1427 static void dsa_slave_notify(struct net_device *dev, unsigned long val)
1428 {
1429         struct net_device *master = dsa_slave_to_master(dev);
1430         struct dsa_port *dp = dsa_slave_to_port(dev);
1431         struct dsa_notifier_register_info rinfo = {
1432                 .switch_number = dp->ds->index,
1433                 .port_number = dp->index,
1434                 .master = master,
1435                 .info.dev = dev,
1436         };
1437
1438         call_dsa_notifiers(val, dev, &rinfo.info);
1439 }
1440
1441 int dsa_slave_create(struct dsa_port *port)
1442 {
1443         const struct dsa_port *cpu_dp = port->cpu_dp;
1444         struct net_device *master = cpu_dp->master;
1445         struct dsa_switch *ds = port->ds;
1446         const char *name = port->name;
1447         struct net_device *slave_dev;
1448         struct dsa_slave_priv *p;
1449         int ret;
1450
1451         if (!ds->num_tx_queues)
1452                 ds->num_tx_queues = 1;
1453
1454         slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1455                                      NET_NAME_UNKNOWN, ether_setup,
1456                                      ds->num_tx_queues, 1);
1457         if (slave_dev == NULL)
1458                 return -ENOMEM;
1459
1460         slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
1461         if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
1462                 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1463         slave_dev->hw_features |= NETIF_F_HW_TC;
1464         slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1465         if (!IS_ERR_OR_NULL(port->mac))
1466                 ether_addr_copy(slave_dev->dev_addr, port->mac);
1467         else
1468                 eth_hw_addr_inherit(slave_dev, master);
1469         slave_dev->priv_flags |= IFF_NO_QUEUE;
1470         slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1471         slave_dev->min_mtu = 0;
1472         slave_dev->max_mtu = ETH_MAX_MTU;
1473         SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1474
1475         SET_NETDEV_DEV(slave_dev, port->ds->dev);
1476         slave_dev->dev.of_node = port->dn;
1477         slave_dev->vlan_features = master->vlan_features;
1478
1479         p = netdev_priv(slave_dev);
1480         p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1481         if (!p->stats64) {
1482                 free_netdev(slave_dev);
1483                 return -ENOMEM;
1484         }
1485         p->dp = port;
1486         INIT_LIST_HEAD(&p->mall_tc_list);
1487         p->xmit = cpu_dp->tag_ops->xmit;
1488         port->slave = slave_dev;
1489
1490         netif_carrier_off(slave_dev);
1491
1492         ret = dsa_slave_phy_setup(slave_dev);
1493         if (ret) {
1494                 netdev_err(master, "error %d setting up slave phy\n", ret);
1495                 goto out_free;
1496         }
1497
1498         dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1499
1500         ret = register_netdev(slave_dev);
1501         if (ret) {
1502                 netdev_err(master, "error %d registering interface %s\n",
1503                            ret, slave_dev->name);
1504                 goto out_phy;
1505         }
1506
1507         return 0;
1508
1509 out_phy:
1510         rtnl_lock();
1511         phylink_disconnect_phy(p->dp->pl);
1512         rtnl_unlock();
1513         phylink_destroy(p->dp->pl);
1514 out_free:
1515         free_percpu(p->stats64);
1516         free_netdev(slave_dev);
1517         port->slave = NULL;
1518         return ret;
1519 }
1520
1521 void dsa_slave_destroy(struct net_device *slave_dev)
1522 {
1523         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1524         struct dsa_slave_priv *p = netdev_priv(slave_dev);
1525
1526         netif_carrier_off(slave_dev);
1527         rtnl_lock();
1528         phylink_disconnect_phy(dp->pl);
1529         rtnl_unlock();
1530
1531         dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1532         unregister_netdev(slave_dev);
1533         phylink_destroy(dp->pl);
1534         free_percpu(p->stats64);
1535         free_netdev(slave_dev);
1536 }
1537
1538 bool dsa_slave_dev_check(const struct net_device *dev)
1539 {
1540         return dev->netdev_ops == &dsa_slave_netdev_ops;
1541 }
1542
1543 static int dsa_slave_changeupper(struct net_device *dev,
1544                                  struct netdev_notifier_changeupper_info *info)
1545 {
1546         struct dsa_port *dp = dsa_slave_to_port(dev);
1547         int err = NOTIFY_DONE;
1548
1549         if (netif_is_bridge_master(info->upper_dev)) {
1550                 if (info->linking) {
1551                         err = dsa_port_bridge_join(dp, info->upper_dev);
1552                         err = notifier_from_errno(err);
1553                 } else {
1554                         dsa_port_bridge_leave(dp, info->upper_dev);
1555                         err = NOTIFY_OK;
1556                 }
1557         }
1558
1559         return err;
1560 }
1561
1562 static int dsa_slave_upper_vlan_check(struct net_device *dev,
1563                                       struct netdev_notifier_changeupper_info *
1564                                       info)
1565 {
1566         struct netlink_ext_ack *ext_ack;
1567         struct net_device *slave;
1568         struct dsa_port *dp;
1569
1570         ext_ack = netdev_notifier_info_to_extack(&info->info);
1571
1572         if (!is_vlan_dev(dev))
1573                 return NOTIFY_DONE;
1574
1575         slave = vlan_dev_real_dev(dev);
1576         if (!dsa_slave_dev_check(slave))
1577                 return NOTIFY_DONE;
1578
1579         dp = dsa_slave_to_port(slave);
1580         if (!dp->bridge_dev)
1581                 return NOTIFY_DONE;
1582
1583         /* Deny enslaving a VLAN device into a VLAN-aware bridge */
1584         if (br_vlan_enabled(dp->bridge_dev) &&
1585             netif_is_bridge_master(info->upper_dev) && info->linking) {
1586                 NL_SET_ERR_MSG_MOD(ext_ack,
1587                                    "Cannot enslave VLAN device into VLAN aware bridge");
1588                 return notifier_from_errno(-EINVAL);
1589         }
1590
1591         return NOTIFY_DONE;
1592 }
1593
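/* Netdevice notifier: on NETDEV_CHANGEUPPER, dispatch either to the slave's
 * own bridge join/leave handling or, for non-DSA devices, to the VLAN upper
 * check above.  Triggered e.g. by "ip link set swp0 master br0" (example
 * command only, assuming a port named swp0).
 */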
1594 static int dsa_slave_netdevice_event(struct notifier_block *nb,
1595                                      unsigned long event, void *ptr)
1596 {
1597         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1598
1599         if (event == NETDEV_CHANGEUPPER) {
1600                 if (!dsa_slave_dev_check(dev))
1601                         return dsa_slave_upper_vlan_check(dev, ptr);
1602
1603                 return dsa_slave_changeupper(dev, ptr);
1604         }
1605
1606         return NOTIFY_DONE;
1607 }
1608
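/* Deferred-work container for switchdev FDB events: the atomic notifier
 * callback only snapshots the event here, and the actual hardware
 * programming happens later in dsa_slave_switchdev_event_work().
 */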
1609 struct dsa_switchdev_event_work {
1610         struct work_struct work;
1611         struct switchdev_notifier_fdb_info fdb_info;
1612         struct net_device *dev;
1613         unsigned long event;
1614 };
1615
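/* Worker side of the FDB offload path: runs in process context under RTNL,
 * adds or deletes the address in the switch, and on a successful add reports
 * the entry back as offloaded via SWITCHDEV_FDB_OFFLOADED.
 */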
1616 static void dsa_slave_switchdev_event_work(struct work_struct *work)
1617 {
1618         struct dsa_switchdev_event_work *switchdev_work =
1619                 container_of(work, struct dsa_switchdev_event_work, work);
1620         struct net_device *dev = switchdev_work->dev;
1621         struct switchdev_notifier_fdb_info *fdb_info;
1622         struct dsa_port *dp = dsa_slave_to_port(dev);
1623         int err;
1624
1625         rtnl_lock();
1626         switch (switchdev_work->event) {
1627         case SWITCHDEV_FDB_ADD_TO_DEVICE:
1628                 fdb_info = &switchdev_work->fdb_info;
1629                 if (!fdb_info->added_by_user)
1630                         break;
1631
1632                 err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
1633                 if (err) {
1634                         netdev_dbg(dev, "fdb add failed err=%d\n", err);
1635                         break;
1636                 }
1637                 fdb_info->offloaded = true;
1638                 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
1639                                          &fdb_info->info, NULL);
1640                 break;
1641
1642         case SWITCHDEV_FDB_DEL_TO_DEVICE:
1643                 fdb_info = &switchdev_work->fdb_info;
1644                 if (!fdb_info->added_by_user)
1645                         break;
1646
1647                 err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
1648                 if (err) {
1649                         netdev_dbg(dev, "fdb del failed err=%d\n", err);
1650                         dev_close(dev);
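                        /* A failed deletion leaves the hardware FDB out of
                         * sync with the bridge; closing the port is the
                         * conservative response to avoid forwarding with a
                         * stale entry.
                         */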
1651                 }
1652                 break;
1653         }
1654         rtnl_unlock();
1655
1656         kfree(switchdev_work->fdb_info.addr);
1657         kfree(switchdev_work);
1658         dev_put(dev);
1659 }
1660
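/* Copy the notifier's fdb_info into the deferred work item.  The MAC address
 * in the original is only valid for the duration of the notifier call, so
 * duplicate it with a GFP_ATOMIC allocation that the worker frees once it is
 * done with it.
 */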
1661 static int
1662 dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
1663                                   switchdev_work,
1664                                   const struct switchdev_notifier_fdb_info *
1665                                   fdb_info)
1666 {
1667         memcpy(&switchdev_work->fdb_info, fdb_info,
1668                sizeof(switchdev_work->fdb_info));
1669         switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1670         if (!switchdev_work->fdb_info.addr)
1671                 return -ENOMEM;
1672         ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1673                         fdb_info->addr);
1674         return 0;
1675 }
1676
1677 /* Called under rcu_read_lock() */
1678 static int dsa_slave_switchdev_event(struct notifier_block *unused,
1679                                      unsigned long event, void *ptr)
1680 {
1681         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1682         struct dsa_switchdev_event_work *switchdev_work;
1683         int err;
1684
1685         if (event == SWITCHDEV_PORT_ATTR_SET) {
1686                 err = switchdev_handle_port_attr_set(dev, ptr,
1687                                                      dsa_slave_dev_check,
1688                                                      dsa_slave_port_attr_set);
1689                 return notifier_from_errno(err);
1690         }
1691
1692         if (!dsa_slave_dev_check(dev))
1693                 return NOTIFY_DONE;
1694
1695         switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1696         if (!switchdev_work)
1697                 return NOTIFY_BAD;
1698
1699         INIT_WORK(&switchdev_work->work,
1700                   dsa_slave_switchdev_event_work);
1701         switchdev_work->dev = dev;
1702         switchdev_work->event = event;
1703
1704         switch (event) {
1705         case SWITCHDEV_FDB_ADD_TO_DEVICE:
1706         case SWITCHDEV_FDB_DEL_TO_DEVICE:
1707                 if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
1708                         goto err_fdb_work_init;
1709                 dev_hold(dev);
1710                 break;
1711         default:
1712                 kfree(switchdev_work);
1713                 return NOTIFY_DONE;
1714         }
1715
1716         dsa_schedule_work(&switchdev_work->work);
1717         return NOTIFY_OK;
1718
1719 err_fdb_work_init:
1720         kfree(switchdev_work);
1721         return NOTIFY_BAD;
1722 }
1723
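/* Blocking switchdev notifier: port object add/del and attribute set
 * requests may sleep, so they are handled directly here through the
 * switchdev_handle_*() helpers instead of being deferred to a work item.
 */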
1724 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
1725                                               unsigned long event, void *ptr)
1726 {
1727         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1728         int err;
1729
1730         switch (event) {
1731         case SWITCHDEV_PORT_OBJ_ADD:
1732                 err = switchdev_handle_port_obj_add(dev, ptr,
1733                                                     dsa_slave_dev_check,
1734                                                     dsa_slave_port_obj_add);
1735                 return notifier_from_errno(err);
1736         case SWITCHDEV_PORT_OBJ_DEL:
1737                 err = switchdev_handle_port_obj_del(dev, ptr,
1738                                                     dsa_slave_dev_check,
1739                                                     dsa_slave_port_obj_del);
1740                 return notifier_from_errno(err);
1741         case SWITCHDEV_PORT_ATTR_SET:
1742                 err = switchdev_handle_port_attr_set(dev, ptr,
1743                                                      dsa_slave_dev_check,
1744                                                      dsa_slave_port_attr_set);
1745                 return notifier_from_errno(err);
1746         }
1747
1748         return NOTIFY_DONE;
1749 }
1750
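/* The three notifier blocks below are registered system-wide by
 * dsa_slave_register_notifier() and removed again by
 * dsa_slave_unregister_notifier().
 */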
1751 static struct notifier_block dsa_slave_nb __read_mostly = {
1752         .notifier_call  = dsa_slave_netdevice_event,
1753 };
1754
1755 static struct notifier_block dsa_slave_switchdev_notifier = {
1756         .notifier_call = dsa_slave_switchdev_event,
1757 };
1758
1759 static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
1760         .notifier_call = dsa_slave_switchdev_blocking_event,
1761 };
1762
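/* Register the netdevice and switchdev notifiers declared above.  Expected
 * to be called once from the DSA core's init path, roughly (sketch only; the
 * real caller lives outside this file):
 *
 *	err = dsa_slave_register_notifier();
 *	if (err)
 *		goto out_unwind;
 *
 * On a partial failure, the notifiers registered so far are unregistered
 * before the error is returned.
 */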
1763 int dsa_slave_register_notifier(void)
1764 {
1765         struct notifier_block *nb;
1766         int err;
1767
1768         err = register_netdevice_notifier(&dsa_slave_nb);
1769         if (err)
1770                 return err;
1771
1772         err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
1773         if (err)
1774                 goto err_switchdev_nb;
1775
1776         nb = &dsa_slave_switchdev_blocking_notifier;
1777         err = register_switchdev_blocking_notifier(nb);
1778         if (err)
1779                 goto err_switchdev_blocking_nb;
1780
1781         return 0;
1782
1783 err_switchdev_blocking_nb:
1784         unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1785 err_switchdev_nb:
1786         unregister_netdevice_notifier(&dsa_slave_nb);
1787         return err;
1788 }
1789
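/* Mirror of dsa_slave_register_notifier(): unregister in reverse order and
 * only log failures, since there is nothing else the caller could do.
 */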
1790 void dsa_slave_unregister_notifier(void)
1791 {
1792         struct notifier_block *nb;
1793         int err;
1794
1795         nb = &dsa_slave_switchdev_blocking_notifier;
1796         err = unregister_switchdev_blocking_notifier(nb);
1797         if (err)
1798                 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
1799
1800         err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1801         if (err)
1802                 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
1803
1804         err = unregister_netdevice_notifier(&dsa_slave_nb);
1805         if (err)
1806                 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
1807 }