net: ethernet: ti: am65-cpsw: Add support for SERDES configuration
[platform/kernel/linux-starfive.git] / drivers / net / ethernet / ti / am65-cpsw-nuss.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
3  *
4  * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
5  *
6  */
7
8 #include <linux/clk.h>
9 #include <linux/etherdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqdomain.h>
13 #include <linux/kernel.h>
14 #include <linux/kmemleak.h>
15 #include <linux/module.h>
16 #include <linux/netdevice.h>
17 #include <linux/net_tstamp.h>
18 #include <linux/of.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_net.h>
21 #include <linux/of_device.h>
22 #include <linux/phylink.h>
23 #include <linux/phy/phy.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/regmap.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/mfd/syscon.h>
29 #include <linux/sys_soc.h>
30 #include <linux/dma/ti-cppi5.h>
31 #include <linux/dma/k3-udma-glue.h>
32 #include <net/switchdev.h>
33
34 #include "cpsw_ale.h"
35 #include "cpsw_sl.h"
36 #include "am65-cpsw-nuss.h"
37 #include "am65-cpsw-switchdev.h"
38 #include "k3-cppi-desc-pool.h"
39 #include "am65-cpts.h"
40
41 #define AM65_CPSW_SS_BASE       0x0
42 #define AM65_CPSW_SGMII_BASE    0x100
43 #define AM65_CPSW_XGMII_BASE    0x2100
44 #define AM65_CPSW_CPSW_NU_BASE  0x20000
45 #define AM65_CPSW_NU_PORTS_BASE 0x1000
46 #define AM65_CPSW_NU_FRAM_BASE  0x12000
47 #define AM65_CPSW_NU_STATS_BASE 0x1a000
48 #define AM65_CPSW_NU_ALE_BASE   0x1e000
49 #define AM65_CPSW_NU_CPTS_BASE  0x1d000
50
51 #define AM65_CPSW_NU_PORTS_OFFSET       0x1000
52 #define AM65_CPSW_NU_STATS_PORT_OFFSET  0x200
53 #define AM65_CPSW_NU_FRAM_PORT_OFFSET   0x200
54
55 #define AM65_CPSW_MAX_PORTS     8
56
57 #define AM65_CPSW_MIN_PACKET_SIZE       VLAN_ETH_ZLEN
58 #define AM65_CPSW_MAX_PACKET_SIZE       (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
59
60 #define AM65_CPSW_REG_CTL               0x004
61 #define AM65_CPSW_REG_STAT_PORT_EN      0x014
62 #define AM65_CPSW_REG_PTYPE             0x018
63
64 #define AM65_CPSW_P0_REG_CTL                    0x004
65 #define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET      0x008
66
67 #define AM65_CPSW_PORT_REG_PRI_CTL              0x01c
68 #define AM65_CPSW_PORT_REG_RX_PRI_MAP           0x020
69 #define AM65_CPSW_PORT_REG_RX_MAXLEN            0x024
70
71 #define AM65_CPSW_PORTN_REG_SA_L                0x308
72 #define AM65_CPSW_PORTN_REG_SA_H                0x30c
73 #define AM65_CPSW_PORTN_REG_TS_CTL              0x310
74 #define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG    0x314
75 #define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG   0x318
76 #define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2       0x31C
77
78 #define AM65_CPSW_SGMII_CONTROL_REG             0x010
79 #define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE    BIT(0)
80
81 #define AM65_CPSW_CTL_VLAN_AWARE                BIT(1)
82 #define AM65_CPSW_CTL_P0_ENABLE                 BIT(2)
83 #define AM65_CPSW_CTL_P0_TX_CRC_REMOVE          BIT(13)
84 #define AM65_CPSW_CTL_P0_RX_PAD                 BIT(14)
85
86 /* AM65_CPSW_P0_REG_CTL */
87 #define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN     BIT(0)
88
89 /* AM65_CPSW_PORT_REG_PRI_CTL */
90 #define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN      BIT(8)
91
92 /* AM65_CPSW_PN_TS_CTL register fields */
93 #define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN         BIT(4)
94 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN      BIT(5)
95 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN      BIT(6)
96 #define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN         BIT(7)
97 #define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN         BIT(10)
98 #define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN       BIT(11)
99 #define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT   16
100
101 /* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
102 #define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT     16
103
104 /* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
105 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107       BIT(16)
106 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129       BIT(17)
107 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130       BIT(18)
108 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131       BIT(19)
109 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132       BIT(20)
110 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319       BIT(21)
111 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320       BIT(22)
112 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
113
114 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
115 #define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
116
117 #define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
118
119 #define AM65_CPSW_TS_TX_ANX_ALL_EN              \
120         (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |      \
121          AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |      \
122          AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
123
124 #define AM65_CPSW_ALE_AGEOUT_DEFAULT    30
125 /* Number of TX/RX descriptors */
126 #define AM65_CPSW_MAX_TX_DESC   500
127 #define AM65_CPSW_MAX_RX_DESC   500
128
129 #define AM65_CPSW_NAV_PS_DATA_SIZE 16
130 #define AM65_CPSW_NAV_SW_DATA_SIZE 16
131
132 #define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
133                          NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
134                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
135
136 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
137                                       const u8 *dev_addr)
138 {
139         u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
140                      (dev_addr[2] << 16) | (dev_addr[3] << 24);
141         u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
142
143         writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
144         writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
145 }
146
147 static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
148 {
149         cpsw_sl_reset(port->slave.mac_sl, 100);
150         /* Max length register has to be restored after MAC SL reset */
151         writel(AM65_CPSW_MAX_PACKET_SIZE,
152                port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
153 }
154
155 static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
156 {
157         common->nuss_ver = readl(common->ss_base);
158         common->cpsw_ver = readl(common->cpsw_base);
159         dev_info(common->dev,
160                  "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
161                 common->nuss_ver,
162                 common->cpsw_ver,
163                 common->port_num + 1,
164                 common->pdata.quirks);
165 }
166
167 static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
168                                             __be16 proto, u16 vid)
169 {
170         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
171         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
172         u32 port_mask, unreg_mcast = 0;
173         int ret;
174
175         if (!common->is_emac_mode)
176                 return 0;
177
178         if (!netif_running(ndev) || !vid)
179                 return 0;
180
181         ret = pm_runtime_resume_and_get(common->dev);
182         if (ret < 0)
183                 return ret;
184
185         port_mask = BIT(port->port_id) | ALE_PORT_HOST;
186         if (!vid)
187                 unreg_mcast = port_mask;
188         dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
189         ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
190                                        unreg_mcast, port_mask, 0);
191
192         pm_runtime_put(common->dev);
193         return ret;
194 }
195
196 static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
197                                              __be16 proto, u16 vid)
198 {
199         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
200         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
201         int ret;
202
203         if (!common->is_emac_mode)
204                 return 0;
205
206         if (!netif_running(ndev) || !vid)
207                 return 0;
208
209         ret = pm_runtime_resume_and_get(common->dev);
210         if (ret < 0)
211                 return ret;
212
213         dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
214         ret = cpsw_ale_del_vlan(common->ale, vid,
215                                 BIT(port->port_id) | ALE_PORT_HOST);
216
217         pm_runtime_put(common->dev);
218         return ret;
219 }
220
221 static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
222                                         bool promisc)
223 {
224         struct am65_cpsw_common *common = port->common;
225
226         if (promisc && !common->is_emac_mode) {
227                 dev_dbg(common->dev, "promisc mode requested in switch mode");
228                 return;
229         }
230
231         if (promisc) {
232                 /* Enable promiscuous mode */
233                 cpsw_ale_control_set(common->ale, port->port_id,
234                                      ALE_PORT_MACONLY_CAF, 1);
235                 dev_dbg(common->dev, "promisc enabled\n");
236         } else {
237                 /* Disable promiscuous mode */
238                 cpsw_ale_control_set(common->ale, port->port_id,
239                                      ALE_PORT_MACONLY_CAF, 0);
240                 dev_dbg(common->dev, "promisc disabled\n");
241         }
242 }
243
244 static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
245 {
246         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
247         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
248         u32 port_mask;
249         bool promisc;
250
251         promisc = !!(ndev->flags & IFF_PROMISC);
252         am65_cpsw_slave_set_promisc(port, promisc);
253
254         if (promisc)
255                 return;
256
257         /* Restore allmulti on vlans if necessary */
258         cpsw_ale_set_allmulti(common->ale,
259                               ndev->flags & IFF_ALLMULTI, port->port_id);
260
261         port_mask = ALE_PORT_HOST;
262         /* Clear all mcast from ALE */
263         cpsw_ale_flush_multicast(common->ale, port_mask, -1);
264
265         if (!netdev_mc_empty(ndev)) {
266                 struct netdev_hw_addr *ha;
267
268                 /* program multicast address list into ALE register */
269                 netdev_for_each_mc_addr(ha, ndev) {
270                         cpsw_ale_add_mcast(common->ale, ha->addr,
271                                            port_mask, 0, 0, 0);
272                 }
273         }
274 }
275
276 static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
277                                                unsigned int txqueue)
278 {
279         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
280         struct am65_cpsw_tx_chn *tx_chn;
281         struct netdev_queue *netif_txq;
282         unsigned long trans_start;
283
284         netif_txq = netdev_get_tx_queue(ndev, txqueue);
285         tx_chn = &common->tx_chns[txqueue];
286         trans_start = READ_ONCE(netif_txq->trans_start);
287
288         netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
289                    txqueue,
290                    netif_tx_queue_stopped(netif_txq),
291                    jiffies_to_msecs(jiffies - trans_start),
292                    dql_avail(&netif_txq->dql),
293                    k3_cppi_desc_pool_avail(tx_chn->desc_pool));
294
295         if (netif_tx_queue_stopped(netif_txq)) {
296                 /* try recover if stopped by us */
297                 txq_trans_update(netif_txq);
298                 netif_tx_wake_queue(netif_txq);
299         }
300 }
301
302 static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
303                                   struct sk_buff *skb)
304 {
305         struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
306         struct cppi5_host_desc_t *desc_rx;
307         struct device *dev = common->dev;
308         u32 pkt_len = skb_tailroom(skb);
309         dma_addr_t desc_dma;
310         dma_addr_t buf_dma;
311         void *swdata;
312
313         desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
314         if (!desc_rx) {
315                 dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
316                 return -ENOMEM;
317         }
318         desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
319
320         buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
321                                  DMA_FROM_DEVICE);
322         if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
323                 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
324                 dev_err(dev, "Failed to map rx skb buffer\n");
325                 return -EINVAL;
326         }
327
328         cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
329                          AM65_CPSW_NAV_PS_DATA_SIZE);
330         k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
331         cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
332         swdata = cppi5_hdesc_get_swdata(desc_rx);
333         *((void **)swdata) = skb;
334
335         return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
336 }
337
338 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
339 {
340         struct am65_cpsw_host *host_p = am65_common_get_host(common);
341         u32 val, pri_map;
342
343         /* P0 set Receive Priority Type */
344         val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
345
346         if (common->pf_p0_rx_ptype_rrobin) {
347                 val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
348                 /* Enet Ports fifos works in fixed priority mode only, so
349                  * reset P0_Rx_Pri_Map so all packet will go in Enet fifo 0
350                  */
351                 pri_map = 0x0;
352         } else {
353                 val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
354                 /* restore P0_Rx_Pri_Map */
355                 pri_map = 0x76543210;
356         }
357
358         writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
359         writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
360 }
361
362 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
363 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
364 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
365 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
366
367 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
368 {
369         struct am65_cpsw_host *host_p = am65_common_get_host(common);
370         int port_idx, i, ret;
371         struct sk_buff *skb;
372         u32 val, port_mask;
373
374         if (common->usage_count)
375                 return 0;
376
377         /* Control register */
378         writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
379                AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
380                common->cpsw_base + AM65_CPSW_REG_CTL);
381         /* Max length register */
382         writel(AM65_CPSW_MAX_PACKET_SIZE,
383                host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
384         /* set base flow_id */
385         writel(common->rx_flow_id_base,
386                host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
387         /* en tx crc offload */
388         writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
389
390         am65_cpsw_nuss_set_p0_ptype(common);
391
392         /* enable statistic */
393         val = BIT(HOST_PORT_NUM);
394         for (port_idx = 0; port_idx < common->port_num; port_idx++) {
395                 struct am65_cpsw_port *port = &common->ports[port_idx];
396
397                 if (!port->disabled)
398                         val |=  BIT(port->port_id);
399         }
400         writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
401
402         /* disable priority elevation */
403         writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
404
405         cpsw_ale_start(common->ale);
406
407         /* limit to one RX flow only */
408         cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
409                              ALE_DEFAULT_THREAD_ID, 0);
410         cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
411                              ALE_DEFAULT_THREAD_ENABLE, 1);
412         /* switch to vlan unaware mode */
413         cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
414         cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
415                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
416
417         /* default vlan cfg: create mask based on enabled ports */
418         port_mask = GENMASK(common->port_num, 0) &
419                     ~common->disabled_ports_mask;
420
421         cpsw_ale_add_vlan(common->ale, 0, port_mask,
422                           port_mask, port_mask,
423                           port_mask & ~ALE_PORT_HOST);
424
425         if (common->is_emac_mode)
426                 am65_cpsw_init_host_port_emac(common);
427         else
428                 am65_cpsw_init_host_port_switch(common);
429
430         for (i = 0; i < common->rx_chns.descs_num; i++) {
431                 skb = __netdev_alloc_skb_ip_align(NULL,
432                                                   AM65_CPSW_MAX_PACKET_SIZE,
433                                                   GFP_KERNEL);
434                 if (!skb) {
435                         dev_err(common->dev, "cannot allocate skb\n");
436                         return -ENOMEM;
437                 }
438
439                 ret = am65_cpsw_nuss_rx_push(common, skb);
440                 if (ret < 0) {
441                         dev_err(common->dev,
442                                 "cannot submit skb to channel rx, error %d\n",
443                                 ret);
444                         kfree_skb(skb);
445                         return ret;
446                 }
447                 kmemleak_not_leak(skb);
448         }
449         k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
450
451         for (i = 0; i < common->tx_ch_num; i++) {
452                 ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
453                 if (ret)
454                         return ret;
455                 napi_enable(&common->tx_chns[i].napi_tx);
456         }
457
458         napi_enable(&common->napi_rx);
459         if (common->rx_irq_disabled) {
460                 common->rx_irq_disabled = false;
461                 enable_irq(common->rx_chns.irq);
462         }
463
464         dev_dbg(common->dev, "cpsw_nuss started\n");
465         return 0;
466 }
467
468 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
469 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
470
471 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
472 {
473         int i;
474
475         if (common->usage_count != 1)
476                 return 0;
477
478         cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
479                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
480
481         /* shutdown tx channels */
482         atomic_set(&common->tdown_cnt, common->tx_ch_num);
483         /* ensure new tdown_cnt value is visible */
484         smp_mb__after_atomic();
485         reinit_completion(&common->tdown_complete);
486
487         for (i = 0; i < common->tx_ch_num; i++)
488                 k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
489
490         i = wait_for_completion_timeout(&common->tdown_complete,
491                                         msecs_to_jiffies(1000));
492         if (!i)
493                 dev_err(common->dev, "tx timeout\n");
494         for (i = 0; i < common->tx_ch_num; i++)
495                 napi_disable(&common->tx_chns[i].napi_tx);
496
497         for (i = 0; i < common->tx_ch_num; i++) {
498                 k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
499                                           &common->tx_chns[i],
500                                           am65_cpsw_nuss_tx_cleanup);
501                 k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
502         }
503
504         k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
505         napi_disable(&common->napi_rx);
506
507         for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
508                 k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
509                                           &common->rx_chns,
510                                           am65_cpsw_nuss_rx_cleanup, !!i);
511
512         k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
513
514         cpsw_ale_stop(common->ale);
515
516         writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
517         writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
518
519         dev_dbg(common->dev, "cpsw_nuss stopped\n");
520         return 0;
521 }
522
523 static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
524 {
525         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
526         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
527         int ret;
528
529         phylink_stop(port->slave.phylink);
530
531         netif_tx_stop_all_queues(ndev);
532
533         phylink_disconnect_phy(port->slave.phylink);
534
535         ret = am65_cpsw_nuss_common_stop(common);
536         if (ret)
537                 return ret;
538
539         common->usage_count--;
540         pm_runtime_put(common->dev);
541         return 0;
542 }
543
544 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
545 {
546         struct am65_cpsw_port *port = arg;
547
548         if (!vdev)
549                 return 0;
550
551         return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
552 }
553
554 static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
555 {
556         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
557         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
558         int ret, i;
559         u32 reg;
560
561         ret = pm_runtime_resume_and_get(common->dev);
562         if (ret < 0)
563                 return ret;
564
565         /* Idle MAC port */
566         cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
567         cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
568         cpsw_sl_ctl_reset(port->slave.mac_sl);
569
570         /* soft reset MAC */
571         cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
572         mdelay(1);
573         reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
574         if (reg) {
575                 dev_err(common->dev, "soft RESET didn't complete\n");
576                 ret = -ETIMEDOUT;
577                 goto runtime_put;
578         }
579
580         /* Notify the stack of the actual queue counts. */
581         ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
582         if (ret) {
583                 dev_err(common->dev, "cannot set real number of tx queues\n");
584                 goto runtime_put;
585         }
586
587         ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
588         if (ret) {
589                 dev_err(common->dev, "cannot set real number of rx queues\n");
590                 goto runtime_put;
591         }
592
593         for (i = 0; i < common->tx_ch_num; i++)
594                 netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
595
596         ret = am65_cpsw_nuss_common_open(common);
597         if (ret)
598                 goto runtime_put;
599
600         common->usage_count++;
601
602         am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
603
604         if (common->is_emac_mode)
605                 am65_cpsw_init_port_emac_ale(port);
606         else
607                 am65_cpsw_init_port_switch_ale(port);
608
609         /* mac_sl should be configured via phy-link interface */
610         am65_cpsw_sl_ctl_reset(port);
611
612         ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
613         if (ret)
614                 goto error_cleanup;
615
616         /* restore vlan configurations */
617         vlan_for_each(ndev, cpsw_restore_vlans, port);
618
619         phylink_start(port->slave.phylink);
620
621         return 0;
622
623 error_cleanup:
624         am65_cpsw_nuss_ndo_slave_stop(ndev);
625         return ret;
626
627 runtime_put:
628         pm_runtime_put(common->dev);
629         return ret;
630 }
631
632 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
633 {
634         struct am65_cpsw_rx_chn *rx_chn = data;
635         struct cppi5_host_desc_t *desc_rx;
636         struct sk_buff *skb;
637         dma_addr_t buf_dma;
638         u32 buf_dma_len;
639         void **swdata;
640
641         desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
642         swdata = cppi5_hdesc_get_swdata(desc_rx);
643         skb = *swdata;
644         cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
645         k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
646
647         dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
648         k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
649
650         dev_kfree_skb_any(skb);
651 }
652
653 static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
654 {
655         struct skb_shared_hwtstamps *ssh;
656         u64 ns;
657
658         ns = ((u64)psdata[1] << 32) | psdata[0];
659
660         ssh = skb_hwtstamps(skb);
661         memset(ssh, 0, sizeof(*ssh));
662         ssh->hwtstamp = ns_to_ktime(ns);
663 }
664
665 /* RX psdata[2] word format - checksum information */
666 #define AM65_CPSW_RX_PSD_CSUM_ADD       GENMASK(15, 0)
667 #define AM65_CPSW_RX_PSD_CSUM_ERR       BIT(16)
668 #define AM65_CPSW_RX_PSD_IS_FRAGMENT    BIT(17)
669 #define AM65_CPSW_RX_PSD_IS_TCP         BIT(18)
670 #define AM65_CPSW_RX_PSD_IPV6_VALID     BIT(19)
671 #define AM65_CPSW_RX_PSD_IPV4_VALID     BIT(20)
672
673 static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
674 {
675         /* HW can verify IPv4/IPv6 TCP/UDP packets checksum
676          * csum information provides in psdata[2] word:
677          * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
678          * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
679          * bits - indicates IPv4/IPv6 packet
680          * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
681          * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
682          * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
683          */
684         skb_checksum_none_assert(skb);
685
686         if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
687                 return;
688
689         if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
690                           AM65_CPSW_RX_PSD_IPV4_VALID)) &&
691                           !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
692                 /* csum for fragmented packets is unsupported */
693                 if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
694                         skb->ip_summed = CHECKSUM_UNNECESSARY;
695         }
696 }
697
698 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
699                                      u32 flow_idx)
700 {
701         struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
702         u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
703         struct am65_cpsw_ndev_priv *ndev_priv;
704         struct am65_cpsw_ndev_stats *stats;
705         struct cppi5_host_desc_t *desc_rx;
706         struct device *dev = common->dev;
707         struct sk_buff *skb, *new_skb;
708         dma_addr_t desc_dma, buf_dma;
709         struct am65_cpsw_port *port;
710         struct net_device *ndev;
711         void **swdata;
712         u32 *psdata;
713         int ret = 0;
714
715         ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
716         if (ret) {
717                 if (ret != -ENODATA)
718                         dev_err(dev, "RX: pop chn fail %d\n", ret);
719                 return ret;
720         }
721
722         if (cppi5_desc_is_tdcm(desc_dma)) {
723                 dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
724                 return 0;
725         }
726
727         desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
728         dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
729                 __func__, flow_idx, &desc_dma);
730
731         swdata = cppi5_hdesc_get_swdata(desc_rx);
732         skb = *swdata;
733         cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
734         k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
735         pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
736         cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
737         dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
738         port = am65_common_get_port(common, port_id);
739         ndev = port->ndev;
740         skb->dev = ndev;
741
742         psdata = cppi5_hdesc_get_psdata(desc_rx);
743         /* add RX timestamp */
744         if (port->rx_ts_enabled)
745                 am65_cpsw_nuss_rx_ts(skb, psdata);
746         csum_info = psdata[2];
747         dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
748
749         dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
750
751         k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
752
753         new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
754         if (new_skb) {
755                 ndev_priv = netdev_priv(ndev);
756                 am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
757                 skb_put(skb, pkt_len);
758                 skb->protocol = eth_type_trans(skb, ndev);
759                 am65_cpsw_nuss_rx_csum(skb, csum_info);
760                 napi_gro_receive(&common->napi_rx, skb);
761
762                 stats = this_cpu_ptr(ndev_priv->stats);
763
764                 u64_stats_update_begin(&stats->syncp);
765                 stats->rx_packets++;
766                 stats->rx_bytes += pkt_len;
767                 u64_stats_update_end(&stats->syncp);
768                 kmemleak_not_leak(new_skb);
769         } else {
770                 ndev->stats.rx_dropped++;
771                 new_skb = skb;
772         }
773
774         if (netif_dormant(ndev)) {
775                 dev_kfree_skb_any(new_skb);
776                 ndev->stats.rx_dropped++;
777                 return 0;
778         }
779
780         ret = am65_cpsw_nuss_rx_push(common, new_skb);
781         if (WARN_ON(ret < 0)) {
782                 dev_kfree_skb_any(new_skb);
783                 ndev->stats.rx_errors++;
784                 ndev->stats.rx_dropped++;
785         }
786
787         return ret;
788 }
789
790 static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
791 {
792         struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
793         int flow = AM65_CPSW_MAX_RX_FLOWS;
794         int cur_budget, ret;
795         int num_rx = 0;
796
797         /* process every flow */
798         while (flow--) {
799                 cur_budget = budget - num_rx;
800
801                 while (cur_budget--) {
802                         ret = am65_cpsw_nuss_rx_packets(common, flow);
803                         if (ret)
804                                 break;
805                         num_rx++;
806                 }
807
808                 if (num_rx >= budget)
809                         break;
810         }
811
812         dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
813
814         if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
815                 if (common->rx_irq_disabled) {
816                         common->rx_irq_disabled = false;
817                         enable_irq(common->rx_chns.irq);
818                 }
819         }
820
821         return num_rx;
822 }
823
824 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
825                                      struct cppi5_host_desc_t *desc)
826 {
827         struct cppi5_host_desc_t *first_desc, *next_desc;
828         dma_addr_t buf_dma, next_desc_dma;
829         u32 buf_dma_len;
830
831         first_desc = desc;
832         next_desc = first_desc;
833
834         cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
835         k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
836
837         dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
838
839         next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
840         k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
841         while (next_desc_dma) {
842                 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
843                                                        next_desc_dma);
844                 cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
845                 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
846
847                 dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
848                                DMA_TO_DEVICE);
849
850                 next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
851                 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
852
853                 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
854         }
855
856         k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
857 }
858
859 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
860 {
861         struct am65_cpsw_tx_chn *tx_chn = data;
862         struct cppi5_host_desc_t *desc_tx;
863         struct sk_buff *skb;
864         void **swdata;
865
866         desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
867         swdata = cppi5_hdesc_get_swdata(desc_tx);
868         skb = *(swdata);
869         am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
870
871         dev_kfree_skb_any(skb);
872 }
873
874 static struct sk_buff *
875 am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
876                                dma_addr_t desc_dma)
877 {
878         struct am65_cpsw_ndev_priv *ndev_priv;
879         struct am65_cpsw_ndev_stats *stats;
880         struct cppi5_host_desc_t *desc_tx;
881         struct net_device *ndev;
882         struct sk_buff *skb;
883         void **swdata;
884
885         desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
886                                              desc_dma);
887         swdata = cppi5_hdesc_get_swdata(desc_tx);
888         skb = *(swdata);
889         am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
890
891         ndev = skb->dev;
892
893         am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
894
895         ndev_priv = netdev_priv(ndev);
896         stats = this_cpu_ptr(ndev_priv->stats);
897         u64_stats_update_begin(&stats->syncp);
898         stats->tx_packets++;
899         stats->tx_bytes += skb->len;
900         u64_stats_update_end(&stats->syncp);
901
902         return skb;
903 }
904
905 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
906                                    struct netdev_queue *netif_txq)
907 {
908         if (netif_tx_queue_stopped(netif_txq)) {
909                 /* Check whether the queue is stopped due to stalled
910                  * tx dma, if the queue is stopped then wake the queue
911                  * as we have free desc for tx
912                  */
913                 __netif_tx_lock(netif_txq, smp_processor_id());
914                 if (netif_running(ndev) &&
915                     (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
916                         netif_tx_wake_queue(netif_txq);
917
918                 __netif_tx_unlock(netif_txq);
919         }
920 }
921
922 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
923                                            int chn, unsigned int budget)
924 {
925         struct device *dev = common->dev;
926         struct am65_cpsw_tx_chn *tx_chn;
927         struct netdev_queue *netif_txq;
928         unsigned int total_bytes = 0;
929         struct net_device *ndev;
930         struct sk_buff *skb;
931         dma_addr_t desc_dma;
932         int res, num_tx = 0;
933
934         tx_chn = &common->tx_chns[chn];
935
936         while (true) {
937                 spin_lock(&tx_chn->lock);
938                 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
939                 spin_unlock(&tx_chn->lock);
940                 if (res == -ENODATA)
941                         break;
942
943                 if (cppi5_desc_is_tdcm(desc_dma)) {
944                         if (atomic_dec_and_test(&common->tdown_cnt))
945                                 complete(&common->tdown_complete);
946                         break;
947                 }
948
949                 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
950                 total_bytes = skb->len;
951                 ndev = skb->dev;
952                 napi_consume_skb(skb, budget);
953                 num_tx++;
954
955                 netif_txq = netdev_get_tx_queue(ndev, chn);
956
957                 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
958
959                 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
960         }
961
962         dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
963
964         return num_tx;
965 }
966
967 static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
968                                               int chn, unsigned int budget)
969 {
970         struct device *dev = common->dev;
971         struct am65_cpsw_tx_chn *tx_chn;
972         struct netdev_queue *netif_txq;
973         unsigned int total_bytes = 0;
974         struct net_device *ndev;
975         struct sk_buff *skb;
976         dma_addr_t desc_dma;
977         int res, num_tx = 0;
978
979         tx_chn = &common->tx_chns[chn];
980
981         while (true) {
982                 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
983                 if (res == -ENODATA)
984                         break;
985
986                 if (cppi5_desc_is_tdcm(desc_dma)) {
987                         if (atomic_dec_and_test(&common->tdown_cnt))
988                                 complete(&common->tdown_complete);
989                         break;
990                 }
991
992                 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
993
994                 ndev = skb->dev;
995                 total_bytes += skb->len;
996                 napi_consume_skb(skb, budget);
997                 num_tx++;
998         }
999
1000         if (!num_tx)
1001                 return 0;
1002
1003         netif_txq = netdev_get_tx_queue(ndev, chn);
1004
1005         netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1006
1007         am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1008
1009         dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1010
1011         return num_tx;
1012 }
1013
1014 static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
1015 {
1016         struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1017         int num_tx;
1018
1019         if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1020                 num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
1021         else
1022                 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
1023
1024         if (num_tx >= budget)
1025                 return budget;
1026
1027         if (napi_complete_done(napi_tx, num_tx))
1028                 enable_irq(tx_chn->irq);
1029
1030         return 0;
1031 }
1032
1033 static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
1034 {
1035         struct am65_cpsw_common *common = dev_id;
1036
1037         common->rx_irq_disabled = true;
1038         disable_irq_nosync(irq);
1039         napi_schedule(&common->napi_rx);
1040
1041         return IRQ_HANDLED;
1042 }
1043
1044 static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
1045 {
1046         struct am65_cpsw_tx_chn *tx_chn = dev_id;
1047
1048         disable_irq_nosync(irq);
1049         napi_schedule(&tx_chn->napi_tx);
1050
1051         return IRQ_HANDLED;
1052 }
1053
1054 static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
1055                                                  struct net_device *ndev)
1056 {
1057         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1058         struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
1059         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1060         struct device *dev = common->dev;
1061         struct am65_cpsw_tx_chn *tx_chn;
1062         struct netdev_queue *netif_txq;
1063         dma_addr_t desc_dma, buf_dma;
1064         int ret, q_idx, i;
1065         void **swdata;
1066         u32 *psdata;
1067         u32 pkt_len;
1068
1069         /* padding enabled in hw */
1070         pkt_len = skb_headlen(skb);
1071
1072         /* SKB TX timestamp */
1073         if (port->tx_ts_enabled)
1074                 am65_cpts_prep_tx_timestamp(common->cpts, skb);
1075
1076         q_idx = skb_get_queue_mapping(skb);
1077         dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
1078
1079         tx_chn = &common->tx_chns[q_idx];
1080         netif_txq = netdev_get_tx_queue(ndev, q_idx);
1081
1082         /* Map the linear buffer */
1083         buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
1084                                  DMA_TO_DEVICE);
1085         if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1086                 dev_err(dev, "Failed to map tx skb buffer\n");
1087                 ndev->stats.tx_errors++;
1088                 goto err_free_skb;
1089         }
1090
1091         first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1092         if (!first_desc) {
1093                 dev_dbg(dev, "Failed to allocate descriptor\n");
1094                 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
1095                                  DMA_TO_DEVICE);
1096                 goto busy_stop_q;
1097         }
1098
1099         cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1100                          AM65_CPSW_NAV_PS_DATA_SIZE);
1101         cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
1102         cppi5_hdesc_set_pkttype(first_desc, 0x7);
1103         cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
1104
1105         k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1106         cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
1107         swdata = cppi5_hdesc_get_swdata(first_desc);
1108         *(swdata) = skb;
1109         psdata = cppi5_hdesc_get_psdata(first_desc);
1110
1111         /* HW csum offload if enabled */
1112         psdata[2] = 0;
1113         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1114                 unsigned int cs_start, cs_offset;
1115
1116                 cs_start = skb_transport_offset(skb);
1117                 cs_offset = cs_start + skb->csum_offset;
1118                 /* HW numerates bytes starting from 1 */
1119                 psdata[2] = ((cs_offset + 1) << 24) |
1120                             ((cs_start + 1) << 16) | (skb->len - cs_start);
1121                 dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
1122         }
1123
1124         if (!skb_is_nonlinear(skb))
1125                 goto done_tx;
1126
1127         dev_dbg(dev, "fragmented SKB\n");
1128
1129         /* Handle the case where skb is fragmented in pages */
1130         cur_desc = first_desc;
1131         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1132                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1133                 u32 frag_size = skb_frag_size(frag);
1134
1135                 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1136                 if (!next_desc) {
1137                         dev_err(dev, "Failed to allocate descriptor\n");
1138                         goto busy_free_descs;
1139                 }
1140
1141                 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
1142                                            DMA_TO_DEVICE);
1143                 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1144                         dev_err(dev, "Failed to map tx skb page\n");
1145                         k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1146                         ndev->stats.tx_errors++;
1147                         goto err_free_descs;
1148                 }
1149
1150                 cppi5_hdesc_reset_hbdesc(next_desc);
1151                 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1152                 cppi5_hdesc_attach_buf(next_desc,
1153                                        buf_dma, frag_size, buf_dma, frag_size);
1154
1155                 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1156                                                       next_desc);
1157                 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
1158                 cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
1159
1160                 pkt_len += frag_size;
1161                 cur_desc = next_desc;
1162         }
1163         WARN_ON(pkt_len != skb->len);
1164
1165 done_tx:
1166         skb_tx_timestamp(skb);
1167
1168         /* report bql before sending packet */
1169         netdev_tx_sent_queue(netif_txq, pkt_len);
1170
1171         cppi5_hdesc_set_pktlen(first_desc, pkt_len);
1172         desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1173         if (AM65_CPSW_IS_CPSW2G(common)) {
1174                 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1175         } else {
1176                 spin_lock_bh(&tx_chn->lock);
1177                 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1178                 spin_unlock_bh(&tx_chn->lock);
1179         }
1180         if (ret) {
1181                 dev_err(dev, "can't push desc %d\n", ret);
1182                 /* inform bql */
1183                 netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1184                 ndev->stats.tx_errors++;
1185                 goto err_free_descs;
1186         }
1187
1188         if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1189                 netif_tx_stop_queue(netif_txq);
1190                 /* Barrier, so that stop_queue visible to other cpus */
1191                 smp_mb__after_atomic();
1192                 dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
1193
1194                 /* re-check for smp */
1195                 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1196                     MAX_SKB_FRAGS) {
1197                         netif_tx_wake_queue(netif_txq);
1198                         dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
1199                 }
1200         }
1201
1202         return NETDEV_TX_OK;
1203
1204 err_free_descs:
1205         am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1206 err_free_skb:
1207         ndev->stats.tx_dropped++;
1208         dev_kfree_skb_any(skb);
1209         return NETDEV_TX_OK;
1210
1211 busy_free_descs:
1212         am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1213 busy_stop_q:
1214         netif_tx_stop_queue(netif_txq);
1215         return NETDEV_TX_BUSY;
1216 }
1217
1218 static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
1219                                                     void *addr)
1220 {
1221         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1222         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1223         struct sockaddr *sockaddr = (struct sockaddr *)addr;
1224         int ret;
1225
1226         ret = eth_prepare_mac_addr_change(ndev, addr);
1227         if (ret < 0)
1228                 return ret;
1229
1230         ret = pm_runtime_resume_and_get(common->dev);
1231         if (ret < 0)
1232                 return ret;
1233
1234         cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
1235                            HOST_PORT_NUM, 0, 0);
1236         cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
1237                            HOST_PORT_NUM, ALE_SECURE, 0);
1238
1239         am65_cpsw_port_set_sl_mac(port, addr);
1240         eth_commit_mac_addr_change(ndev, sockaddr);
1241
1242         pm_runtime_put(common->dev);
1243
1244         return 0;
1245 }
1246
1247 static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
1248                                        struct ifreq *ifr)
1249 {
1250         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1251         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1252         u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
1253         struct hwtstamp_config cfg;
1254
1255         if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1256                 return -EOPNOTSUPP;
1257
1258         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1259                 return -EFAULT;
1260
1261         /* TX HW timestamp */
1262         switch (cfg.tx_type) {
1263         case HWTSTAMP_TX_OFF:
1264         case HWTSTAMP_TX_ON:
1265                 break;
1266         default:
1267                 return -ERANGE;
1268         }
1269
1270         switch (cfg.rx_filter) {
1271         case HWTSTAMP_FILTER_NONE:
1272                 port->rx_ts_enabled = false;
1273                 break;
1274         case HWTSTAMP_FILTER_ALL:
1275         case HWTSTAMP_FILTER_SOME:
1276         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1277         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1278         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1279         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1280         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1281         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1282         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1283         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1284         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1285         case HWTSTAMP_FILTER_PTP_V2_EVENT:
1286         case HWTSTAMP_FILTER_PTP_V2_SYNC:
1287         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1288         case HWTSTAMP_FILTER_NTP_ALL:
1289                 port->rx_ts_enabled = true;
1290                 cfg.rx_filter = HWTSTAMP_FILTER_ALL;
1291                 break;
1292         default:
1293                 return -ERANGE;
1294         }
1295
1296         port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
1297
1298         /* cfg TX timestamp */
1299         seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
1300                   AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
1301
1302         ts_vlan_ltype = ETH_P_8021Q;
1303
1304         ts_ctrl_ltype2 = ETH_P_1588 |
1305                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
1306                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
1307                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
1308                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
1309                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
1310                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
1311                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
1312                          AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
1313
1314         ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
1315                   AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
1316
1317         if (port->tx_ts_enabled)
1318                 ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
1319                            AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
1320
1321         writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
1322         writel(ts_vlan_ltype, port->port_base +
1323                AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
1324         writel(ts_ctrl_ltype2, port->port_base +
1325                AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
1326         writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
1327
1328         /* en/dis RX timestamp */
1329         am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
1330
1331         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1332 }
1333
1334 static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
1335                                        struct ifreq *ifr)
1336 {
1337         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1338         struct hwtstamp_config cfg;
1339
1340         if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1341                 return -EOPNOTSUPP;
1342
1343         cfg.flags = 0;
1344         cfg.tx_type = port->tx_ts_enabled ?
1345                       HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1346         cfg.rx_filter = port->rx_ts_enabled ?
1347                         HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1348
1349         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1350 }
1351
1352 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
1353                                           struct ifreq *req, int cmd)
1354 {
1355         struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1356
1357         if (!netif_running(ndev))
1358                 return -EINVAL;
1359
1360         switch (cmd) {
1361         case SIOCSHWTSTAMP:
1362                 return am65_cpsw_nuss_hwtstamp_set(ndev, req);
1363         case SIOCGHWTSTAMP:
1364                 return am65_cpsw_nuss_hwtstamp_get(ndev, req);
1365         }
1366
1367         return phylink_mii_ioctl(port->slave.phylink, req, cmd);
1368 }
1369
1370 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
1371                                          struct rtnl_link_stats64 *stats)
1372 {
1373         struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
1374         unsigned int start;
1375         int cpu;
1376
1377         for_each_possible_cpu(cpu) {
1378                 struct am65_cpsw_ndev_stats *cpu_stats;
1379                 u64 rx_packets;
1380                 u64 rx_bytes;
1381                 u64 tx_packets;
1382                 u64 tx_bytes;
1383
1384                 cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
1385                 do {
1386                         start = u64_stats_fetch_begin(&cpu_stats->syncp);
1387                         rx_packets = cpu_stats->rx_packets;
1388                         rx_bytes   = cpu_stats->rx_bytes;
1389                         tx_packets = cpu_stats->tx_packets;
1390                         tx_bytes   = cpu_stats->tx_bytes;
1391                 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
1392
1393                 stats->rx_packets += rx_packets;
1394                 stats->rx_bytes   += rx_bytes;
1395                 stats->tx_packets += tx_packets;
1396                 stats->tx_bytes   += tx_bytes;
1397         }
1398
1399         stats->rx_errors        = dev->stats.rx_errors;
1400         stats->rx_dropped       = dev->stats.rx_dropped;
1401         stats->tx_dropped       = dev->stats.tx_dropped;
1402 }
1403
1404 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
1405         .ndo_open               = am65_cpsw_nuss_ndo_slave_open,
1406         .ndo_stop               = am65_cpsw_nuss_ndo_slave_stop,
1407         .ndo_start_xmit         = am65_cpsw_nuss_ndo_slave_xmit,
1408         .ndo_set_rx_mode        = am65_cpsw_nuss_ndo_slave_set_rx_mode,
1409         .ndo_get_stats64        = am65_cpsw_nuss_ndo_get_stats,
1410         .ndo_validate_addr      = eth_validate_addr,
1411         .ndo_set_mac_address    = am65_cpsw_nuss_ndo_slave_set_mac_address,
1412         .ndo_tx_timeout         = am65_cpsw_nuss_ndo_host_tx_timeout,
1413         .ndo_vlan_rx_add_vid    = am65_cpsw_nuss_ndo_slave_add_vid,
1414         .ndo_vlan_rx_kill_vid   = am65_cpsw_nuss_ndo_slave_kill_vid,
1415         .ndo_eth_ioctl          = am65_cpsw_nuss_ndo_slave_ioctl,
1416         .ndo_setup_tc           = am65_cpsw_qos_ndo_setup_tc,
1417 };
1418
1419 static void am65_cpsw_disable_phy(struct phy *phy)
1420 {
1421         phy_power_off(phy);
1422         phy_exit(phy);
1423 }
1424
1425 static int am65_cpsw_enable_phy(struct phy *phy)
1426 {
1427         int ret;
1428
1429         ret = phy_init(phy);
1430         if (ret < 0)
1431                 return ret;
1432
1433         ret = phy_power_on(phy);
1434         if (ret < 0) {
1435                 phy_exit(phy);
1436                 return ret;
1437         }
1438
1439         return 0;
1440 }
1441
1442 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
1443 {
1444         struct am65_cpsw_port *port;
1445         struct phy *phy;
1446         int i;
1447
1448         for (i = 0; i < common->port_num; i++) {
1449                 port = &common->ports[i];
1450                 phy = port->slave.serdes_phy;
1451                 if (phy)
1452                         am65_cpsw_disable_phy(phy);
1453         }
1454 }
1455
1456 static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
1457                                      struct am65_cpsw_port *port)
1458 {
1459         const char *name = "serdes-phy";
1460         struct phy *phy;
1461         int ret;
1462
1463         phy = devm_of_phy_get(dev, port_np, name);
1464         if (PTR_ERR(phy) == -ENODEV)
1465                 return 0;
1466
1467         /* Serdes PHY exists. Store it. */
1468         port->slave.serdes_phy = phy;
1469
1470         ret =  am65_cpsw_enable_phy(phy);
1471         if (ret < 0)
1472                 goto err_phy;
1473
1474         return 0;
1475
1476 err_phy:
1477         devm_phy_put(dev, phy);
1478         return ret;
1479 }
1480
1481 static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
1482                                       const struct phylink_link_state *state)
1483 {
1484         struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
1485                                                           phylink_config);
1486         struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
1487         struct am65_cpsw_common *common = port->common;
1488
1489         if (common->pdata.extra_modes & BIT(state->interface))
1490                 writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
1491                        port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
1492 }
1493
1494 static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
1495                                          phy_interface_t interface)
1496 {
1497         struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
1498                                                           phylink_config);
1499         struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
1500         struct am65_cpsw_common *common = port->common;
1501         struct net_device *ndev = port->ndev;
1502         int tmo;
1503
1504         /* disable forwarding */
1505         cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1506
1507         cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
1508
1509         tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
1510         dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
1511                 cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
1512
1513         cpsw_sl_ctl_reset(port->slave.mac_sl);
1514
1515         am65_cpsw_qos_link_down(ndev);
1516         netif_tx_stop_all_queues(ndev);
1517 }
1518
1519 static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
1520                                        unsigned int mode, phy_interface_t interface, int speed,
1521                                        int duplex, bool tx_pause, bool rx_pause)
1522 {
1523         struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
1524                                                           phylink_config);
1525         struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
1526         struct am65_cpsw_common *common = port->common;
1527         u32 mac_control = CPSW_SL_CTL_GMII_EN;
1528         struct net_device *ndev = port->ndev;
1529
1530         if (speed == SPEED_1000)
1531                 mac_control |= CPSW_SL_CTL_GIG;
1532         if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
1533                 /* Can be used with in band mode only */
1534                 mac_control |= CPSW_SL_CTL_EXT_EN;
1535         if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
1536                 mac_control |= CPSW_SL_CTL_IFCTL_A;
1537         if (duplex)
1538                 mac_control |= CPSW_SL_CTL_FULLDUPLEX;
1539
1540         /* rx_pause/tx_pause */
1541         if (rx_pause)
1542                 mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
1543
1544         if (tx_pause)
1545                 mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
1546
1547         cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
1548
1549         /* enable forwarding */
1550         cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1551
1552         am65_cpsw_qos_link_up(ndev, speed);
1553         netif_tx_wake_all_queues(ndev);
1554 }
1555
1556 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
1557         .mac_config = am65_cpsw_nuss_mac_config,
1558         .mac_link_down = am65_cpsw_nuss_mac_link_down,
1559         .mac_link_up = am65_cpsw_nuss_mac_link_up,
1560 };
1561
1562 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
1563 {
1564         struct am65_cpsw_common *common = port->common;
1565
1566         if (!port->disabled)
1567                 return;
1568
1569         cpsw_ale_control_set(common->ale, port->port_id,
1570                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1571
1572         cpsw_sl_reset(port->slave.mac_sl, 100);
1573         cpsw_sl_ctl_reset(port->slave.mac_sl);
1574 }
1575
1576 static void am65_cpsw_nuss_free_tx_chns(void *data)
1577 {
1578         struct am65_cpsw_common *common = data;
1579         int i;
1580
1581         for (i = 0; i < common->tx_ch_num; i++) {
1582                 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1583
1584                 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1585                         k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1586
1587                 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1588                         k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1589
1590                 memset(tx_chn, 0, sizeof(*tx_chn));
1591         }
1592 }
1593
1594 void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
1595 {
1596         struct device *dev = common->dev;
1597         int i;
1598
1599         devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
1600
1601         for (i = 0; i < common->tx_ch_num; i++) {
1602                 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1603
1604                 if (tx_chn->irq)
1605                         devm_free_irq(dev, tx_chn->irq, tx_chn);
1606
1607                 netif_napi_del(&tx_chn->napi_tx);
1608
1609                 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1610                         k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1611
1612                 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1613                         k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1614
1615                 memset(tx_chn, 0, sizeof(*tx_chn));
1616         }
1617 }
1618
1619 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
1620 {
1621         struct device *dev = common->dev;
1622         int i, ret = 0;
1623
1624         for (i = 0; i < common->tx_ch_num; i++) {
1625                 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1626
1627                 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
1628                                   am65_cpsw_nuss_tx_poll);
1629
1630                 ret = devm_request_irq(dev, tx_chn->irq,
1631                                        am65_cpsw_nuss_tx_irq,
1632                                        IRQF_TRIGGER_HIGH,
1633                                        tx_chn->tx_chn_name, tx_chn);
1634                 if (ret) {
1635                         dev_err(dev, "failure requesting tx%u irq %u, %d\n",
1636                                 tx_chn->id, tx_chn->irq, ret);
1637                         goto err;
1638                 }
1639         }
1640
1641 err:
1642         return ret;
1643 }
1644
1645 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
1646 {
1647         u32  max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
1648         struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
1649         struct device *dev = common->dev;
1650         struct k3_ring_cfg ring_cfg = {
1651                 .elm_size = K3_RINGACC_RING_ELSIZE_8,
1652                 .mode = K3_RINGACC_RING_MODE_RING,
1653                 .flags = 0
1654         };
1655         u32 hdesc_size;
1656         int i, ret = 0;
1657
1658         hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
1659                                            AM65_CPSW_NAV_SW_DATA_SIZE);
1660
1661         tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
1662         tx_cfg.tx_cfg = ring_cfg;
1663         tx_cfg.txcq_cfg = ring_cfg;
1664         tx_cfg.tx_cfg.size = max_desc_num;
1665         tx_cfg.txcq_cfg.size = max_desc_num;
1666
1667         for (i = 0; i < common->tx_ch_num; i++) {
1668                 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1669
1670                 snprintf(tx_chn->tx_chn_name,
1671                          sizeof(tx_chn->tx_chn_name), "tx%d", i);
1672
1673                 spin_lock_init(&tx_chn->lock);
1674                 tx_chn->common = common;
1675                 tx_chn->id = i;
1676                 tx_chn->descs_num = max_desc_num;
1677
1678                 tx_chn->tx_chn =
1679                         k3_udma_glue_request_tx_chn(dev,
1680                                                     tx_chn->tx_chn_name,
1681                                                     &tx_cfg);
1682                 if (IS_ERR(tx_chn->tx_chn)) {
1683                         ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
1684                                             "Failed to request tx dma channel\n");
1685                         goto err;
1686                 }
1687                 tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
1688
1689                 tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
1690                                                                   tx_chn->descs_num,
1691                                                                   hdesc_size,
1692                                                                   tx_chn->tx_chn_name);
1693                 if (IS_ERR(tx_chn->desc_pool)) {
1694                         ret = PTR_ERR(tx_chn->desc_pool);
1695                         dev_err(dev, "Failed to create poll %d\n", ret);
1696                         goto err;
1697                 }
1698
1699                 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
1700                 if (tx_chn->irq <= 0) {
1701                         dev_err(dev, "Failed to get tx dma irq %d\n",
1702                                 tx_chn->irq);
1703                         goto err;
1704                 }
1705
1706                 snprintf(tx_chn->tx_chn_name,
1707                          sizeof(tx_chn->tx_chn_name), "%s-tx%d",
1708                          dev_name(dev), tx_chn->id);
1709         }
1710
1711         ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
1712         if (ret) {
1713                 dev_err(dev, "Failed to add tx NAPI %d\n", ret);
1714                 goto err;
1715         }
1716
1717 err:
1718         i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
1719         if (i) {
1720                 dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
1721                 return i;
1722         }
1723
1724         return ret;
1725 }
1726
1727 static void am65_cpsw_nuss_free_rx_chns(void *data)
1728 {
1729         struct am65_cpsw_common *common = data;
1730         struct am65_cpsw_rx_chn *rx_chn;
1731
1732         rx_chn = &common->rx_chns;
1733
1734         if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
1735                 k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
1736
1737         if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
1738                 k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
1739 }
1740
1741 static void am65_cpsw_nuss_remove_rx_chns(void *data)
1742 {
1743         struct am65_cpsw_common *common = data;
1744         struct am65_cpsw_rx_chn *rx_chn;
1745         struct device *dev = common->dev;
1746
1747         rx_chn = &common->rx_chns;
1748         devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
1749
1750         if (!(rx_chn->irq < 0))
1751                 devm_free_irq(dev, rx_chn->irq, common);
1752
1753         netif_napi_del(&common->napi_rx);
1754
1755         if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
1756                 k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
1757
1758         if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
1759                 k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
1760
1761         common->rx_flow_id_base = -1;
1762 }
1763
1764 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
1765 {
1766         struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
1767         struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
1768         u32  max_desc_num = AM65_CPSW_MAX_RX_DESC;
1769         struct device *dev = common->dev;
1770         u32 hdesc_size;
1771         u32 fdqring_id;
1772         int i, ret = 0;
1773
1774         hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
1775                                            AM65_CPSW_NAV_SW_DATA_SIZE);
1776
1777         rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
1778         rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
1779         rx_cfg.flow_id_base = common->rx_flow_id_base;
1780
1781         /* init all flows */
1782         rx_chn->dev = dev;
1783         rx_chn->descs_num = max_desc_num;
1784
1785         rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
1786         if (IS_ERR(rx_chn->rx_chn)) {
1787                 ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
1788                                     "Failed to request rx dma channel\n");
1789                 goto err;
1790         }
1791         rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
1792
1793         rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
1794                                                           rx_chn->descs_num,
1795                                                           hdesc_size, "rx");
1796         if (IS_ERR(rx_chn->desc_pool)) {
1797                 ret = PTR_ERR(rx_chn->desc_pool);
1798                 dev_err(dev, "Failed to create rx poll %d\n", ret);
1799                 goto err;
1800         }
1801
1802         common->rx_flow_id_base =
1803                         k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
1804         dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
1805
1806         fdqring_id = K3_RINGACC_RING_ID_ANY;
1807         for (i = 0; i < rx_cfg.flow_id_num; i++) {
1808                 struct k3_ring_cfg rxring_cfg = {
1809                         .elm_size = K3_RINGACC_RING_ELSIZE_8,
1810                         .mode = K3_RINGACC_RING_MODE_RING,
1811                         .flags = 0,
1812                 };
1813                 struct k3_ring_cfg fdqring_cfg = {
1814                         .elm_size = K3_RINGACC_RING_ELSIZE_8,
1815                         .flags = K3_RINGACC_RING_SHARED,
1816                 };
1817                 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
1818                         .rx_cfg = rxring_cfg,
1819                         .rxfdq_cfg = fdqring_cfg,
1820                         .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
1821                         .src_tag_lo_sel =
1822                                 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
1823                 };
1824
1825                 rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
1826                 rx_flow_cfg.rx_cfg.size = max_desc_num;
1827                 rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
1828                 rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
1829
1830                 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
1831                                                 i, &rx_flow_cfg);
1832                 if (ret) {
1833                         dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
1834                         goto err;
1835                 }
1836                 if (!i)
1837                         fdqring_id =
1838                                 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
1839                                                                 i);
1840
1841                 rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
1842
1843                 if (rx_chn->irq <= 0) {
1844                         dev_err(dev, "Failed to get rx dma irq %d\n",
1845                                 rx_chn->irq);
1846                         ret = -ENXIO;
1847                         goto err;
1848                 }
1849         }
1850
1851         netif_napi_add(common->dma_ndev, &common->napi_rx,
1852                        am65_cpsw_nuss_rx_poll);
1853
1854         ret = devm_request_irq(dev, rx_chn->irq,
1855                                am65_cpsw_nuss_rx_irq,
1856                                IRQF_TRIGGER_HIGH, dev_name(dev), common);
1857         if (ret) {
1858                 dev_err(dev, "failure requesting rx irq %u, %d\n",
1859                         rx_chn->irq, ret);
1860                 goto err;
1861         }
1862
1863 err:
1864         i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
1865         if (i) {
1866                 dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
1867                 return i;
1868         }
1869
1870         return ret;
1871 }
1872
1873 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
1874 {
1875         struct am65_cpsw_host *host_p = am65_common_get_host(common);
1876
1877         host_p->common = common;
1878         host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
1879         host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
1880
1881         return 0;
1882 }
1883
1884 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
1885                                            int slave, u8 *mac_addr)
1886 {
1887         u32 mac_lo, mac_hi, offset;
1888         struct regmap *syscon;
1889         int ret;
1890
1891         syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
1892         if (IS_ERR(syscon)) {
1893                 if (PTR_ERR(syscon) == -ENODEV)
1894                         return 0;
1895                 return PTR_ERR(syscon);
1896         }
1897
1898         ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
1899                                          &offset);
1900         if (ret)
1901                 return ret;
1902
1903         regmap_read(syscon, offset, &mac_lo);
1904         regmap_read(syscon, offset + 4, &mac_hi);
1905
1906         mac_addr[0] = (mac_hi >> 8) & 0xff;
1907         mac_addr[1] = mac_hi & 0xff;
1908         mac_addr[2] = (mac_lo >> 24) & 0xff;
1909         mac_addr[3] = (mac_lo >> 16) & 0xff;
1910         mac_addr[4] = (mac_lo >> 8) & 0xff;
1911         mac_addr[5] = mac_lo & 0xff;
1912
1913         return 0;
1914 }
1915
1916 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
1917 {
1918         struct device *dev = common->dev;
1919         struct device_node *node;
1920         struct am65_cpts *cpts;
1921         void __iomem *reg_base;
1922
1923         if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1924                 return 0;
1925
1926         node = of_get_child_by_name(dev->of_node, "cpts");
1927         if (!node) {
1928                 dev_err(dev, "%s cpts not found\n", __func__);
1929                 return -ENOENT;
1930         }
1931
1932         reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
1933         cpts = am65_cpts_create(dev, reg_base, node);
1934         if (IS_ERR(cpts)) {
1935                 int ret = PTR_ERR(cpts);
1936
1937                 of_node_put(node);
1938                 if (ret == -EOPNOTSUPP) {
1939                         dev_info(dev, "cpts disabled\n");
1940                         return 0;
1941                 }
1942
1943                 dev_err(dev, "cpts create err %d\n", ret);
1944                 return ret;
1945         }
1946         common->cpts = cpts;
1947         /* Forbid PM runtime if CPTS is running.
1948          * K3 CPSWxG modules may completely lose context during ON->OFF
1949          * transitions depending on integration.
1950          * AM65x/J721E MCU CPSW2G: false
1951          * J721E MAIN_CPSW9G: true
1952          */
1953         pm_runtime_forbid(dev);
1954
1955         return 0;
1956 }
1957
1958 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
1959 {
1960         struct device_node *node, *port_np;
1961         struct device *dev = common->dev;
1962         int ret;
1963
1964         node = of_get_child_by_name(dev->of_node, "ethernet-ports");
1965         if (!node)
1966                 return -ENOENT;
1967
1968         for_each_child_of_node(node, port_np) {
1969                 struct am65_cpsw_port *port;
1970                 u32 port_id;
1971
1972                 /* it is not a slave port node, continue */
1973                 if (strcmp(port_np->name, "port"))
1974                         continue;
1975
1976                 ret = of_property_read_u32(port_np, "reg", &port_id);
1977                 if (ret < 0) {
1978                         dev_err(dev, "%pOF error reading port_id %d\n",
1979                                 port_np, ret);
1980                         goto of_node_put;
1981                 }
1982
1983                 if (!port_id || port_id > common->port_num) {
1984                         dev_err(dev, "%pOF has invalid port_id %u %s\n",
1985                                 port_np, port_id, port_np->name);
1986                         ret = -EINVAL;
1987                         goto of_node_put;
1988                 }
1989
1990                 port = am65_common_get_port(common, port_id);
1991                 port->port_id = port_id;
1992                 port->common = common;
1993                 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
1994                                   AM65_CPSW_NU_PORTS_OFFSET * (port_id);
1995                 if (common->pdata.extra_modes)
1996                         port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
1997                 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
1998                                   (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
1999                 port->name = of_get_property(port_np, "label", NULL);
2000                 port->fetch_ram_base =
2001                                 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
2002                                 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
2003
2004                 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
2005                 if (IS_ERR(port->slave.mac_sl)) {
2006                         ret = PTR_ERR(port->slave.mac_sl);
2007                         goto of_node_put;
2008                 }
2009
2010                 port->disabled = !of_device_is_available(port_np);
2011                 if (port->disabled) {
2012                         common->disabled_ports_mask |= BIT(port->port_id);
2013                         continue;
2014                 }
2015
2016                 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
2017                 if (IS_ERR(port->slave.ifphy)) {
2018                         ret = PTR_ERR(port->slave.ifphy);
2019                         dev_err(dev, "%pOF error retrieving port phy: %d\n",
2020                                 port_np, ret);
2021                         goto of_node_put;
2022                 }
2023
2024                 /* Initialize the Serdes PHY for the port */
2025                 ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
2026                 if (ret)
2027                         return ret;
2028
2029                 port->slave.mac_only =
2030                                 of_property_read_bool(port_np, "ti,mac-only");
2031
2032                 /* get phy/link info */
2033                 port->slave.phy_node = port_np;
2034                 ret = of_get_phy_mode(port_np, &port->slave.phy_if);
2035                 if (ret) {
2036                         dev_err(dev, "%pOF read phy-mode err %d\n",
2037                                 port_np, ret);
2038                         goto of_node_put;
2039                 }
2040
2041                 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
2042                 if (ret)
2043                         goto of_node_put;
2044
2045                 ret = of_get_mac_address(port_np, port->slave.mac_addr);
2046                 if (ret) {
2047                         am65_cpsw_am654_get_efuse_macid(port_np,
2048                                                         port->port_id,
2049                                                         port->slave.mac_addr);
2050                         if (!is_valid_ether_addr(port->slave.mac_addr)) {
2051                                 eth_random_addr(port->slave.mac_addr);
2052                                 dev_err(dev, "Use random MAC address\n");
2053                         }
2054                 }
2055         }
2056         of_node_put(node);
2057
2058         /* is there at least one ext.port */
2059         if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
2060                 dev_err(dev, "No Ext. port are available\n");
2061                 return -ENODEV;
2062         }
2063
2064         return 0;
2065
2066 of_node_put:
2067         of_node_put(port_np);
2068         of_node_put(node);
2069         return ret;
2070 }
2071
2072 static void am65_cpsw_pcpu_stats_free(void *data)
2073 {
2074         struct am65_cpsw_ndev_stats __percpu *stats = data;
2075
2076         free_percpu(stats);
2077 }
2078
2079 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
2080 {
2081         struct am65_cpsw_port *port;
2082         int i;
2083
2084         for (i = 0; i < common->port_num; i++) {
2085                 port = &common->ports[i];
2086                 if (port->slave.phylink)
2087                         phylink_destroy(port->slave.phylink);
2088         }
2089 }
2090
2091 static int
2092 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
2093 {
2094         struct am65_cpsw_ndev_priv *ndev_priv;
2095         struct device *dev = common->dev;
2096         struct am65_cpsw_port *port;
2097         struct phylink *phylink;
2098         int ret;
2099
2100         port = &common->ports[port_idx];
2101
2102         if (port->disabled)
2103                 return 0;
2104
2105         /* alloc netdev */
2106         port->ndev = devm_alloc_etherdev_mqs(common->dev,
2107                                              sizeof(struct am65_cpsw_ndev_priv),
2108                                              AM65_CPSW_MAX_TX_QUEUES,
2109                                              AM65_CPSW_MAX_RX_QUEUES);
2110         if (!port->ndev) {
2111                 dev_err(dev, "error allocating slave net_device %u\n",
2112                         port->port_id);
2113                 return -ENOMEM;
2114         }
2115
2116         ndev_priv = netdev_priv(port->ndev);
2117         ndev_priv->port = port;
2118         ndev_priv->msg_enable = AM65_CPSW_DEBUG;
2119         SET_NETDEV_DEV(port->ndev, dev);
2120
2121         eth_hw_addr_set(port->ndev, port->slave.mac_addr);
2122
2123         port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
2124         port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
2125         port->ndev->hw_features = NETIF_F_SG |
2126                                   NETIF_F_RXCSUM |
2127                                   NETIF_F_HW_CSUM |
2128                                   NETIF_F_HW_TC;
2129         port->ndev->features = port->ndev->hw_features |
2130                                NETIF_F_HW_VLAN_CTAG_FILTER;
2131         port->ndev->vlan_features |=  NETIF_F_SG;
2132         port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
2133         port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
2134
2135         /* Configuring Phylink */
2136         port->slave.phylink_config.dev = &port->ndev->dev;
2137         port->slave.phylink_config.type = PHYLINK_NETDEV;
2138         port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
2139         port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
2140
2141         if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
2142                 phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
2143         } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
2144                 __set_bit(PHY_INTERFACE_MODE_RMII,
2145                           port->slave.phylink_config.supported_interfaces);
2146         } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
2147                 __set_bit(PHY_INTERFACE_MODE_QSGMII,
2148                           port->slave.phylink_config.supported_interfaces);
2149         } else {
2150                 dev_err(dev, "selected phy-mode is not supported\n");
2151                 return -EOPNOTSUPP;
2152         }
2153
2154         phylink = phylink_create(&port->slave.phylink_config,
2155                                  of_node_to_fwnode(port->slave.phy_node),
2156                                  port->slave.phy_if,
2157                                  &am65_cpsw_phylink_mac_ops);
2158         if (IS_ERR(phylink))
2159                 return PTR_ERR(phylink);
2160
2161         port->slave.phylink = phylink;
2162
2163         /* Disable TX checksum offload by default due to HW bug */
2164         if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
2165                 port->ndev->features &= ~NETIF_F_HW_CSUM;
2166
2167         ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
2168         if (!ndev_priv->stats)
2169                 return -ENOMEM;
2170
2171         ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
2172                                        ndev_priv->stats);
2173         if (ret)
2174                 dev_err(dev, "failed to add percpu stat free action %d\n", ret);
2175
2176         if (!common->dma_ndev)
2177                 common->dma_ndev = port->ndev;
2178
2179         return ret;
2180 }
2181
2182 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
2183 {
2184         int ret;
2185         int i;
2186
2187         for (i = 0; i < common->port_num; i++) {
2188                 ret = am65_cpsw_nuss_init_port_ndev(common, i);
2189                 if (ret)
2190                         return ret;
2191         }
2192
2193         return ret;
2194 }
2195
2196 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
2197 {
2198         struct am65_cpsw_port *port;
2199         int i;
2200
2201         for (i = 0; i < common->port_num; i++) {
2202                 port = &common->ports[i];
2203                 if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
2204                         unregister_netdev(port->ndev);
2205         }
2206 }
2207
2208 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
2209 {
2210         int set_val = 0;
2211         int i;
2212
2213         if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
2214                 set_val = 1;
2215
2216         dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
2217
2218         for (i = 1; i <= common->port_num; i++) {
2219                 struct am65_cpsw_port *port = am65_common_get_port(common, i);
2220                 struct am65_cpsw_ndev_priv *priv;
2221
2222                 if (!port->ndev)
2223                         continue;
2224
2225                 priv = am65_ndev_to_priv(port->ndev);
2226                 priv->offload_fwd_mark = set_val;
2227         }
2228 }
2229
2230 bool am65_cpsw_port_dev_check(const struct net_device *ndev)
2231 {
2232         if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
2233                 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2234
2235                 return !common->is_emac_mode;
2236         }
2237
2238         return false;
2239 }
2240
2241 static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
2242                                          struct net_device *br_ndev,
2243                                          struct netlink_ext_ack *extack)
2244 {
2245         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2246         struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2247         int err;
2248
2249         if (!common->br_members) {
2250                 common->hw_bridge_dev = br_ndev;
2251         } else {
2252                 /* This is adding the port to a second bridge, this is
2253                  * unsupported
2254                  */
2255                 if (common->hw_bridge_dev != br_ndev)
2256                         return -EOPNOTSUPP;
2257         }
2258
2259         err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
2260                                             false, extack);
2261         if (err)
2262                 return err;
2263
2264         common->br_members |= BIT(priv->port->port_id);
2265
2266         am65_cpsw_port_offload_fwd_mark_update(common);
2267
2268         return NOTIFY_DONE;
2269 }
2270
2271 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
2272 {
2273         struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2274         struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2275
2276         switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
2277
2278         common->br_members &= ~BIT(priv->port->port_id);
2279
2280         am65_cpsw_port_offload_fwd_mark_update(common);
2281
2282         if (!common->br_members)
2283                 common->hw_bridge_dev = NULL;
2284 }
2285
2286 /* netdev notifier */
2287 static int am65_cpsw_netdevice_event(struct notifier_block *unused,
2288                                      unsigned long event, void *ptr)
2289 {
2290         struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
2291         struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2292         struct netdev_notifier_changeupper_info *info;
2293         int ret = NOTIFY_DONE;
2294
2295         if (!am65_cpsw_port_dev_check(ndev))
2296                 return NOTIFY_DONE;
2297
2298         switch (event) {
2299         case NETDEV_CHANGEUPPER:
2300                 info = ptr;
2301
2302                 if (netif_is_bridge_master(info->upper_dev)) {
2303                         if (info->linking)
2304                                 ret = am65_cpsw_netdevice_port_link(ndev,
2305                                                                     info->upper_dev,
2306                                                                     extack);
2307                         else
2308                                 am65_cpsw_netdevice_port_unlink(ndev);
2309                 }
2310                 break;
2311         default:
2312                 return NOTIFY_DONE;
2313         }
2314
2315         return notifier_from_errno(ret);
2316 }
2317
2318 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
2319 {
2320         int ret = 0;
2321
2322         if (AM65_CPSW_IS_CPSW2G(cpsw) ||
2323             !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2324                 return 0;
2325
2326         cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
2327         ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2328         if (ret) {
2329                 dev_err(cpsw->dev, "can't register netdevice notifier\n");
2330                 return ret;
2331         }
2332
2333         ret = am65_cpsw_switchdev_register_notifiers(cpsw);
2334         if (ret)
2335                 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2336
2337         return ret;
2338 }
2339
2340 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
2341 {
2342         if (AM65_CPSW_IS_CPSW2G(cpsw) ||
2343             !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2344                 return;
2345
2346         am65_cpsw_switchdev_unregister_notifiers(cpsw);
2347         unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2348 }
2349
2350 static const struct devlink_ops am65_cpsw_devlink_ops = {};
2351
2352 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
2353 {
2354         cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
2355                            ALE_MCAST_BLOCK_LEARN_FWD);
2356 }
2357
2358 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
2359 {
2360         struct am65_cpsw_host *host = am65_common_get_host(common);
2361
2362         writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2363
2364         am65_cpsw_init_stp_ale_entry(common);
2365
2366         cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
2367         dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
2368         cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
2369 }
2370
2371 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
2372 {
2373         struct am65_cpsw_host *host = am65_common_get_host(common);
2374
2375         writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2376
2377         cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
2378         dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
2379
2380         /* learning make no sense in multi-mac mode */
2381         cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
2382 }
2383
2384 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
2385                                         struct devlink_param_gset_ctx *ctx)
2386 {
2387         struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
2388         struct am65_cpsw_common *common = dl_priv->common;
2389
2390         dev_dbg(common->dev, "%s id:%u\n", __func__, id);
2391
2392         if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
2393                 return -EOPNOTSUPP;
2394
2395         ctx->val.vbool = !common->is_emac_mode;
2396
2397         return 0;
2398 }
2399
2400 static void am65_cpsw_init_port_emac_ale(struct  am65_cpsw_port *port)
2401 {
2402         struct am65_cpsw_slave_data *slave = &port->slave;
2403         struct am65_cpsw_common *common = port->common;
2404         u32 port_mask;
2405
2406         writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2407
2408         if (slave->mac_only)
2409                 /* enable mac-only mode on port */
2410                 cpsw_ale_control_set(common->ale, port->port_id,
2411                                      ALE_PORT_MACONLY, 1);
2412
2413         cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
2414
2415         port_mask = BIT(port->port_id) | ALE_PORT_HOST;
2416
2417         cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
2418                            HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
2419         cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
2420                            port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
2421 }
2422
2423 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
2424 {
2425         struct am65_cpsw_slave_data *slave = &port->slave;
2426         struct am65_cpsw_common *cpsw = port->common;
2427         u32 port_mask;
2428
2429         cpsw_ale_control_set(cpsw->ale, port->port_id,
2430                              ALE_PORT_NOLEARN, 0);
2431
2432         cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
2433                            HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
2434                            slave->port_vlan);
2435
2436         port_mask = BIT(port->port_id) | ALE_PORT_HOST;
2437
2438         cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
2439                            port_mask, ALE_VLAN, slave->port_vlan,
2440                            ALE_MCAST_FWD_2);
2441
2442         writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2443
2444         cpsw_ale_control_set(cpsw->ale, port->port_id,
2445                              ALE_PORT_MACONLY, 0);
2446 }
2447
2448 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
2449                                         struct devlink_param_gset_ctx *ctx)
2450 {
2451         struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
2452         struct am65_cpsw_common *cpsw = dl_priv->common;
2453         bool switch_en = ctx->val.vbool;
2454         bool if_running = false;
2455         int i;
2456
2457         dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
2458
2459         if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
2460                 return -EOPNOTSUPP;
2461
2462         if (switch_en == !cpsw->is_emac_mode)
2463                 return 0;
2464
2465         if (!switch_en && cpsw->br_members) {
2466                 dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
2467                 return -EINVAL;
2468         }
2469
2470         rtnl_lock();
2471
2472         cpsw->is_emac_mode = !switch_en;
2473
2474         for (i = 0; i < cpsw->port_num; i++) {
2475                 struct net_device *sl_ndev = cpsw->ports[i].ndev;
2476
2477                 if (!sl_ndev || !netif_running(sl_ndev))
2478                         continue;
2479
2480                 if_running = true;
2481         }
2482
2483         if (!if_running) {
2484                 /* all ndevs are down */
2485                 for (i = 0; i < cpsw->port_num; i++) {
2486                         struct net_device *sl_ndev = cpsw->ports[i].ndev;
2487                         struct am65_cpsw_slave_data *slave;
2488
2489                         if (!sl_ndev)
2490                                 continue;
2491
2492                         slave = am65_ndev_to_slave(sl_ndev);
2493                         if (switch_en)
2494                                 slave->port_vlan = cpsw->default_vlan;
2495                         else
2496                                 slave->port_vlan = 0;
2497                 }
2498
2499                 goto exit;
2500         }
2501
2502         cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
2503         /* clean up ALE table */
2504         cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
2505         cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
2506
2507         if (switch_en) {
2508                 dev_info(cpsw->dev, "Enable switch mode\n");
2509
2510                 am65_cpsw_init_host_port_switch(cpsw);
2511
2512                 for (i = 0; i < cpsw->port_num; i++) {
2513                         struct net_device *sl_ndev = cpsw->ports[i].ndev;
2514                         struct am65_cpsw_slave_data *slave;
2515                         struct am65_cpsw_port *port;
2516
2517                         if (!sl_ndev)
2518                                 continue;
2519
2520                         port = am65_ndev_to_port(sl_ndev);
2521                         slave = am65_ndev_to_slave(sl_ndev);
2522                         slave->port_vlan = cpsw->default_vlan;
2523
2524                         if (netif_running(sl_ndev))
2525                                 am65_cpsw_init_port_switch_ale(port);
2526                 }
2527
2528         } else {
2529                 dev_info(cpsw->dev, "Disable switch mode\n");
2530
2531                 am65_cpsw_init_host_port_emac(cpsw);
2532
2533                 for (i = 0; i < cpsw->port_num; i++) {
2534                         struct net_device *sl_ndev = cpsw->ports[i].ndev;
2535                         struct am65_cpsw_port *port;
2536
2537                         if (!sl_ndev)
2538                                 continue;
2539
2540                         port = am65_ndev_to_port(sl_ndev);
2541                         port->slave.port_vlan = 0;
2542                         if (netif_running(sl_ndev))
2543                                 am65_cpsw_init_port_emac_ale(port);
2544                 }
2545         }
2546         cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
2547 exit:
2548         rtnl_unlock();
2549
2550         return 0;
2551 }
2552
2553 static const struct devlink_param am65_cpsw_devlink_params[] = {
2554         DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
2555                              DEVLINK_PARAM_TYPE_BOOL,
2556                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
2557                              am65_cpsw_dl_switch_mode_get,
2558                              am65_cpsw_dl_switch_mode_set, NULL),
2559 };
2560
2561 static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
2562 {
2563         struct devlink_port_attrs attrs = {};
2564         struct am65_cpsw_devlink *dl_priv;
2565         struct device *dev = common->dev;
2566         struct devlink_port *dl_port;
2567         struct am65_cpsw_port *port;
2568         int ret = 0;
2569         int i;
2570
2571         common->devlink =
2572                 devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
2573         if (!common->devlink)
2574                 return -ENOMEM;
2575
2576         dl_priv = devlink_priv(common->devlink);
2577         dl_priv->common = common;
2578
2579         /* Provide devlink hook to switch mode when multiple external ports
2580          * are present NUSS switchdev driver is enabled.
2581          */
2582         if (!AM65_CPSW_IS_CPSW2G(common) &&
2583             IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
2584                 ret = devlink_params_register(common->devlink,
2585                                               am65_cpsw_devlink_params,
2586                                               ARRAY_SIZE(am65_cpsw_devlink_params));
2587                 if (ret) {
2588                         dev_err(dev, "devlink params reg fail ret:%d\n", ret);
2589                         goto dl_unreg;
2590                 }
2591         }
2592
2593         for (i = 1; i <= common->port_num; i++) {
2594                 port = am65_common_get_port(common, i);
2595                 dl_port = &port->devlink_port;
2596
2597                 if (port->ndev)
2598                         attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
2599                 else
2600                         attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
2601                 attrs.phys.port_number = port->port_id;
2602                 attrs.switch_id.id_len = sizeof(resource_size_t);
2603                 memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
2604                 devlink_port_attrs_set(dl_port, &attrs);
2605
2606                 ret = devlink_port_register(common->devlink, dl_port, port->port_id);
2607                 if (ret) {
2608                         dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
2609                                 port->port_id, ret);
2610                         goto dl_port_unreg;
2611                 }
2612         }
2613         devlink_register(common->devlink);
2614         return ret;
2615
2616 dl_port_unreg:
2617         for (i = i - 1; i >= 1; i--) {
2618                 port = am65_common_get_port(common, i);
2619                 dl_port = &port->devlink_port;
2620
2621                 devlink_port_unregister(dl_port);
2622         }
2623 dl_unreg:
2624         devlink_free(common->devlink);
2625         return ret;
2626 }
2627
2628 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
2629 {
2630         struct devlink_port *dl_port;
2631         struct am65_cpsw_port *port;
2632         int i;
2633
2634         devlink_unregister(common->devlink);
2635
2636         for (i = 1; i <= common->port_num; i++) {
2637                 port = am65_common_get_port(common, i);
2638                 dl_port = &port->devlink_port;
2639
2640                 devlink_port_unregister(dl_port);
2641         }
2642
2643         if (!AM65_CPSW_IS_CPSW2G(common) &&
2644             IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2645                 devlink_params_unregister(common->devlink,
2646                                           am65_cpsw_devlink_params,
2647                                           ARRAY_SIZE(am65_cpsw_devlink_params));
2648
2649         devlink_free(common->devlink);
2650 }
2651
2652 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
2653 {
2654         struct device *dev = common->dev;
2655         struct am65_cpsw_port *port;
2656         int ret = 0, i;
2657
2658         /* init tx channels */
2659         ret = am65_cpsw_nuss_init_tx_chns(common);
2660         if (ret)
2661                 return ret;
2662         ret = am65_cpsw_nuss_init_rx_chns(common);
2663         if (ret)
2664                 return ret;
2665
2666         ret = am65_cpsw_nuss_register_devlink(common);
2667         if (ret)
2668                 return ret;
2669
2670         for (i = 0; i < common->port_num; i++) {
2671                 port = &common->ports[i];
2672
2673                 if (!port->ndev)
2674                         continue;
2675
2676                 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port);
2677
2678                 ret = register_netdev(port->ndev);
2679                 if (ret) {
2680                         dev_err(dev, "error registering slave net device%i %d\n",
2681                                 i, ret);
2682                         goto err_cleanup_ndev;
2683                 }
2684         }
2685
2686         ret = am65_cpsw_register_notifiers(common);
2687         if (ret)
2688                 goto err_cleanup_ndev;
2689
2690         /* can't auto unregister ndev using devm_add_action() due to
2691          * devres release sequence in DD core for DMA
2692          */
2693
2694         return 0;
2695
2696 err_cleanup_ndev:
2697         am65_cpsw_nuss_cleanup_ndev(common);
2698         am65_cpsw_unregister_devlink(common);
2699
2700         return ret;
2701 }
2702
2703 int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
2704 {
2705         int ret;
2706
2707         common->tx_ch_num = num_tx;
2708         ret = am65_cpsw_nuss_init_tx_chns(common);
2709
2710         return ret;
2711 }
2712
2713 struct am65_cpsw_soc_pdata {
2714         u32     quirks_dis;
2715 };
2716
2717 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
2718         .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
2719 };
2720
2721 static const struct soc_device_attribute am65_cpsw_socinfo[] = {
2722         { .family = "AM65X",
2723           .revision = "SR2.0",
2724           .data = &am65x_soc_sr2_0
2725         },
2726         {/* sentinel */}
2727 };
2728
2729 static const struct am65_cpsw_pdata am65x_sr1_0 = {
2730         .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
2731         .ale_dev_id = "am65x-cpsw2g",
2732         .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
2733 };
2734
2735 static const struct am65_cpsw_pdata j721e_pdata = {
2736         .quirks = 0,
2737         .ale_dev_id = "am65x-cpsw2g",
2738         .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
2739 };
2740
2741 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
2742         .quirks = 0,
2743         .ale_dev_id = "am64-cpswxg",
2744         .fdqring_mode = K3_RINGACC_RING_MODE_RING,
2745 };
2746
2747 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
2748         .quirks = 0,
2749         .ale_dev_id = "am64-cpswxg",
2750         .fdqring_mode = K3_RINGACC_RING_MODE_RING,
2751         .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
2752 };
2753
2754 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
2755         .quirks = 0,
2756         .ale_dev_id = "am64-cpswxg",
2757         .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
2758         .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
2759 };
2760
2761 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
2762         { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
2763         { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
2764         { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
2765         { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
2766         { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
2767         { /* sentinel */ },
2768 };
2769 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
2770
2771 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
2772 {
2773         const struct soc_device_attribute *soc;
2774
2775         soc = soc_device_match(am65_cpsw_socinfo);
2776         if (soc && soc->data) {
2777                 const struct am65_cpsw_soc_pdata *socdata = soc->data;
2778
2779                 /* disable quirks */
2780                 common->pdata.quirks &= ~socdata->quirks_dis;
2781         }
2782 }
2783
2784 static int am65_cpsw_nuss_probe(struct platform_device *pdev)
2785 {
2786         struct cpsw_ale_params ale_params = { 0 };
2787         const struct of_device_id *of_id;
2788         struct device *dev = &pdev->dev;
2789         struct am65_cpsw_common *common;
2790         struct device_node *node;
2791         struct resource *res;
2792         struct clk *clk;
2793         u64 id_temp;
2794         int ret, i;
2795         int ale_entries;
2796
2797         common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
2798         if (!common)
2799                 return -ENOMEM;
2800         common->dev = dev;
2801
2802         of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
2803         if (!of_id)
2804                 return -EINVAL;
2805         common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
2806
2807         am65_cpsw_nuss_apply_socinfo(common);
2808
2809         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
2810         common->ss_base = devm_ioremap_resource(&pdev->dev, res);
2811         if (IS_ERR(common->ss_base))
2812                 return PTR_ERR(common->ss_base);
2813         common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
2814         /* Use device's physical base address as switch id */
2815         id_temp = cpu_to_be64(res->start);
2816         memcpy(common->switch_id, &id_temp, sizeof(res->start));
2817
2818         node = of_get_child_by_name(dev->of_node, "ethernet-ports");
2819         if (!node)
2820                 return -ENOENT;
2821         common->port_num = of_get_child_count(node);
2822         of_node_put(node);
2823         if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
2824                 return -ENOENT;
2825
2826         common->rx_flow_id_base = -1;
2827         init_completion(&common->tdown_complete);
2828         common->tx_ch_num = 1;
2829         common->pf_p0_rx_ptype_rrobin = false;
2830         common->default_vlan = 1;
2831
2832         common->ports = devm_kcalloc(dev, common->port_num,
2833                                      sizeof(*common->ports),
2834                                      GFP_KERNEL);
2835         if (!common->ports)
2836                 return -ENOMEM;
2837
2838         clk = devm_clk_get(dev, "fck");
2839         if (IS_ERR(clk))
2840                 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
2841         common->bus_freq = clk_get_rate(clk);
2842
2843         pm_runtime_enable(dev);
2844         ret = pm_runtime_resume_and_get(dev);
2845         if (ret < 0) {
2846                 pm_runtime_disable(dev);
2847                 return ret;
2848         }
2849
2850         node = of_get_child_by_name(dev->of_node, "mdio");
2851         if (!node) {
2852                 dev_warn(dev, "MDIO node not found\n");
2853         } else if (of_device_is_available(node)) {
2854                 struct platform_device *mdio_pdev;
2855
2856                 mdio_pdev = of_platform_device_create(node, NULL, dev);
2857                 if (!mdio_pdev) {
2858                         ret = -ENODEV;
2859                         goto err_pm_clear;
2860                 }
2861
2862                 common->mdio_dev =  &mdio_pdev->dev;
2863         }
2864         of_node_put(node);
2865
2866         am65_cpsw_nuss_get_ver(common);
2867
2868         ret = am65_cpsw_nuss_init_host_p(common);
2869         if (ret)
2870                 goto err_of_clear;
2871
2872         ret = am65_cpsw_nuss_init_slave_ports(common);
2873         if (ret)
2874                 goto err_of_clear;
2875
2876         /* init common data */
2877         ale_params.dev = dev;
2878         ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
2879         ale_params.ale_ports = common->port_num + 1;
2880         ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
2881         ale_params.dev_id = common->pdata.ale_dev_id;
2882         ale_params.bus_freq = common->bus_freq;
2883
2884         common->ale = cpsw_ale_create(&ale_params);
2885         if (IS_ERR(common->ale)) {
2886                 dev_err(dev, "error initializing ale engine\n");
2887                 ret = PTR_ERR(common->ale);
2888                 goto err_of_clear;
2889         }
2890
2891         ale_entries = common->ale->params.ale_entries;
2892         common->ale_context = devm_kzalloc(dev,
2893                                            ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
2894                                            GFP_KERNEL);
2895         ret = am65_cpsw_init_cpts(common);
2896         if (ret)
2897                 goto err_of_clear;
2898
2899         /* init ports */
2900         for (i = 0; i < common->port_num; i++)
2901                 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
2902
2903         dev_set_drvdata(dev, common);
2904
2905         common->is_emac_mode = true;
2906
2907         ret = am65_cpsw_nuss_init_ndevs(common);
2908         if (ret)
2909                 goto err_free_phylink;
2910
2911         ret = am65_cpsw_nuss_register_ndevs(common);
2912         if (ret)
2913                 goto err_free_phylink;
2914
2915         pm_runtime_put(dev);
2916         return 0;
2917
2918 err_free_phylink:
2919         am65_cpsw_nuss_phylink_cleanup(common);
2920 err_of_clear:
2921         of_platform_device_destroy(common->mdio_dev, NULL);
2922 err_pm_clear:
2923         pm_runtime_put_sync(dev);
2924         pm_runtime_disable(dev);
2925         return ret;
2926 }
2927
2928 static int am65_cpsw_nuss_remove(struct platform_device *pdev)
2929 {
2930         struct device *dev = &pdev->dev;
2931         struct am65_cpsw_common *common;
2932         int ret;
2933
2934         common = dev_get_drvdata(dev);
2935
2936         ret = pm_runtime_resume_and_get(&pdev->dev);
2937         if (ret < 0)
2938                 return ret;
2939
2940         am65_cpsw_unregister_devlink(common);
2941         am65_cpsw_unregister_notifiers(common);
2942
2943         /* must unregister ndevs here because DD release_driver routine calls
2944          * dma_deconfigure(dev) before devres_release_all(dev)
2945          */
2946         am65_cpsw_nuss_cleanup_ndev(common);
2947         am65_cpsw_nuss_phylink_cleanup(common);
2948         am65_cpsw_disable_serdes_phy(common);
2949
2950         of_platform_device_destroy(common->mdio_dev, NULL);
2951
2952         pm_runtime_put_sync(&pdev->dev);
2953         pm_runtime_disable(&pdev->dev);
2954         return 0;
2955 }
2956
2957 static int am65_cpsw_nuss_suspend(struct device *dev)
2958 {
2959         struct am65_cpsw_common *common = dev_get_drvdata(dev);
2960         struct am65_cpsw_host *host_p = am65_common_get_host(common);
2961         struct am65_cpsw_port *port;
2962         struct net_device *ndev;
2963         int i, ret;
2964
2965         cpsw_ale_dump(common->ale, common->ale_context);
2966         host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2967         for (i = 0; i < common->port_num; i++) {
2968                 port = &common->ports[i];
2969                 ndev = port->ndev;
2970
2971                 if (!ndev)
2972                         continue;
2973
2974                 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
2975                 netif_device_detach(ndev);
2976                 if (netif_running(ndev)) {
2977                         rtnl_lock();
2978                         ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
2979                         rtnl_unlock();
2980                         if (ret < 0) {
2981                                 netdev_err(ndev, "failed to stop: %d", ret);
2982                                 return ret;
2983                         }
2984                 }
2985         }
2986
2987         am65_cpts_suspend(common->cpts);
2988
2989         am65_cpsw_nuss_remove_rx_chns(common);
2990         am65_cpsw_nuss_remove_tx_chns(common);
2991
2992         return 0;
2993 }
2994
2995 static int am65_cpsw_nuss_resume(struct device *dev)
2996 {
2997         struct am65_cpsw_common *common = dev_get_drvdata(dev);
2998         struct am65_cpsw_port *port;
2999         struct net_device *ndev;
3000         int i, ret;
3001         struct am65_cpsw_host *host_p = am65_common_get_host(common);
3002
3003         ret = am65_cpsw_nuss_init_tx_chns(common);
3004         if (ret)
3005                 return ret;
3006         ret = am65_cpsw_nuss_init_rx_chns(common);
3007         if (ret)
3008                 return ret;
3009
3010         /* If RX IRQ was disabled before suspend, keep it disabled */
3011         if (common->rx_irq_disabled)
3012                 disable_irq(common->rx_chns.irq);
3013
3014         am65_cpts_resume(common->cpts);
3015
3016         for (i = 0; i < common->port_num; i++) {
3017                 port = &common->ports[i];
3018                 ndev = port->ndev;
3019
3020                 if (!ndev)
3021                         continue;
3022
3023                 if (netif_running(ndev)) {
3024                         rtnl_lock();
3025                         ret = am65_cpsw_nuss_ndo_slave_open(ndev);
3026                         rtnl_unlock();
3027                         if (ret < 0) {
3028                                 netdev_err(ndev, "failed to start: %d", ret);
3029                                 return ret;
3030                         }
3031                 }
3032
3033                 netif_device_attach(ndev);
3034                 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3035         }
3036
3037         writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3038         cpsw_ale_restore(common->ale, common->ale_context);
3039
3040         return 0;
3041 }
3042
3043 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
3044         SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
3045 };
3046
3047 static struct platform_driver am65_cpsw_nuss_driver = {
3048         .driver = {
3049                 .name    = AM65_CPSW_DRV_NAME,
3050                 .of_match_table = am65_cpsw_nuss_of_mtable,
3051                 .pm = &am65_cpsw_nuss_dev_pm_ops,
3052         },
3053         .probe = am65_cpsw_nuss_probe,
3054         .remove = am65_cpsw_nuss_remove,
3055 };
3056
3057 module_platform_driver(am65_cpsw_nuss_driver);
3058
3059 MODULE_LICENSE("GPL v2");
3060 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
3061 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");