// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 MediaTek Corporation
 * Copyright (c) 2020 BayLibre SAS
 *
 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */
9 #include <linux/bits.h>
10 #include <linux/clk.h>
11 #include <linux/compiler.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/mii.h>
17 #include <linux/module.h>
18 #include <linux/netdevice.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/platform_device.h>
24 #include <linux/regmap.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 #include <linux/workqueue.h>
29 #define MTK_STAR_DRVNAME "mtk_star_emac"
31 #define MTK_STAR_WAIT_TIMEOUT 300
32 #define MTK_STAR_MAX_FRAME_SIZE 1514
33 #define MTK_STAR_SKB_ALIGNMENT 16
34 #define MTK_STAR_NAPI_WEIGHT 64
35 #define MTK_STAR_HASHTABLE_MC_LIMIT 256
36 #define MTK_STAR_HASHTABLE_SIZE_MAX 512
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
 * work for this controller.
 */
#define MTK_STAR_IP_ALIGN 2
43 static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
44 #define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
46 /* PHY Control Register 0 */
47 #define MTK_STAR_REG_PHY_CTRL0 0x0000
48 #define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
49 #define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
50 #define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
51 #define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
52 #define MTK_STAR_OFF_PHY_CTRL0_PREG 8
53 #define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
54 #define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
56 /* PHY Control Register 1 */
57 #define MTK_STAR_REG_PHY_CTRL1 0x0004
58 #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
59 #define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
60 #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
61 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
62 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
63 #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
64 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
65 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
66 #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
68 /* MAC Configuration Register */
69 #define MTK_STAR_REG_MAC_CFG 0x0008
70 #define MTK_STAR_OFF_MAC_CFG_IPG 10
71 #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
72 #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
73 #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
74 #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
75 #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
76 #define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
78 /* Flow-Control Configuration Register */
79 #define MTK_STAR_REG_FC_CFG 0x000c
80 #define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
81 #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
82 #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
83 #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
84 #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
86 /* ARL Configuration Register */
87 #define MTK_STAR_REG_ARL_CFG 0x0010
88 #define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
89 #define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
91 /* MAC High and Low Bytes Registers */
92 #define MTK_STAR_REG_MY_MAC_H 0x0014
93 #define MTK_STAR_REG_MY_MAC_L 0x0018
95 /* Hash Table Control Register */
96 #define MTK_STAR_REG_HASH_CTRL 0x001c
97 #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
98 #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
99 #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
100 #define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
101 #define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
102 #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
103 #define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
105 /* TX DMA Control Register */
106 #define MTK_STAR_REG_TX_DMA_CTRL 0x0034
107 #define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
108 #define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
109 #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
111 /* RX DMA Control Register */
112 #define MTK_STAR_REG_RX_DMA_CTRL 0x0038
113 #define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
114 #define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
115 #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
117 /* DMA Address Registers */
118 #define MTK_STAR_REG_TX_DPTR 0x003c
119 #define MTK_STAR_REG_RX_DPTR 0x0040
120 #define MTK_STAR_REG_TX_BASE_ADDR 0x0044
121 #define MTK_STAR_REG_RX_BASE_ADDR 0x0048
123 /* Interrupt Status Register */
124 #define MTK_STAR_REG_INT_STS 0x0050
125 #define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
126 #define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
127 #define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
128 #define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
130 /* Interrupt Mask Register */
131 #define MTK_STAR_REG_INT_MASK 0x0054
132 #define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
134 /* Misc. Config Register */
135 #define MTK_STAR_REG_TEST1 0x005c
136 #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
138 /* Extended Configuration Register */
139 #define MTK_STAR_REG_EXT_CFG 0x0060
140 #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
141 #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
142 #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
144 /* EthSys Configuration Register */
145 #define MTK_STAR_REG_SYS_CONF 0x0094
146 #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
147 #define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
148 #define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
150 /* MAC Clock Configuration Register */
151 #define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
152 #define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
153 #define MTK_STAR_BIT_CLK_DIV_10 0x0a
155 /* Counter registers. */
156 #define MTK_STAR_REG_C_RXOKPKT 0x0100
157 #define MTK_STAR_REG_C_RXOKBYTE 0x0104
158 #define MTK_STAR_REG_C_RXRUNT 0x0108
159 #define MTK_STAR_REG_C_RXLONG 0x010c
160 #define MTK_STAR_REG_C_RXDROP 0x0110
161 #define MTK_STAR_REG_C_RXCRC 0x0114
162 #define MTK_STAR_REG_C_RXARLDROP 0x0118
163 #define MTK_STAR_REG_C_RXVLANDROP 0x011c
164 #define MTK_STAR_REG_C_RXCSERR 0x0120
165 #define MTK_STAR_REG_C_RXPAUSE 0x0124
166 #define MTK_STAR_REG_C_TXOKPKT 0x0128
167 #define MTK_STAR_REG_C_TXOKBYTE 0x012c
168 #define MTK_STAR_REG_C_TXPAUSECOL 0x0130
169 #define MTK_STAR_REG_C_TXRTY 0x0134
170 #define MTK_STAR_REG_C_TXSKIP 0x0138
171 #define MTK_STAR_REG_C_TX_ARP 0x013c
172 #define MTK_STAR_REG_C_RX_RERR 0x01d8
173 #define MTK_STAR_REG_C_RX_UNI 0x01dc
174 #define MTK_STAR_REG_C_RX_MULTI 0x01e0
175 #define MTK_STAR_REG_C_RX_BROAD 0x01e4
176 #define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
177 #define MTK_STAR_REG_C_TX_UNI 0x01ec
178 #define MTK_STAR_REG_C_TX_MULTI 0x01f0
179 #define MTK_STAR_REG_C_TX_BROAD 0x01f4
180 #define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
181 #define MTK_STAR_REG_C_TX_LATECOL 0x01fc
182 #define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
183 #define MTK_STAR_REG_C_RX_TWIST 0x0218
185 /* Ethernet CFG Control */
186 #define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
187 #define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
188 #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
190 /* Represents the actual structure of descriptors used by the MAC. We can
191 * reuse the same structure for both TX and RX - the layout is the same, only
192 * the flags differ slightly.
194 struct mtk_star_ring_desc {
195 /* Contains both the status flags as well as packet length. */
202 #define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
203 #define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
204 #define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
205 #define MTK_STAR_DESC_BIT_INT BIT(27)
206 #define MTK_STAR_DESC_BIT_LS BIT(28)
207 #define MTK_STAR_DESC_BIT_FS BIT(29)
208 #define MTK_STAR_DESC_BIT_EOR BIT(30)
209 #define MTK_STAR_DESC_BIT_COWN BIT(31)
211 /* Helper structure for storing data read from/written to descriptors in order
212 * to limit reads from/writes to DMA memory.
214 struct mtk_star_ring_desc_data {
221 #define MTK_STAR_RING_NUM_DESCS 128
222 #define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
223 #define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
224 #define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
225 #define MTK_STAR_DMA_SIZE \
226 (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
228 struct mtk_star_ring {
229 struct mtk_star_ring_desc *descs;
230 struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
231 dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
236 struct mtk_star_priv {
237 struct net_device *ndev;
240 struct regmap *pericfg;
242 struct clk_bulk_data clks[MTK_STAR_NCLKS];
245 struct mtk_star_ring_desc *descs_base;
247 struct mtk_star_ring tx_ring;
248 struct mtk_star_ring rx_ring;
251 struct napi_struct napi;
253 struct device_node *phy_node;
254 phy_interface_t phy_intf;
255 struct phy_device *phydev;
261 /* Protects against concurrent descriptor access. */
264 struct rtnl_link_stats64 stats;
265 struct work_struct stats_work;
268 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
270 return priv->ndev->dev.parent;
273 static const struct regmap_config mtk_star_regmap_config = {
277 .disable_locking = true,
280 static void mtk_star_ring_init(struct mtk_star_ring *ring,
281 struct mtk_star_ring_desc *descs)
283 memset(ring, 0, sizeof(*ring));
289 static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
290 struct mtk_star_ring_desc_data *desc_data)
292 struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
295 status = READ_ONCE(desc->status);
296 dma_rmb(); /* Make sure we read the status bits before checking it. */
298 if (!(status & MTK_STAR_DESC_BIT_COWN))
301 desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
302 desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
303 desc_data->dma_addr = ring->dma_addrs[ring->tail];
304 desc_data->skb = ring->skbs[ring->tail];
306 ring->dma_addrs[ring->tail] = 0;
307 ring->skbs[ring->tail] = NULL;
309 status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;
311 WRITE_ONCE(desc->data_ptr, 0);
312 WRITE_ONCE(desc->status, status);
314 ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;
319 static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
320 struct mtk_star_ring_desc_data *desc_data,
323 struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
326 status = READ_ONCE(desc->status);
328 ring->skbs[ring->head] = desc_data->skb;
329 ring->dma_addrs[ring->head] = desc_data->dma_addr;
331 status |= desc_data->len;
335 WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
336 WRITE_ONCE(desc->status, status);
337 status &= ~MTK_STAR_DESC_BIT_COWN;
338 /* Flush previous modifications before ownership change. */
340 WRITE_ONCE(desc->status, status);
342 ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
346 mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
347 struct mtk_star_ring_desc_data *desc_data)
349 mtk_star_ring_push_head(ring, desc_data, 0);
353 mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
354 struct mtk_star_ring_desc_data *desc_data)
356 static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
357 MTK_STAR_DESC_BIT_LS |
358 MTK_STAR_DESC_BIT_INT;
360 mtk_star_ring_push_head(ring, desc_data, flags);
363 static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
365 return abs(ring->head - ring->tail);
368 static bool mtk_star_ring_full(struct mtk_star_ring *ring)
370 return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
373 static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
375 return mtk_star_ring_num_used_descs(ring) > 0;
378 static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
381 struct device *dev = mtk_star_get_dev(priv);
383 /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
384 return dma_map_single(dev, skb_tail_pointer(skb) - 2,
385 skb_tailroom(skb), DMA_FROM_DEVICE);
388 static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
389 struct mtk_star_ring_desc_data *desc_data)
391 struct device *dev = mtk_star_get_dev(priv);
393 dma_unmap_single(dev, desc_data->dma_addr,
394 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
397 static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
400 struct device *dev = mtk_star_get_dev(priv);
402 return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
405 static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
406 struct mtk_star_ring_desc_data *desc_data)
408 struct device *dev = mtk_star_get_dev(priv);
410 return dma_unmap_single(dev, desc_data->dma_addr,
411 skb_headlen(desc_data->skb), DMA_TO_DEVICE);
414 static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
416 regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
417 MTK_STAR_BIT_MAC_CFG_NIC_PD, 0);
420 /* Unmask the three interrupts we care about, mask all others. */
421 static void mtk_star_intr_enable(struct mtk_star_priv *priv)
423 unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
424 MTK_STAR_BIT_INT_STS_FNRC |
425 MTK_STAR_REG_INT_STS_MIB_CNT_TH;
427 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
430 static void mtk_star_intr_disable(struct mtk_star_priv *priv)
432 regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
435 static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv)
437 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
438 MTK_STAR_BIT_INT_STS_TNTC, 0);
441 static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv)
443 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
444 MTK_STAR_BIT_INT_STS_FNRC, 0);
447 static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv)
449 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
450 MTK_STAR_REG_INT_STS_MIB_CNT_TH, 0);
453 static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv)
455 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
456 MTK_STAR_BIT_INT_STS_TNTC,
457 MTK_STAR_BIT_INT_STS_TNTC);
460 static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv)
462 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
463 MTK_STAR_BIT_INT_STS_FNRC,
464 MTK_STAR_BIT_INT_STS_FNRC);
467 static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv)
469 regmap_update_bits(priv->regs, MTK_STAR_REG_INT_MASK,
470 MTK_STAR_REG_INT_STS_MIB_CNT_TH,
471 MTK_STAR_REG_INT_STS_MIB_CNT_TH);
474 static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
478 regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
483 static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
487 val = mtk_star_intr_read(priv);
488 regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
493 static void mtk_star_dma_init(struct mtk_star_priv *priv)
495 struct mtk_star_ring_desc *desc;
499 priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
501 for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
502 desc = &priv->descs_base[i];
504 memset(desc, 0, sizeof(*desc));
505 desc->status = MTK_STAR_DESC_BIT_COWN;
506 if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
507 (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
508 desc->status |= MTK_STAR_DESC_BIT_EOR;
511 mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
512 mtk_star_ring_init(&priv->rx_ring,
513 priv->descs_base + MTK_STAR_NUM_TX_DESCS);
515 /* Set DMA pointers. */
516 val = (unsigned int)priv->dma_addr;
517 regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
518 regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
520 val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
521 regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
522 regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
525 static void mtk_star_dma_start(struct mtk_star_priv *priv)
527 regmap_update_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
528 MTK_STAR_BIT_TX_DMA_CTRL_START,
529 MTK_STAR_BIT_TX_DMA_CTRL_START);
530 regmap_update_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
531 MTK_STAR_BIT_RX_DMA_CTRL_START,
532 MTK_STAR_BIT_RX_DMA_CTRL_START);
535 static void mtk_star_dma_stop(struct mtk_star_priv *priv)
537 regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
538 MTK_STAR_BIT_TX_DMA_CTRL_STOP);
539 regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
540 MTK_STAR_BIT_RX_DMA_CTRL_STOP);
543 static void mtk_star_dma_disable(struct mtk_star_priv *priv)
547 mtk_star_dma_stop(priv);
549 /* Take back all descriptors. */
550 for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
551 priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
554 static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
556 regmap_update_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
557 MTK_STAR_BIT_RX_DMA_CTRL_RESUME,
558 MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
561 static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
563 regmap_update_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
564 MTK_STAR_BIT_TX_DMA_CTRL_RESUME,
565 MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
568 static void mtk_star_set_mac_addr(struct net_device *ndev)
570 struct mtk_star_priv *priv = netdev_priv(ndev);
571 u8 *mac_addr = ndev->dev_addr;
572 unsigned int high, low;
574 high = mac_addr[0] << 8 | mac_addr[1] << 0;
575 low = mac_addr[2] << 24 | mac_addr[3] << 16 |
576 mac_addr[4] << 8 | mac_addr[5];
578 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
579 regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
582 static void mtk_star_reset_counters(struct mtk_star_priv *priv)
584 static const unsigned int counter_regs[] = {
585 MTK_STAR_REG_C_RXOKPKT,
586 MTK_STAR_REG_C_RXOKBYTE,
587 MTK_STAR_REG_C_RXRUNT,
588 MTK_STAR_REG_C_RXLONG,
589 MTK_STAR_REG_C_RXDROP,
590 MTK_STAR_REG_C_RXCRC,
591 MTK_STAR_REG_C_RXARLDROP,
592 MTK_STAR_REG_C_RXVLANDROP,
593 MTK_STAR_REG_C_RXCSERR,
594 MTK_STAR_REG_C_RXPAUSE,
595 MTK_STAR_REG_C_TXOKPKT,
596 MTK_STAR_REG_C_TXOKBYTE,
597 MTK_STAR_REG_C_TXPAUSECOL,
598 MTK_STAR_REG_C_TXRTY,
599 MTK_STAR_REG_C_TXSKIP,
600 MTK_STAR_REG_C_TX_ARP,
601 MTK_STAR_REG_C_RX_RERR,
602 MTK_STAR_REG_C_RX_UNI,
603 MTK_STAR_REG_C_RX_MULTI,
604 MTK_STAR_REG_C_RX_BROAD,
605 MTK_STAR_REG_C_RX_ALIGNERR,
606 MTK_STAR_REG_C_TX_UNI,
607 MTK_STAR_REG_C_TX_MULTI,
608 MTK_STAR_REG_C_TX_BROAD,
609 MTK_STAR_REG_C_TX_TIMEOUT,
610 MTK_STAR_REG_C_TX_LATECOL,
611 MTK_STAR_REG_C_RX_LENGTHERR,
612 MTK_STAR_REG_C_RX_TWIST,
617 for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
618 regmap_read(priv->regs, counter_regs[i], &val);
621 static void mtk_star_update_stat(struct mtk_star_priv *priv,
622 unsigned int reg, u64 *stat)
626 regmap_read(priv->regs, reg, &val);
630 /* Try to get as many stats as possible from the internal registers instead
631 * of tracking them ourselves.
633 static void mtk_star_update_stats(struct mtk_star_priv *priv)
635 struct rtnl_link_stats64 *stats = &priv->stats;
637 /* OK packets and bytes. */
638 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
639 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
640 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
641 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);
643 /* RX & TX multicast. */
644 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
645 mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);
648 mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
650 mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
652 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);
655 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
656 &stats->rx_length_errors);
657 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
658 &stats->rx_over_errors);
659 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
660 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
661 &stats->rx_frame_errors);
662 mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
663 &stats->rx_fifo_errors);
664 /* Sum of the general RX error counter + all of the above. */
665 mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
666 stats->rx_errors += stats->rx_length_errors;
667 stats->rx_errors += stats->rx_over_errors;
668 stats->rx_errors += stats->rx_crc_errors;
669 stats->rx_errors += stats->rx_frame_errors;
670 stats->rx_errors += stats->rx_fifo_errors;
673 /* This runs in process context and parallel TX and RX paths executing in
674 * napi context may result in losing some stats data but this should happen
675 * seldom enough to be acceptable.
677 static void mtk_star_update_stats_work(struct work_struct *work)
679 struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv,
682 mtk_star_update_stats(priv);
683 mtk_star_reset_counters(priv);
684 mtk_star_intr_enable_stats(priv);
687 static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
689 uintptr_t tail, offset;
692 skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
696 /* Align to 16 bytes. */
697 tail = (uintptr_t)skb_tail_pointer(skb);
698 if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
699 offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
700 skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
703 /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
704 * extract the Ethernet header (14 bytes) so we need two more bytes.
706 skb_reserve(skb, MTK_STAR_IP_ALIGN);
711 static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
713 struct mtk_star_priv *priv = netdev_priv(ndev);
714 struct mtk_star_ring *ring = &priv->rx_ring;
715 struct device *dev = mtk_star_get_dev(priv);
716 struct mtk_star_ring_desc *desc;
721 for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
722 skb = mtk_star_alloc_skb(ndev);
726 dma_addr = mtk_star_dma_map_rx(priv, skb);
727 if (dma_mapping_error(dev, dma_addr)) {
732 desc = &ring->descs[i];
733 desc->data_ptr = dma_addr;
734 desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
735 desc->status &= ~MTK_STAR_DESC_BIT_COWN;
737 ring->dma_addrs[i] = dma_addr;
744 mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
745 void (*unmap_func)(struct mtk_star_priv *,
746 struct mtk_star_ring_desc_data *))
748 struct mtk_star_ring_desc_data desc_data;
749 struct mtk_star_ring_desc *desc;
752 for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
753 if (!ring->dma_addrs[i])
756 desc = &ring->descs[i];
758 desc_data.dma_addr = ring->dma_addrs[i];
759 desc_data.skb = ring->skbs[i];
761 unmap_func(priv, &desc_data);
762 dev_kfree_skb(desc_data.skb);
766 static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
768 struct mtk_star_ring *ring = &priv->rx_ring;
770 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
773 static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
775 struct mtk_star_ring *ring = &priv->tx_ring;
777 mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
780 /* All processing for TX and RX happens in the napi poll callback. */
781 static irqreturn_t mtk_star_handle_irq(int irq, void *data)
783 struct mtk_star_priv *priv;
784 struct net_device *ndev;
785 bool need_napi = false;
789 priv = netdev_priv(ndev);
791 if (netif_running(ndev)) {
792 status = mtk_star_intr_read(priv);
794 if (status & MTK_STAR_BIT_INT_STS_TNTC) {
795 mtk_star_intr_disable_tx(priv);
799 if (status & MTK_STAR_BIT_INT_STS_FNRC) {
800 mtk_star_intr_disable_rx(priv);
805 napi_schedule(&priv->napi);
807 /* One of the counters reached 0x8000000 - update stats and
808 * reset all counters.
810 if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
811 mtk_star_intr_disable_stats(priv);
812 schedule_work(&priv->stats_work);
815 mtk_star_intr_ack_all(priv);
821 /* Wait for the completion of any previous command - CMD_START bit must be
822 * cleared by hardware.
824 static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
828 return regmap_read_poll_timeout_atomic(priv->regs,
829 MTK_STAR_REG_HASH_CTRL, val,
830 !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
831 10, MTK_STAR_WAIT_TIMEOUT);
834 static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
839 /* Wait for BIST_DONE bit. */
840 ret = regmap_read_poll_timeout_atomic(priv->regs,
841 MTK_STAR_REG_HASH_CTRL, val,
842 val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
843 10, MTK_STAR_WAIT_TIMEOUT);
847 /* Check the BIST_OK bit. */
848 regmap_read(priv->regs, MTK_STAR_REG_HASH_CTRL, &val);
849 if (!(val & MTK_STAR_BIT_HASH_CTRL_BIST_OK))
855 static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
856 unsigned int hash_addr)
861 ret = mtk_star_hash_wait_cmd_start(priv);
865 val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
866 val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
867 val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
868 val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
869 val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
870 regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
872 return mtk_star_hash_wait_ok(priv);
875 static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
879 ret = mtk_star_hash_wait_cmd_start(priv);
883 regmap_update_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
884 MTK_STAR_BIT_HASH_CTRL_BIST_EN,
885 MTK_STAR_BIT_HASH_CTRL_BIST_EN);
886 regmap_update_bits(priv->regs, MTK_STAR_REG_TEST1,
887 MTK_STAR_BIT_TEST1_RST_HASH_MBIST,
888 MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
890 return mtk_star_hash_wait_ok(priv);
893 static void mtk_star_phy_config(struct mtk_star_priv *priv)
897 if (priv->speed == SPEED_1000)
898 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
899 else if (priv->speed == SPEED_100)
900 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
902 val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
903 val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
905 val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
906 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
907 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
908 /* Only full-duplex supported for now. */
909 val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
911 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
914 val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
915 val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
916 val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
921 regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
922 MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
923 MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
926 val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
927 val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
932 regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
933 MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
936 static void mtk_star_adjust_link(struct net_device *ndev)
938 struct mtk_star_priv *priv = netdev_priv(ndev);
939 struct phy_device *phydev = priv->phydev;
940 bool new_state = false;
944 priv->link = phydev->link;
948 if (priv->speed != phydev->speed) {
949 priv->speed = phydev->speed;
953 if (priv->pause != phydev->pause) {
954 priv->pause = phydev->pause;
959 priv->link = phydev->link;
966 mtk_star_phy_config(priv);
968 phy_print_status(ndev->phydev);
972 static void mtk_star_init_config(struct mtk_star_priv *priv)
976 val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
977 MTK_STAR_BIT_EXT_MDC_MODE |
978 MTK_STAR_BIT_SWC_MII_MODE);
980 regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
981 regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
982 MTK_STAR_MSK_MAC_CLK_CONF,
983 MTK_STAR_BIT_CLK_DIV_10);
986 static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
988 regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
989 MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
990 MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
993 static int mtk_star_enable(struct net_device *ndev)
995 struct mtk_star_priv *priv = netdev_priv(ndev);
999 mtk_star_nic_disable_pd(priv);
1000 mtk_star_intr_disable(priv);
1001 mtk_star_dma_stop(priv);
1003 mtk_star_set_mac_addr(ndev);
1005 /* Configure the MAC */
1006 val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
1007 val <<= MTK_STAR_OFF_MAC_CFG_IPG;
1008 val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
1009 val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
1010 val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
1011 regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
1013 /* Enable Hash Table BIST and reset it */
1014 ret = mtk_star_reset_hash_table(priv);
1018 /* Setup the hashing algorithm */
1019 regmap_update_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1020 MTK_STAR_BIT_ARL_CFG_HASH_ALG |
1021 MTK_STAR_BIT_ARL_CFG_MISC_MODE, 0);
1023 /* Don't strip VLAN tags */
1024 regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
1025 MTK_STAR_BIT_MAC_CFG_VLAN_STRIP, 0);
1028 mtk_star_dma_init(priv);
1030 ret = mtk_star_prepare_rx_skbs(ndev);
1034 /* Request the interrupt */
1035 ret = request_irq(ndev->irq, mtk_star_handle_irq,
1036 IRQF_TRIGGER_FALLING, ndev->name, ndev);
1040 napi_enable(&priv->napi);
1042 mtk_star_intr_ack_all(priv);
1043 mtk_star_intr_enable(priv);
1045 /* Connect to and start PHY */
1046 priv->phydev = of_phy_connect(ndev, priv->phy_node,
1047 mtk_star_adjust_link, 0, priv->phy_intf);
1048 if (!priv->phydev) {
1049 netdev_err(ndev, "failed to connect to PHY\n");
1053 mtk_star_dma_start(priv);
1054 phy_start(priv->phydev);
1055 netif_start_queue(ndev);
1060 free_irq(ndev->irq, ndev);
1062 mtk_star_free_rx_skbs(priv);
1067 static void mtk_star_disable(struct net_device *ndev)
1069 struct mtk_star_priv *priv = netdev_priv(ndev);
1071 netif_stop_queue(ndev);
1072 napi_disable(&priv->napi);
1073 mtk_star_intr_disable(priv);
1074 mtk_star_dma_disable(priv);
1075 mtk_star_intr_ack_all(priv);
1076 phy_stop(priv->phydev);
1077 phy_disconnect(priv->phydev);
1078 free_irq(ndev->irq, ndev);
1079 mtk_star_free_rx_skbs(priv);
1080 mtk_star_free_tx_skbs(priv);
1083 static int mtk_star_netdev_open(struct net_device *ndev)
1085 return mtk_star_enable(ndev);
1088 static int mtk_star_netdev_stop(struct net_device *ndev)
1090 mtk_star_disable(ndev);
1095 static int mtk_star_netdev_ioctl(struct net_device *ndev,
1096 struct ifreq *req, int cmd)
1098 if (!netif_running(ndev))
1101 return phy_mii_ioctl(ndev->phydev, req, cmd);
1104 static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
1105 struct net_device *ndev)
1107 struct mtk_star_priv *priv = netdev_priv(ndev);
1108 struct mtk_star_ring *ring = &priv->tx_ring;
1109 struct device *dev = mtk_star_get_dev(priv);
1110 struct mtk_star_ring_desc_data desc_data;
1112 desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
1113 if (dma_mapping_error(dev, desc_data.dma_addr))
1114 goto err_drop_packet;
1116 desc_data.skb = skb;
1117 desc_data.len = skb->len;
1119 spin_lock_bh(&priv->lock);
1121 mtk_star_ring_push_head_tx(ring, &desc_data);
1123 netdev_sent_queue(ndev, skb->len);
1125 if (mtk_star_ring_full(ring))
1126 netif_stop_queue(ndev);
1128 spin_unlock_bh(&priv->lock);
1130 mtk_star_dma_resume_tx(priv);
1132 return NETDEV_TX_OK;
1136 ndev->stats.tx_dropped++;
1137 return NETDEV_TX_BUSY;
1140 /* Returns the number of bytes sent or a negative number on the first
1141 * descriptor owned by DMA.
1143 static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1145 struct mtk_star_ring *ring = &priv->tx_ring;
1146 struct mtk_star_ring_desc_data desc_data;
1149 ret = mtk_star_ring_pop_tail(ring, &desc_data);
1153 mtk_star_dma_unmap_tx(priv, &desc_data);
1154 ret = desc_data.skb->len;
1155 dev_kfree_skb_irq(desc_data.skb);
1160 static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1162 struct mtk_star_ring *ring = &priv->tx_ring;
1163 struct net_device *ndev = priv->ndev;
1164 int ret, pkts_compl, bytes_compl;
1167 spin_lock(&priv->lock);
1169 for (pkts_compl = 0, bytes_compl = 0;;
1170 pkts_compl++, bytes_compl += ret, wake = true) {
1171 if (!mtk_star_ring_descs_available(ring))
1174 ret = mtk_star_tx_complete_one(priv);
1179 netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1181 if (wake && netif_queue_stopped(ndev))
1182 netif_wake_queue(ndev);
1184 mtk_star_intr_enable_tx(priv);
1186 spin_unlock(&priv->lock);
1189 static void mtk_star_netdev_get_stats64(struct net_device *ndev,
1190 struct rtnl_link_stats64 *stats)
1192 struct mtk_star_priv *priv = netdev_priv(ndev);
1194 mtk_star_update_stats(priv);
1196 memcpy(stats, &priv->stats, sizeof(*stats));
1199 static void mtk_star_set_rx_mode(struct net_device *ndev)
1201 struct mtk_star_priv *priv = netdev_priv(ndev);
1202 struct netdev_hw_addr *hw_addr;
1203 unsigned int hash_addr, i;
1206 if (ndev->flags & IFF_PROMISC) {
1207 regmap_update_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1208 MTK_STAR_BIT_ARL_CFG_MISC_MODE,
1209 MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1210 } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1211 ndev->flags & IFF_ALLMULTI) {
1212 for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1213 ret = mtk_star_set_hashbit(priv, i);
1218 /* Clear previous settings. */
1219 ret = mtk_star_reset_hash_table(priv);
1223 netdev_for_each_mc_addr(hw_addr, ndev) {
1224 hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1225 hash_addr += hw_addr->addr[5];
1226 ret = mtk_star_set_hashbit(priv, hash_addr);
1235 if (ret == -ETIMEDOUT)
1236 netdev_err(ndev, "setting hash bit timed out\n");
1238 /* Should be -EIO */
1239 netdev_err(ndev, "unable to set hash bit");
1242 static const struct net_device_ops mtk_star_netdev_ops = {
1243 .ndo_open = mtk_star_netdev_open,
1244 .ndo_stop = mtk_star_netdev_stop,
1245 .ndo_start_xmit = mtk_star_netdev_start_xmit,
1246 .ndo_get_stats64 = mtk_star_netdev_get_stats64,
1247 .ndo_set_rx_mode = mtk_star_set_rx_mode,
1248 .ndo_do_ioctl = mtk_star_netdev_ioctl,
1249 .ndo_set_mac_address = eth_mac_addr,
1250 .ndo_validate_addr = eth_validate_addr,
1253 static void mtk_star_get_drvinfo(struct net_device *dev,
1254 struct ethtool_drvinfo *info)
1256 strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
1259 /* TODO Add ethtool stats. */
1260 static const struct ethtool_ops mtk_star_ethtool_ops = {
1261 .get_drvinfo = mtk_star_get_drvinfo,
1262 .get_link = ethtool_op_get_link,
1263 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1264 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1267 static int mtk_star_receive_packet(struct mtk_star_priv *priv)
1269 struct mtk_star_ring *ring = &priv->rx_ring;
1270 struct device *dev = mtk_star_get_dev(priv);
1271 struct mtk_star_ring_desc_data desc_data;
1272 struct net_device *ndev = priv->ndev;
1273 struct sk_buff *curr_skb, *new_skb;
1274 dma_addr_t new_dma_addr;
1277 spin_lock(&priv->lock);
1278 ret = mtk_star_ring_pop_tail(ring, &desc_data);
1279 spin_unlock(&priv->lock);
1283 curr_skb = desc_data.skb;
1285 if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
1286 (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
1287 /* Error packet -> drop and reuse skb. */
1292 /* Prepare new skb before receiving the current one. Reuse the current
1293 * skb if we fail at any point.
1295 new_skb = mtk_star_alloc_skb(ndev);
1297 ndev->stats.rx_dropped++;
1302 new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
1303 if (dma_mapping_error(dev, new_dma_addr)) {
1304 ndev->stats.rx_dropped++;
1305 dev_kfree_skb(new_skb);
1307 netdev_err(ndev, "DMA mapping error of RX descriptor\n");
1311 /* We can't fail anymore at this point: it's safe to unmap the skb. */
1312 mtk_star_dma_unmap_rx(priv, &desc_data);
1314 skb_put(desc_data.skb, desc_data.len);
1315 desc_data.skb->ip_summed = CHECKSUM_NONE;
1316 desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
1317 desc_data.skb->dev = ndev;
1318 netif_receive_skb(desc_data.skb);
1321 desc_data.dma_addr = new_dma_addr;
1322 desc_data.len = skb_tailroom(new_skb);
1323 desc_data.skb = new_skb;
1325 spin_lock(&priv->lock);
1326 mtk_star_ring_push_head_rx(ring, &desc_data);
1327 spin_unlock(&priv->lock);
/* Receive up to @budget packets, then resume RX DMA. Returns the number
 * of packets actually received, for NAPI accounting.
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received;

	for (received = 0; received < budget; received++) {
		/* A non-zero return means we hit the first descriptor still
		 * owned by DMA: nothing was received in this iteration, so
		 * do not count it. (The previous comma-increment loop also
		 * counted that final failed attempt, over-reporting the
		 * work done to NAPI by one.)
		 */
		if (mtk_star_receive_packet(priv))
			break;
	}

	mtk_star_dma_resume_rx(priv);

	return received;
}
1344 static int mtk_star_poll(struct napi_struct *napi, int budget)
1346 struct mtk_star_priv *priv;
1349 priv = container_of(napi, struct mtk_star_priv, napi);
1351 /* Clean-up all TX descriptors. */
1352 mtk_star_tx_complete_all(priv);
1353 /* Receive up to $budget packets. */
1354 received = mtk_star_process_rx(priv, budget);
1356 if (received < budget) {
1357 napi_complete_done(napi, received);
1358 mtk_star_intr_enable_rx(priv);
1364 static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
1366 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1367 MTK_STAR_BIT_PHY_CTRL0_RWOK);
1370 static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
1374 return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1375 val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
1376 10, MTK_STAR_WAIT_TIMEOUT);
1379 static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1381 struct mtk_star_priv *priv = mii->priv;
1382 unsigned int val, data;
1385 if (regnum & MII_ADDR_C45)
1388 mtk_star_mdio_rwok_clear(priv);
1390 val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1391 val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1392 val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1394 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1396 ret = mtk_star_mdio_rwok_wait(priv);
1400 regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1402 data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1403 data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1408 static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
1409 int regnum, u16 data)
1411 struct mtk_star_priv *priv = mii->priv;
1414 if (regnum & MII_ADDR_C45)
1417 mtk_star_mdio_rwok_clear(priv);
1420 val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1421 val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1422 regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
1423 regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1425 val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
1427 regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1429 return mtk_star_mdio_rwok_wait(priv);
1432 static int mtk_star_mdio_init(struct net_device *ndev)
1434 struct mtk_star_priv *priv = netdev_priv(ndev);
1435 struct device *dev = mtk_star_get_dev(priv);
1436 struct device_node *of_node, *mdio_node;
1439 of_node = dev->of_node;
1441 mdio_node = of_get_child_by_name(of_node, "mdio");
1445 if (!of_device_is_available(mdio_node)) {
1450 priv->mii = devm_mdiobus_alloc(dev);
1456 snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1457 priv->mii->name = "mtk-mac-mdio";
1458 priv->mii->parent = dev;
1459 priv->mii->read = mtk_star_mdio_read;
1460 priv->mii->write = mtk_star_mdio_write;
1461 priv->mii->priv = priv;
1463 ret = of_mdiobus_register(priv->mii, mdio_node);
1466 of_node_put(mdio_node);
1470 static int mtk_star_suspend(struct device *dev)
1472 struct mtk_star_priv *priv;
1473 struct net_device *ndev;
1475 ndev = dev_get_drvdata(dev);
1476 priv = netdev_priv(ndev);
1478 if (netif_running(ndev))
1479 mtk_star_disable(ndev);
1481 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1486 static int mtk_star_resume(struct device *dev)
1488 struct mtk_star_priv *priv;
1489 struct net_device *ndev;
1492 ndev = dev_get_drvdata(dev);
1493 priv = netdev_priv(ndev);
1495 ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1499 if (netif_running(ndev)) {
1500 ret = mtk_star_enable(ndev);
1502 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1508 static void mtk_star_clk_disable_unprepare(void *data)
1510 struct mtk_star_priv *priv = data;
1512 clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1515 static void mtk_star_mdiobus_unregister(void *data)
1517 struct mtk_star_priv *priv = data;
1519 mdiobus_unregister(priv->mii);
1522 static int mtk_star_probe(struct platform_device *pdev)
1524 struct device_node *of_node;
1525 struct mtk_star_priv *priv;
1526 struct net_device *ndev;
1532 of_node = dev->of_node;
1534 ndev = devm_alloc_etherdev(dev, sizeof(*priv));
1538 priv = netdev_priv(ndev);
1540 SET_NETDEV_DEV(ndev, dev);
1541 platform_set_drvdata(pdev, ndev);
1543 ndev->min_mtu = ETH_ZLEN;
1544 ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
1546 spin_lock_init(&priv->lock);
1547 INIT_WORK(&priv->stats_work, mtk_star_update_stats_work);
1549 base = devm_platform_ioremap_resource(pdev, 0);
1551 return PTR_ERR(base);
1553 /* We won't be checking the return values of regmap read & write
1554 * functions. They can only fail for mmio if there's a clock attached
1555 * to regmap which is not the case here.
1557 priv->regs = devm_regmap_init_mmio(dev, base,
1558 &mtk_star_regmap_config);
1559 if (IS_ERR(priv->regs))
1560 return PTR_ERR(priv->regs);
1562 priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
1563 "mediatek,pericfg");
1564 if (IS_ERR(priv->pericfg)) {
1565 dev_err(dev, "Failed to lookup the PERICFG syscon\n");
1566 return PTR_ERR(priv->pericfg);
1569 ndev->irq = platform_get_irq(pdev, 0);
1573 for (i = 0; i < MTK_STAR_NCLKS; i++)
1574 priv->clks[i].id = mtk_star_clk_names[i];
1575 ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
1579 ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1583 ret = devm_add_action_or_reset(dev,
1584 mtk_star_clk_disable_unprepare, priv);
1588 ret = of_get_phy_mode(of_node, &priv->phy_intf);
1591 } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
1592 dev_err(dev, "unsupported phy mode: %s\n",
1593 phy_modes(priv->phy_intf));
1597 priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
1598 if (!priv->phy_node) {
1599 dev_err(dev, "failed to retrieve the phy handle from device tree\n");
1603 mtk_star_set_mode_rmii(priv);
1605 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1607 dev_err(dev, "unsupported DMA mask\n");
1611 priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
1613 GFP_KERNEL | GFP_DMA);
1614 if (!priv->ring_base)
1617 mtk_star_nic_disable_pd(priv);
1618 mtk_star_init_config(priv);
1620 ret = mtk_star_mdio_init(ndev);
1624 ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv);
1628 ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
1629 if (ret || !is_valid_ether_addr(ndev->dev_addr))
1630 eth_hw_addr_random(ndev);
1632 ndev->netdev_ops = &mtk_star_netdev_ops;
1633 ndev->ethtool_ops = &mtk_star_ethtool_ops;
1635 netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
1637 return devm_register_netdev(dev, ndev);
1640 static const struct of_device_id mtk_star_of_match[] = {
1641 { .compatible = "mediatek,mt8516-eth", },
1642 { .compatible = "mediatek,mt8518-eth", },
1643 { .compatible = "mediatek,mt8175-eth", },
1646 MODULE_DEVICE_TABLE(of, mtk_star_of_match);
/* System-sleep PM only; no runtime PM callbacks are provided. */
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
			 mtk_star_suspend, mtk_star_resume);
1651 static struct platform_driver mtk_star_driver = {
1653 .name = MTK_STAR_DRVNAME,
1654 .pm = &mtk_star_pm_ops,
1655 .of_match_table = of_match_ptr(mtk_star_of_match),
1657 .probe = mtk_star_probe,
1659 module_platform_driver(mtk_star_driver);
1661 MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
1662 MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
1663 MODULE_LICENSE("GPL");