1 // SPDX-License-Identifier: GPL-2.0+
 * This file is the driver for the Renesas Ethernet AVB.
6 * Copyright (C) 2015-2017 Renesas Electronics Corporation
8 * Based on the SuperH Ethernet driver.
19 #include <asm/cache.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <linux/mii.h>
25 #include <asm/global_data.h>
/* Ethernet AVB register offsets from the mapped device base address */
#define RAVB_REG_CCC 0x000	/* AVB-DMAC mode register (operating mode command) */
#define RAVB_REG_DBAT 0x004	/* Descriptor base address table register */
#define RAVB_REG_CSR 0x00C	/* AVB-DMAC status register (current mode) */
#define RAVB_REG_APSR 0x08C	/* TX/RX clock internal delay mode register */
#define RAVB_REG_RCR 0x090	/* Receive configuration register */
#define RAVB_REG_TGC 0x300	/* Transmit general configuration register */
#define RAVB_REG_TCCR 0x304	/* Transmit control register (queue requests) */
#define RAVB_REG_RIC0 0x360	/* Receive interrupt control register 0 */
#define RAVB_REG_RIC1 0x368	/* Receive interrupt control register 1 */
#define RAVB_REG_RIC2 0x370	/* Receive interrupt control register 2 */
#define RAVB_REG_TIC 0x378	/* Transmit interrupt control register */
#define RAVB_REG_ECMR 0x500	/* E-MAC mode register (duplex, TX/RX enable) */
#define RAVB_REG_RFLR 0x508	/* Receive frame length limit register */
#define RAVB_REG_ECSIPR 0x518	/* E-MAC status interrupt permission register */
#define RAVB_REG_PIR 0x520	/* PHY interface register (bit-bang MDIO) */
#define RAVB_REG_GECMR 0x5b0	/* E-MAC gigabit mode register (speed select) */
#define RAVB_REG_MAHR 0x5c0	/* MAC address high register (bytes 0-3) */
#define RAVB_REG_MALR 0x5c8	/* MAC address low register (bytes 4-5) */
/* CCC: operating mode opcodes and byte-order control */
#define CCC_OPC_CONFIG BIT(0)		/* Request CONFIG mode */
#define CCC_OPC_OPERATION BIT(1)	/* Request OPERATION mode (starts DMAC) */
#define CCC_BOC BIT(20)			/* Byte order: cleared for little endian */

/* CSR: current operating state of the AVB-DMAC */
#define CSR_OPS 0x0000000F		/* Operating status field mask */
#define CSR_OPS_CONFIG BIT(1)		/* Controller has entered CONFIG mode */

/* APSR: internal clock delay enables (used for RGMII *-ID phy modes) */
#define APSR_RDM BIT(13)		/* RX clock internal delay mode */
#define APSR_TDM BIT(14)		/* TX clock internal delay mode */

#define TCCR_TSRQ0 BIT(0)		/* Transmission request for queue 0 */

#define RFLR_RFL_MIN 0x05EE		/* Minimum RX frame length limit: 1518 bytes */

/* PIR: bit-bang MDIO control lines */
#define PIR_MDI BIT(3)			/* MDIO data input (read from PHY) */
#define PIR_MDO BIT(2)			/* MDIO data output (written to PHY) */
#define PIR_MMD BIT(1)			/* MDIO direction: set = drive as output */
#define PIR_MDC BIT(0)			/* MDIO clock line */

/* ECMR: E-MAC mode bits */
#define ECMR_TRCCM BIT(26)
#define ECMR_RZPF BIT(20)
#define ECMR_PFR BIT(18)
#define ECMR_RXF BIT(17)
#define ECMR_RE BIT(6)			/* Receive enable */
#define ECMR_TE BIT(5)			/* Transmit enable */
#define ECMR_DM BIT(1)			/* Duplex mode: set = full duplex */
/* Base E-MAC configuration applied together with RE/TE in ravb_config() */
#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)
/* Descriptor ring geometry */
#define RAVB_NUM_BASE_DESC 16	/* Entries in the descriptor base address table */
#define RAVB_NUM_TX_DESC 8	/* TX ring slots (last one is a LINKFIX link) */
#define RAVB_NUM_RX_DESC 8	/* RX ring slots */

/* Queue indices within the descriptor base address table */
#define RAVB_TX_QUEUE_OFFSET 0
#define RAVB_RX_QUEUE_OFFSET 4

/* Descriptor type field, bits 31:28 of the descriptor ctrl word */
#define RAVB_DESC_DT(n) ((n) << 28)
#define RAVB_DESC_DT_FSINGLE RAVB_DESC_DT(0x7)	/* Whole frame in one descriptor */
#define RAVB_DESC_DT_LINKFIX RAVB_DESC_DT(0x9)	/* Link to another descriptor list */
#define RAVB_DESC_DT_EOS RAVB_DESC_DT(0xa)	/* End of set */
#define RAVB_DESC_DT_FEMPTY RAVB_DESC_DT(0xc)	/* Empty, owned by hardware */
#define RAVB_DESC_DT_EEMPTY RAVB_DESC_DT(0x3)	/* Empty, owned by software */
#define RAVB_DESC_DT_MASK RAVB_DESC_DT(0xf)

/* Descriptor data size field, lower 12 bits of the ctrl word */
#define RAVB_DESC_DS(n) (((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK 0xfff

/* RX descriptor MAC status code (MSC) bits reported in the ctrl word */
#define RAVB_RX_DESC_MSC_MC BIT(23)	/* Multicast frame */
#define RAVB_RX_DESC_MSC_CEEF BIT(22)	/* Carrier extension error */
#define RAVB_RX_DESC_MSC_CRL BIT(21)	/* Carrier lost */
#define RAVB_RX_DESC_MSC_FRE BIT(20)	/* Fraction error */
#define RAVB_RX_DESC_MSC_RTLF BIT(19)	/* Frame too long */
#define RAVB_RX_DESC_MSC_RTSF BIT(18)	/* Frame too short */
#define RAVB_RX_DESC_MSC_RFE BIT(17)	/* Receive FIFO overflow */
#define RAVB_RX_DESC_MSC_CRC BIT(16)	/* CRC error */
#define RAVB_RX_DESC_MSC_MASK (0xff << 16)

/* MSC bits that make ravb_recv() drop the received frame */
#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

/* How long ravb_send() polls for the DMAC to consume a frame */
#define RAVB_TX_TIMEOUT_MS 1000
	struct ravb_desc data;	/* Data descriptor handed to the AVB-DMAC */
	struct ravb_desc link;	/* Link descriptor chaining to the next RX slot */
	u8 packet[PKTSIZE_ALIGN];	/* Receive buffer for this slot */
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];	/* Descriptor base address table */
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];	/* TX descriptor ring */
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];	/* RX descriptor ring + buffers */
	struct phy_device *phydev;	/* PHY attached via the bit-bang MDIO bus */
	void __iomem *iobase;	/* Mapped register base address */
	struct clk_bulk clks;	/* Module clocks, enabled at probe time */
/* Write back D-cache over [addr, addr + len) so the DMAC sees current data. */
static inline void ravb_flush_dcache(u32 addr, u32 len)
	flush_dcache_range(addr, addr + len);
/*
 * Invalidate D-cache over [addr, addr + len) before reading DMA-written
 * data; the range is widened to ARCH_DMA_MINALIGN boundaries since
 * invalidate_dcache_range() operates on whole cache lines.
 */
static inline void ravb_invalidate_dcache(u32 addr, u32 len)
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
/*
 * eth_ops.send: transmit one frame.
 *
 * Builds a single-frame (FSINGLE) TX descriptor pointing at @packet,
 * requests transmission on queue 0, then polls until the DMAC rewrites
 * the descriptor type (frame consumed) or RAVB_TX_TIMEOUT_MS elapses.
 */
static int ravb_send(struct udevice *dev, void *packet, int len)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];

	/* Update TX descriptor; flush the packet so DMA reads current data */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		/* Re-read the descriptor: DMAC changes the type on completion */
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)

	/* Last ring slot is the LINKFIX link, so only NUM-1 slots carry data */
	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
/*
 * eth_ops.recv: poll the current RX descriptor for a received frame.
 *
 * Stores a pointer to the frame data in *packetp on success; frames with
 * any MSC error bit set are discarded after clearing the status bits.
 */
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Check if the rx descriptor is ready (FEMPTY = still owned by HW) */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;

	/* Length lives in the low 12 bits of the descriptor ctrl word */
	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	/* Invalidate so the CPU sees the DMA-written payload */
	ravb_invalidate_dcache((uintptr_t)packet, len);
/*
 * eth_ops.free_pkt: return the just-consumed RX descriptor to the
 * hardware (FEMPTY) and advance the ring index to the next slot.
 */
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	/* Pre-invalidate so the next ravb_recv() reads fresh state */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
/*
 * Request CONFIG mode and wait (up to 100ms) for the CSR status to
 * confirm the mode change. Returns 0 on success, negative on timeout.
 */
static int ravb_reset(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check the operating mode is changed to the config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
/*
 * Build the descriptor base address table: each queue entry starts as an
 * end-of-set (EOS) descriptor; the table address is written to DBAT so
 * the AVB-DMAC can locate the per-queue descriptor lists.
 */
static void ravb_base_desc_init(struct ravb_priv *eth)
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
/*
 * Build the TX descriptor ring: all slots start software-owned (EEMPTY)
 * and the last slot is a LINKFIX descriptor looping back to the ring
 * start; the TX queue entry of the base table is then linked to the ring.
 */
static void ravb_tx_desc_init(struct ravb_priv *eth)
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
/*
 * Build the RX descriptor ring: each slot pairs a data descriptor
 * (pointing at the slot's packet buffer) with a link descriptor chaining
 * to the next slot; the last link loops back to the ring start. The RX
 * queue entry of the base table is then linked to the ring.
 */
static void ravb_rx_desc_init(struct ravb_priv *eth)
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
/*
 * Connect to the PHY and mask its advertised link modes down to what
 * this MAC supports. If the DT limits max-speed below 1000, gigabit
 * advertisement is also cleared directly in MII_CTRL1000.
 */
static int ravb_phy_config(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;

	/* Address -1: probe the bus for the first responding PHY */
	phydev = phy_connect(eth->bus, -1, dev, pdata->phy_interface);

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
		SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		/* Bits 9:8 = 1000BASE-T full/half duplex advertisement */
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
/* eth_ops.write_hwaddr: program the MAC address into MAHR/MALR. */
static int ravb_write_hwaddr(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	unsigned char *mac = pdata->enetaddr;

	/* MAHR holds the first four MAC bytes, most significant first */
	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	/* MALR holds the remaining two bytes */
	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);
/* E-MAC init function: mask MAC interrupts and set the RX frame limit. */
static int ravb_mac_init(struct ravb_priv *eth)
	/* Disable MAC Interrupt */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Recv frame limit set register (1518 bytes) */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);
/*
 * AVB-DMAC init function: enter CONFIG mode, mask interrupts, set byte
 * order and FIFO configuration, then program the TX/RX internal clock
 * delays (APSR) from the DT properties or, failing that, the phy-mode.
 */
static int ravb_dmac_init(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);

	bool explicit_delay = false;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* RX configuration (magic value carried over from the reference driver) */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* TX general configuration (magic value from the reference driver) */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
	    (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))

	/* Explicit DT delay properties take precedence over phy-mode defaults */
	if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		explicit_delay = true;

	if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		explicit_delay = true;

	if (!explicit_delay) {
		/* Fall back to the delays implied by the RGMII *-ID phy mode */
		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)

		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)

	writel(mode, eth->iobase + RAVB_REG_APSR);
/*
 * Configure DMAC and E-MAC, bring up the PHY link, then program the
 * negotiated speed (GECMR) and duplex/enable bits (ECMR).
 */
static int ravb_config(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	/* Base mode bits plus receive/transmit enable */
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;

	/* Configure AVB-DMAC register */

	/* Configure E-MAC registers */
	ravb_write_hwaddr(dev);

	/* Start autonegotiation and wait for link */
	ret = phy_startup(phy);

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);	/* 100 Mbit/s */
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);	/* 1 Gbit/s */

	/* Check if full duplex mode is supported by the phy */

	writel(mask, eth->iobase + RAVB_REG_ECMR);
/*
 * eth_ops.start: reset the DMAC, build all descriptor rings, configure
 * DMAC/E-MAC/PHY, then switch the controller to OPERATION mode.
 */
static int ravb_start(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);

	ret = ravb_reset(dev);

	/* Descriptor rings must exist before leaving CONFIG mode */
	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);
/* eth_ops.stop: shut down the PHY link. */
static void ravb_stop(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
494 static int ravb_probe(struct udevice *dev)
496 struct eth_pdata *pdata = dev_get_plat(dev);
497 struct ravb_priv *eth = dev_get_priv(dev);
498 struct mii_dev *mdiodev;
499 void __iomem *iobase;
502 iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
503 eth->iobase = iobase;
505 ret = clk_get_bulk(dev, ð->clks);
509 mdiodev = mdio_alloc();
515 mdiodev->read = bb_miiphy_read;
516 mdiodev->write = bb_miiphy_write;
517 bb_miiphy_buses[0].priv = eth;
518 snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);
520 ret = mdio_register(mdiodev);
522 goto err_mdio_register;
524 eth->bus = miiphy_get_dev_by_name(dev->name);
527 ret = clk_enable_bulk(ð->clks);
529 goto err_mdio_register;
531 ret = ravb_reset(dev);
535 ret = ravb_phy_config(dev);
542 clk_release_bulk(ð->clks);
546 unmap_physmem(eth->iobase, MAP_NOCACHE);
/* Driver removal: release clocks, unregister the MDIO bus, unmap MMIO. */
static int ravb_remove(struct udevice *dev)
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_release_bulk(&eth->clks);

	mdio_unregister(eth->bus);

	unmap_physmem(eth->iobase, MAP_NOCACHE);
/* Bit-bang MDIO bus init callback. */
int ravb_bb_init(struct bb_miiphy_bus *bus)
/* Bit-bang MDIO: drive the MDIO line as an output (set MMD). */
int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
/* Bit-bang MDIO: release the MDIO line to input/tristate (clear MMD). */
int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
/* Bit-bang MDIO: drive the MDIO data output to @v. */
int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
/* Bit-bang MDIO: sample the MDIO input line into *v. */
int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
	struct ravb_priv *eth = bus->priv;

	/* Shift by 3 because PIR_MDI is BIT(3) */
	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;
/* Bit-bang MDIO: drive the MDC clock line to @v. */
int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
/* Bit-bang MDIO: inter-transition delay callback. */
int ravb_bb_delay(struct bb_miiphy_bus *bus)
/* Single bit-banged MDIO bus; .priv and .name are filled in at probe time */
struct bb_miiphy_bus bb_miiphy_buses[] = {
		.init = ravb_bb_init,
		.mdio_active = ravb_bb_mdio_active,
		.mdio_tristate = ravb_bb_mdio_tristate,
		.set_mdio = ravb_bb_set_mdio,
		.get_mdio = ravb_bb_get_mdio,
		.set_mdc = ravb_bb_set_mdc,
		.delay = ravb_bb_delay,

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);
/* Driver model Ethernet operations for this device */
static const struct eth_ops ravb_ops = {
	.free_pkt = ravb_free_pkt,
	.write_hwaddr = ravb_write_hwaddr,
650 int ravb_of_to_plat(struct udevice *dev)
652 struct eth_pdata *pdata = dev_get_plat(dev);
655 pdata->iobase = dev_read_addr(dev);
657 pdata->phy_interface = dev_read_phy_mode(dev);
658 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
661 pdata->max_speed = 1000;
662 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
664 pdata->max_speed = fdt32_to_cpu(*cell);
666 sprintf(bb_miiphy_buses[0].name, dev->name);
/* Matches the R-Car Gen3 and Gen4 EtherAVB device tree nodes */
static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ .compatible = "renesas,etheravb-rcar-gen4" },
U_BOOT_DRIVER(eth_ravb) = {
	.of_match = ravb_ids,
	.of_to_plat = ravb_of_to_plat,
	.remove = ravb_remove,
	.priv_auto = sizeof(struct ravb_priv),
	.plat_auto = sizeof(struct eth_pdata),
	/* Descriptors/buffers live in priv, so it must be DMA-aligned */
	.flags = DM_FLAG_ALLOC_PRIV_DMA,