1 // SPDX-License-Identifier: GPL-2.0+
3 * (C) Copyright 2011 Michal Simek
5 * Michal SIMEK <monstr@monstr.eu>
7 * Based on Xilinx gmac driver:
8 * (C) Copyright 2011 Xilinx
15 #include <generic-phy.h>
22 #include <asm/cache.h>
29 #include <asm/system.h>
30 #include <asm/arch/hardware.h>
31 #include <asm/arch/sys_proto.h>
32 #include <dm/device_compat.h>
33 #include <linux/bitops.h>
34 #include <linux/err.h>
35 #include <linux/errno.h>
37 /* Bit/mask specification */
38 #define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
39 #define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
40 #define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
41 #define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
42 #define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
44 #define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame. */
45 #define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */
46 #define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
48 #define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
49 #define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit.. */
50 #define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */
52 /* Wrap bit, last descriptor */
53 #define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
54 #define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
55 #define ZYNQ_GEM_TXBUF_USED_MASK 0x80000000 /* Used by Hw */
57 #define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
58 #define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
59 #define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
60 #define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */
62 #define ZYNQ_GEM_NWCFG_SPEED100 0x00000001 /* 100 Mbps operation */
63 #define ZYNQ_GEM_NWCFG_SPEED1000 0x00000400 /* 1Gbps operation */
64 #define ZYNQ_GEM_NWCFG_FDEN 0x00000002 /* Full Duplex mode */
65 #define ZYNQ_GEM_NWCFG_NO_BRDC BIT(5) /* No broadcast */
66 #define ZYNQ_GEM_NWCFG_FSREM 0x00020000 /* FCS removal */
67 #define ZYNQ_GEM_NWCFG_SGMII_ENBL 0x08000000 /* SGMII Enable */
68 #define ZYNQ_GEM_NWCFG_PCS_SEL 0x00000800 /* PCS select */
/*
 * NOTE(review): ZYNQ_GEM_NWCFG_MDCCLKDIV is defined twice with different
 * dividers; in the full driver these alternatives are selected by an
 * #if/#else preprocessor guard that is not visible in this excerpt —
 * restore the guard before compiling, otherwise this is a redefinition.
 */
70 #define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x00100000 /* Div pclk by 64, max 160MHz */
72 #define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x000c0000 /* Div pclk by 48, max 120MHz */
76 # define ZYNQ_GEM_DBUS_WIDTH (1 << 21) /* 64 bit bus */
78 # define ZYNQ_GEM_DBUS_WIDTH (0 << 21) /* 32 bit bus */
81 #define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_DBUS_WIDTH | \
82 ZYNQ_GEM_NWCFG_FDEN | \
83 ZYNQ_GEM_NWCFG_NO_BRDC | \
84 ZYNQ_GEM_NWCFG_FSREM | \
85 ZYNQ_GEM_NWCFG_MDCCLKDIV)
87 #define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */
89 #define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */
90 /* Use full configured addressable space (8 Kb) */
91 #define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
92 /* Use full configured addressable space (4 Kb) */
93 #define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
94 /* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */
95 #define ZYNQ_GEM_DMACR_RXBUF 0x00180000
97 #if defined(CONFIG_PHYS_64BIT)
98 # define ZYNQ_GEM_DMA_BUS_WIDTH BIT(30) /* 64 bit bus */
100 # define ZYNQ_GEM_DMA_BUS_WIDTH (0 << 30) /* 32 bit bus */
103 #define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
104 ZYNQ_GEM_DMACR_RXSIZE | \
105 ZYNQ_GEM_DMACR_TXSIZE | \
106 ZYNQ_GEM_DMACR_RXBUF | \
107 ZYNQ_GEM_DMA_BUS_WIDTH)
109 #define ZYNQ_GEM_TSR_DONE 0x00000020 /* Tx done mask */
111 #define ZYNQ_GEM_PCS_CTL_ANEG_ENBL 0x1000
113 #define ZYNQ_GEM_DCFG_DBG6_DMA_64B BIT(23)
115 #define MDIO_IDLE_TIMEOUT_MS 100
117 /* Use MII register 1 (MII status register) to detect PHY */
118 #define PHY_DETECT_REG 1
120 /* Mask used to verify certain PHY features (or register contents)
121 * in the register above:
122 * 0x1000: 10Mbps full duplex support
123 * 0x0800: 10Mbps half duplex support
124 * 0x0008: Auto-negotiation support
126 #define PHY_DETECT_MASK 0x1808
128 /* TX BD status masks */
129 #define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
130 #define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
131 #define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000
133 /* Clock frequencies for different speeds */
134 #define ZYNQ_GEM_FREQUENCY_10 2500000UL
135 #define ZYNQ_GEM_FREQUENCY_100 25000000UL
136 #define ZYNQ_GEM_FREQUENCY_1000 125000000UL
138 #define RXCLK_EN BIT(0)
140 /* Device registers */
/*
 * Memory map of the GEM controller registers.
 * NOTE(review): the offset comments jump (0x8 -> 0x10, 0x20 -> 0x2c,
 * 0x84 -> 0x8c, ...); the reserved/padding members that make this struct
 * match the hardware layout are elided from this excerpt, as is the
 * closing brace.  LADDR_HIGH and STAT_SIZE are defined outside this view.
 * Do not use this layout as-is.
 */
141 struct zynq_gem_regs {
142 u32 nwctrl; /* 0x0 - Network Control reg */
143 u32 nwcfg; /* 0x4 - Network Config reg */
144 u32 nwsr; /* 0x8 - Network Status reg */
146 u32 dmacr; /* 0x10 - DMA Control reg */
147 u32 txsr; /* 0x14 - TX Status reg */
148 u32 rxqbase; /* 0x18 - RX Q Base address reg */
149 u32 txqbase; /* 0x1c - TX Q Base address reg */
150 u32 rxsr; /* 0x20 - RX Status reg */
152 u32 idr; /* 0x2c - Interrupt Disable reg */
154 u32 phymntnc; /* 0x34 - Phy Maintaince reg */
156 u32 hashl; /* 0x80 - Hash Low address reg */
157 u32 hashh; /* 0x84 - Hash High address reg */
160 u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
161 u32 match[4]; /* 0xa8 - Type ID1 Match reg */
164 u32 stat[STAT_SIZE]; /* 0x100 - Octects transmitted Low reg */
168 u32 dcfg6; /* 0x294 Design config reg6 */
170 u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
172 u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
174 u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
176 u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
/*
 * NOTE(review): fragment of the DMA buffer descriptor (struct emac_bd);
 * the struct's opening line, the status word, and the 64-bit addr_hi
 * member under CONFIG_PHYS_64BIT are missing from this excerpt.
 */
181 u32 addr; /* Next descriptor pointer */
183 #if defined(CONFIG_PHYS_64BIT)
189 /* Reduce amount of BUFs if you have limited amount of memory */
191 /* Page table entries are set to 1MB, or multiples of 1MB
192 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
194 #define BD_SPACE 0x100000
195 /* BD separation space */
196 #define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd))
198 /* Setup the first free TX descriptor */
199 #define TX_FREE_DESC 2
201 /* Initialized, rxbd_current, rx_first_buf must be 0 after init */
/*
 * Per-device driver state.
 * NOTE(review): several members referenced elsewhere in this file
 * (rxbuffers, rxbd_current, rx_first_buf, phyaddr, max_speed, bus,
 * tx_clk/rx_clk, clk_en_info, dma_64bit, int_pcs, phy_of_node, ...)
 * are elided from this excerpt, as is the closing brace.
 */
202 struct zynq_gem_priv {
203 struct emac_bd *tx_bd;
204 struct emac_bd *rx_bd;
210 struct zynq_gem_regs *iobase;
211 struct zynq_gem_regs *mdiobase;
212 phy_interface_t interface;
213 struct phy_device *phydev;
222 struct reset_ctl_bulk resets;
/*
 * phy_setup_op() - perform one MDIO read or write via the PHY maintenance
 * register: wait for MDIO idle, program phymntnc with the op/addr/reg/data
 * fields, wait for completion, and for a read op return the result through
 * *data.
 * NOTE(review): "®s" throughout this file is mojibake for "&regs"
 * (HTML "&reg;" entity damage) — must be restored before compiling.
 * The declarations (err, mgtcr), error-return checks after each wait, the
 * final return, and the closing brace are missing from this excerpt.
 */
225 static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
229 struct zynq_gem_regs *regs = priv->mdiobase;
/* Wait until the MDIO interface reports idle before starting a new op. */
232 err = wait_for_bit_le32(®s->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
233 true, MDIO_IDLE_TIMEOUT_MS, false);
237 /* Construct mgtcr mask for the operation */
238 mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
239 (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
240 (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;
242 /* Write mgtcr and wait for completion */
243 writel(mgtcr, ®s->phymntnc);
245 err = wait_for_bit_le32(®s->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
246 true, MDIO_IDLE_TIMEOUT_MS, false);
/* Read ops: the PHY's reply sits in the low bits of phymntnc. */
250 if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
251 *data = readl(®s->phymntnc);
/*
 * phyread() - read one 16-bit PHY register over MDIO; thin wrapper around
 * phy_setup_op() with the read opcode.  The return statement and closing
 * brace are missing from this excerpt.
 */
256 static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
257 u32 regnum, u16 *val)
261 ret = phy_setup_op(priv, phy_addr, regnum,
262 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
265 debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
266 phy_addr, regnum, *val);
/*
 * phywrite() - write one 16-bit PHY register over MDIO; thin wrapper
 * around phy_setup_op() with the write opcode.  Passes &data because
 * phy_setup_op() takes the data by pointer for both directions.
 */
271 static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
272 u32 regnum, u16 data)
274 debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
277 return phy_setup_op(priv, phy_addr, regnum,
278 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
/*
 * zynq_gem_setup_mac() - program the device's MAC address (.write_hwaddr).
 * Packs enetaddr[0..3] into the BOT (low) register and enetaddr[4..5] into
 * the TOP (high) register, after clearing all four specific-address slots
 * and the type-match registers.  Return statement and closing brace are
 * missing from this excerpt.  "®s" is mojibake for "&regs".
 */
281 static int zynq_gem_setup_mac(struct udevice *dev)
283 u32 i, macaddrlow, macaddrhigh;
284 struct eth_pdata *pdata = dev_get_plat(dev);
285 struct zynq_gem_priv *priv = dev_get_priv(dev);
286 struct zynq_gem_regs *regs = priv->iobase;
288 /* Set the MAC bits [31:0] in BOT */
289 macaddrlow = pdata->enetaddr[0];
290 macaddrlow |= pdata->enetaddr[1] << 8;
291 macaddrlow |= pdata->enetaddr[2] << 16;
292 macaddrlow |= pdata->enetaddr[3] << 24;
294 /* Set MAC bits [47:32] in TOP */
295 macaddrhigh = pdata->enetaddr[4];
296 macaddrhigh |= pdata->enetaddr[5] << 8;
/* Clear all 4 specific-address slots so only slot 0 matches. */
298 for (i = 0; i < 4; i++) {
299 writel(0, ®s->laddr[i][LADDR_LOW]);
300 writel(0, ®s->laddr[i][LADDR_HIGH]);
301 /* Do not use MATCHx register */
302 writel(0, ®s->match[i]);
/* Writing LOW then HIGH activates the address filter for slot 0. */
305 writel(macaddrlow, ®s->laddr[0][LADDR_LOW]);
306 writel(macaddrhigh, ®s->laddr[0][LADDR_HIGH]);
/*
 * zynq_phy_init() - connect to and configure the external PHY.
 * Enables only the MDIO port on the controller, connects the PHY at
 * priv->phyaddr, clamps its advertised features to 10/100/1000BASE-T
 * (plus pause), attaches the DT node, and runs phy_config().
 * Error checks after phy_connect()/phy_set_supported() and the closing
 * brace are missing from this excerpt.
 */
311 static int zynq_phy_init(struct udevice *dev)
314 struct zynq_gem_priv *priv = dev_get_priv(dev);
315 struct zynq_gem_regs *regs_mdio = priv->mdiobase;
316 const u32 supported = SUPPORTED_10baseT_Half |
317 SUPPORTED_10baseT_Full |
318 SUPPORTED_100baseT_Half |
319 SUPPORTED_100baseT_Full |
320 SUPPORTED_1000baseT_Half |
321 SUPPORTED_1000baseT_Full;
323 /* Enable only MDIO bus */
324 writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, ®s_mdio->nwctrl);
326 priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
/* Optional DT "max-speed": further restrict the supported mask. */
331 if (priv->max_speed) {
332 ret = phy_set_supported(priv->phydev, priv->max_speed);
337 priv->phydev->supported &= supported | ADVERTISED_Pause |
338 ADVERTISED_Asym_Pause;
340 priv->phydev->advertising = priv->phydev->supported;
341 priv->phydev->node = priv->phy_of_node;
343 return phy_config(priv->phydev);
/*
 * zynq_gem_init() - .start hook: full controller bring-up.
 * Sequence visible in this excerpt: detect 64-bit DMA capability, mask
 * interrupts, reset status registers, clear hash filters and statistics,
 * build the RX descriptor ring, program DMA config, park the second
 * priority queue on dummy descriptors, start the PHY, program nwcfg for
 * the negotiated speed (and SGMII/PCS when applicable), set the TX clock
 * rate, enable clocks, then enable RX/TX.
 * NOTE(review): many lines are missing from this excerpt (declarations of
 * i/ret/nwconfig, #else/#endif branches, error returns, case labels in the
 * speed switch, closing braces).  "®s" is mojibake for "&regs".
 */
346 static int zynq_gem_init(struct udevice *dev)
350 unsigned long clk_rate = 0;
351 struct zynq_gem_priv *priv = dev_get_priv(dev);
352 struct zynq_gem_regs *regs = priv->iobase;
353 struct zynq_gem_regs *regs_mdio = priv->mdiobase;
/* Dummy BDs live right after the real TX descriptors in bd_space. */
354 struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
355 struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];
/* Probe design-config reg 6 for 64-bit DMA support in this instance. */
357 if (readl(®s->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
358 priv->dma_64bit = true;
360 priv->dma_64bit = false;
362 #if defined(CONFIG_PHYS_64BIT)
363 if (!priv->dma_64bit) {
364 printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
370 debug("WARN: %s: Not using 64-bit dma even HW supports it\n",
375 /* Disable all interrupts */
376 writel(0xFFFFFFFF, ®s->idr);
378 /* Disable the receiver & transmitter */
379 writel(0, ®s->nwctrl);
380 writel(0, ®s->txsr);
381 writel(0, ®s->rxsr);
382 writel(0, ®s->phymntnc);
384 /* Clear the Hash registers for the mac address
385 * pointed by AddressPtr
387 writel(0x0, ®s->hashl);
388 /* Write bits [63:32] in TOP */
389 writel(0x0, ®s->hashh);
391 /* Clear all counters */
/* Statistics registers are clear-on-read; reading resets them. */
392 for (i = 0; i < STAT_SIZE; i++)
393 readl(®s->stat[i]);
395 /* Setup RxBD space */
396 memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));
398 for (i = 0; i < RX_BUF; i++) {
399 priv->rx_bd[i].status = 0xF0000000;
400 priv->rx_bd[i].addr =
401 (lower_32_bits((ulong)(priv->rxbuffers)
402 + (i * PKTSIZE_ALIGN)));
403 #if defined(CONFIG_PHYS_64BIT)
404 priv->rx_bd[i].addr_hi =
405 (upper_32_bits((ulong)(priv->rxbuffers)
406 + (i * PKTSIZE_ALIGN)));
409 /* WRAP bit to last BD */
410 priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
411 /* Write RxBDs to IP */
412 writel(lower_32_bits((ulong)priv->rx_bd), ®s->rxqbase);
413 #if defined(CONFIG_PHYS_64BIT)
414 writel(upper_32_bits((ulong)priv->rx_bd), ®s->upper_rxqbase);
417 /* Setup for DMA Configuration register */
418 writel(ZYNQ_GEM_DMACR_INIT, ®s->dmacr);
420 /* Setup for Network Control register, MDIO, Rx and Tx enable */
421 setbits_le32(®s_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);
423 /* Disable the second priority queue */
424 dummy_tx_bd->addr = 0;
425 #if defined(CONFIG_PHYS_64BIT)
426 dummy_tx_bd->addr_hi = 0;
428 dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
429 ZYNQ_GEM_TXBUF_LAST_MASK|
430 ZYNQ_GEM_TXBUF_USED_MASK;
432 dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
433 ZYNQ_GEM_RXBUF_NEW_MASK;
434 #if defined(CONFIG_PHYS_64BIT)
435 dummy_rx_bd->addr_hi = 0;
437 dummy_rx_bd->status = 0;
439 writel((ulong)dummy_tx_bd, ®s->transmit_q1_ptr);
440 writel((ulong)dummy_rx_bd, ®s->receive_q1_ptr);
/* Run autonegotiation / link-up on the attached PHY. */
445 ret = phy_startup(priv->phydev);
449 if (!priv->phydev->link) {
450 printf("%s: No link.\n", priv->phydev->dev->name);
454 nwconfig = ZYNQ_GEM_NWCFG_INIT;
457 * Set SGMII enable PCS selection only if internal PCS/PMA
458 * core is used and interface is SGMII.
460 if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
462 nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
463 ZYNQ_GEM_NWCFG_PCS_SEL;
/* Pick nwcfg speed bits and the matching TX clock for the link speed. */
466 switch (priv->phydev->speed) {
468 writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
470 clk_rate = ZYNQ_GEM_FREQUENCY_1000;
473 writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
475 clk_rate = ZYNQ_GEM_FREQUENCY_100;
478 clk_rate = ZYNQ_GEM_FREQUENCY_10;
483 if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
486 * Disable AN for fixed link configuration, enable otherwise.
487 * Must be written after PCS_SEL is set in nwconfig,
488 * otherwise writes will not take effect.
490 if (priv->phydev->phy_id != PHY_FIXED_ID)
491 writel(readl(®s->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
494 writel(readl(®s->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
499 ret = clk_set_rate(&priv->tx_clk, clk_rate);
500 if (IS_ERR_VALUE(ret)) {
501 dev_err(dev, "failed to set tx clock rate\n");
505 ret = clk_enable(&priv->tx_clk);
507 dev_err(dev, "failed to enable tx clock\n");
/* Versal-style instances also gate an RX clock (driver data flag). */
511 if (priv->clk_en_info & RXCLK_EN) {
512 ret = clk_enable(&priv->rx_clk);
514 dev_err(dev, "failed to enable rx clock\n");
/* Finally enable the receiver and transmitter. */
518 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
519 ZYNQ_GEM_NWCTRL_TXEN_MASK);
/*
 * zynq_gem_send() - .send hook: transmit one frame.
 * Builds a 2-descriptor TX chain (real frame + wrap/used dummy terminator),
 * programs the queue base, flushes the frame's cache lines, kicks tx_go,
 * and waits for the TX-done bit in txsr.
 * NOTE(review): declarations (addr, size), intermediate cache maintenance
 * on the BDs, and the closing brace are missing from this excerpt.
 * "®s" is mojibake for "&regs".
 */
524 static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
528 struct zynq_gem_priv *priv = dev_get_priv(dev);
529 struct zynq_gem_regs *regs = priv->iobase;
530 struct emac_bd *current_bd = &priv->tx_bd[1];
533 memset(priv->tx_bd, 0, sizeof(struct emac_bd));
535 priv->tx_bd->addr = lower_32_bits((ulong)ptr);
536 #if defined(CONFIG_PHYS_64BIT)
537 priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
539 priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
540 ZYNQ_GEM_TXBUF_LAST_MASK;
541 /* Dummy descriptor to mark it as the last in descriptor chain */
542 current_bd->addr = 0x0;
543 #if defined(CONFIG_PHYS_64BIT)
544 current_bd->addr_hi = 0x0;
546 current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
547 ZYNQ_GEM_TXBUF_LAST_MASK|
548 ZYNQ_GEM_TXBUF_USED_MASK;
551 writel(lower_32_bits((ulong)priv->tx_bd), ®s->txqbase);
552 #if defined(CONFIG_PHYS_64BIT)
553 writel(upper_32_bits((ulong)priv->tx_bd), ®s->upper_txqbase);
/* Flush the frame buffer (cache-line aligned) so DMA sees the data. */
557 addr &= ~(ARCH_DMA_MINALIGN - 1);
558 size = roundup(len, ARCH_DMA_MINALIGN);
559 flush_dcache_range(addr, addr + size);
/* Kick the transmitter (tx_go). */
563 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);
565 /* Read TX BD status */
566 if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
567 printf("TX buffers exhausted in mid frame\n");
569 return wait_for_bit_le32(®s->txsr, ZYNQ_GEM_TSR_DONE,
573 /* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
/*
 * zynq_gem_recv() - .recv hook: return one received frame, zero-copy.
 * Polls the current RX BD's NEW (used) bit; validates SOF/EOF and length,
 * recovers the (possibly 64-bit) buffer address, invalidates its cache
 * lines, and hands the buffer pointer back via *packetp.  Declarations
 * (frame_len, addr), early returns, the final return of frame_len, and
 * the closing brace are missing from this excerpt.
 */
574 static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
578 struct zynq_gem_priv *priv = dev_get_priv(dev);
579 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
/* NEW bit clear means hardware still owns this descriptor: no frame. */
581 if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
584 if (!(current_bd->status &
585 (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
586 printf("GEM: SOF or EOF not set for last buffer received!\n");
590 frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
592 printf("%s: Zero size packet?\n", __func__);
596 #if defined(CONFIG_PHYS_64BIT)
597 addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
598 | ((dma_addr_t)current_bd->addr_hi << 32));
600 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
602 addr &= ~(ARCH_DMA_MINALIGN - 1);
604 *packetp = (uchar *)(uintptr_t)addr;
/* Drop stale cache lines so the CPU reads what DMA wrote. */
606 invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
/*
 * zynq_gem_free_pkt() - .free_pkt hook: recycle the RX descriptor(s) for
 * the frame just consumed.  Clears the NEW bit and resets status on the
 * SOF descriptor (tracked in rx_first_buf) and the current/EOF descriptor,
 * flushes the buffer back out of cache, and advances rxbd_current with
 * wrap-around.  The addr declaration, some closing braces, and the return
 * are missing from this excerpt.
 */
612 static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
614 struct zynq_gem_priv *priv = dev_get_priv(dev);
615 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
616 struct emac_bd *first_bd;
/* Remember where a (possibly multi-BD) frame started. */
619 if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
620 priv->rx_first_buf = priv->rxbd_current;
/* Clearing NEW returns this descriptor to hardware ownership. */
622 current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
623 current_bd->status = 0xF0000000; /* FIXME */
626 if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
627 first_bd = &priv->rx_bd[priv->rx_first_buf];
628 first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
629 first_bd->status = 0xF0000000;
632 /* Flush the cache for the packet as well */
633 #if defined(CONFIG_PHYS_64BIT)
634 addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
635 | ((dma_addr_t)current_bd->addr_hi << 32));
637 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
639 flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
/* Advance to the next descriptor, wrapping at RX_BUF. */
643 if ((++priv->rxbd_current) >= RX_BUF)
644 priv->rxbd_current = 0;
/*
 * zynq_gem_halt() - .stop hook: clear the RX/TX enable bits in nwctrl,
 * quiescing the MAC.  "®s" is mojibake for "&regs".
 */
649 static void zynq_gem_halt(struct udevice *dev)
651 struct zynq_gem_priv *priv = dev_get_priv(dev);
652 struct zynq_gem_regs *regs = priv->iobase;
654 clrsetbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
655 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
/*
 * Weak default for board-specific ROM MAC address retrieval; boards may
 * override.  The default body (presumably returning an error — confirm
 * against the full file) is missing from this excerpt.
 */
658 __weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
/*
 * zynq_gem_read_rom_mac() - .read_rom_hwaddr hook: delegate to the
 * (weak, board-overridable) zynq_board_read_rom_ethaddr() to fill
 * pdata->enetaddr.
 */
663 static int zynq_gem_read_rom_mac(struct udevice *dev)
665 struct eth_pdata *pdata = dev_get_plat(dev);
670 return zynq_board_read_rom_ethaddr(pdata->enetaddr);
/*
 * zynq_gem_miiphy_read() - mii_dev .read callback; adapts the MDIO
 * framework signature onto phyread().  Declarations (ret, val), the
 * return, and the closing brace are missing from this excerpt.
 */
673 static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
676 struct zynq_gem_priv *priv = bus->priv;
680 ret = phyread(priv, addr, reg, &val);
681 debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
/*
 * zynq_gem_miiphy_write() - mii_dev .write callback; adapts the MDIO
 * framework signature onto phywrite().
 */
685 static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
688 struct zynq_gem_priv *priv = bus->priv;
690 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
691 return phywrite(priv, addr, reg, value);
/*
 * zynq_gem_reset_init() - acquire and deassert the device's bulk resets.
 * -ENOTSUPP/-ENOENT from reset_get_bulk() is treated as "no resets wired"
 * (the branch body is missing from this excerpt but the early-exit intent
 * is visible); on deassert failure the resets are released.  Returns and
 * the closing brace are elided here.
 */
694 static int zynq_gem_reset_init(struct udevice *dev)
696 struct zynq_gem_priv *priv = dev_get_priv(dev);
699 ret = reset_get_bulk(dev, &priv->resets);
700 if (ret == -ENOTSUPP || ret == -ENOENT)
705 ret = reset_deassert_bulk(&priv->resets);
707 reset_release_bulk(&priv->resets);
/*
 * zynq_gem_probe() - driver probe: allocate DMA-visible buffers and
 * descriptor space, wire up clocks and the MDIO bus, and initialise the
 * PHY (including the optional SGMII generic PHY).
 * NOTE(review): declarations (ret, phy, bd_space), many error-path gotos,
 * the success return, and label lines are missing from this excerpt; the
 * lines at the bottom (mdio_unregister/free) are the error-unwind path.
 * NOTE(review): mdio_alloc() appears to be dereferenced without a NULL
 * check in the visible lines — confirm against the full file.
 */
714 static int zynq_gem_probe(struct udevice *dev)
717 struct zynq_gem_priv *priv = dev_get_priv(dev);
/* SGMII may be driven through a generic PHY (PS-GTR); optional. */
721 if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
722 ret = generic_phy_get_by_index(dev, 0, &phy);
724 ret = generic_phy_init(&phy);
727 } else if (ret != -ENOENT) {
728 debug("could not get phy (err %d)\n", ret);
733 ret = zynq_gem_reset_init(dev);
737 /* Align rxbuffers to ARCH_DMA_MINALIGN */
738 priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
739 if (!priv->rxbuffers)
742 memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
743 ulong addr = (ulong)priv->rxbuffers;
744 flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
747 /* Align bd_space to MMU_SECTION_SHIFT */
748 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
/* Descriptors must be uncached so CPU and DMA views stay coherent. */
754 mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
755 BD_SPACE, DCACHE_OFF);
757 /* Initialize the bd spaces for tx and rx bd's */
758 priv->tx_bd = (struct emac_bd *)bd_space;
759 priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);
761 ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
763 dev_err(dev, "failed to get tx_clock\n");
767 if (priv->clk_en_info & RXCLK_EN) {
768 ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
770 dev_err(dev, "failed to get rx_clock\n");
775 priv->bus = mdio_alloc();
776 priv->bus->read = zynq_gem_miiphy_read;
777 priv->bus->write = zynq_gem_miiphy_write;
778 priv->bus->priv = priv;
780 ret = mdio_register_seq(priv->bus, dev_seq(dev));
784 ret = zynq_phy_init(dev);
788 if (priv->interface == PHY_INTERFACE_MODE_SGMII && phy.dev) {
789 ret = generic_phy_power_on(&phy);
/* Error-unwind path: undo MDIO registration and free RX buffers. */
797 mdio_unregister(priv->bus);
801 free(priv->rxbuffers);
/*
 * zynq_gem_remove() - driver remove: unregister and free the MDIO bus.
 * The PHY-free call, return, and closing brace are missing from this
 * excerpt.
 */
805 static int zynq_gem_remove(struct udevice *dev)
807 struct zynq_gem_priv *priv = dev_get_priv(dev);
810 mdio_unregister(priv->bus);
811 mdio_free(priv->bus);
/* U-Boot driver-model Ethernet operations table for this driver. */
816 static const struct eth_ops zynq_gem_ops = {
817 .start = zynq_gem_init,
818 .send = zynq_gem_send,
819 .recv = zynq_gem_recv,
820 .free_pkt = zynq_gem_free_pkt,
821 .stop = zynq_gem_halt,
822 .write_hwaddr = zynq_gem_setup_mac,
823 .read_rom_hwaddr = zynq_gem_read_rom_mac,
/*
 * zynq_gem_of_to_plat() - parse device-tree properties into plat/priv:
 * register base, phy-handle (address, node, max-speed), a separate MDIO
 * bus base when the PHY hangs off another GEM instance, phy-mode, and the
 * internal-PCS/PMA flag.  Declarations (parent, addr), some braces, and
 * the return are missing from this excerpt.
 */
826 static int zynq_gem_of_to_plat(struct udevice *dev)
828 struct eth_pdata *pdata = dev_get_plat(dev);
829 struct zynq_gem_priv *priv = dev_get_priv(dev);
830 struct ofnode_phandle_args phandle_args;
831 const char *phy_mode;
833 pdata->iobase = (phys_addr_t)dev_read_addr(dev);
834 priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
/* Default: MDIO accesses go through this instance's own registers. */
835 priv->mdiobase = priv->iobase;
836 /* Hardcode for now */
839 if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
844 debug("phy-handle does exist %s\n", dev->name);
845 priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
847 priv->phy_of_node = phandle_args.node;
848 priv->max_speed = ofnode_read_u32_default(phandle_args.node,
/* Walk up past an intermediate "mdio" node to the owning controller. */
852 parent = ofnode_get_parent(phandle_args.node);
853 if (ofnode_name_eq(parent, "mdio"))
854 parent = ofnode_get_parent(parent);
856 addr = ofnode_get_addr(parent);
/* NOTE(review): the debug message below reads "not found" inside the
 * branch where an address WAS found — looks inverted; confirm against
 * the upstream driver (lines may have been elided between these two). */
857 if (addr != FDT_ADDR_T_NONE) {
858 debug("MDIO bus not found %s\n", dev->name);
859 priv->mdiobase = (struct zynq_gem_regs *)addr;
863 phy_mode = dev_read_prop(dev, "phy-mode", NULL);
865 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
866 if (pdata->phy_interface == -1) {
867 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
870 priv->interface = pdata->phy_interface;
872 priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");
874 printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
875 (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phyaddr,
876 phy_string_for_interface(priv->interface));
878 priv->clk_en_info = dev_get_driver_data(dev);
/* Compatible strings; Versal carries RXCLK_EN so probe/init also manage
 * an "rx_clk".  The terminating { } sentinel is elided from this excerpt. */
883 static const struct udevice_id zynq_gem_ids[] = {
884 { .compatible = "cdns,versal-gem", .data = RXCLK_EN },
885 { .compatible = "cdns,zynqmp-gem" },
886 { .compatible = "cdns,zynq-gem" },
887 { .compatible = "cdns,gem" },
/* Driver-model registration; .name/.id lines and the closing brace are
 * elided from this excerpt. */
891 U_BOOT_DRIVER(zynq_gem) = {
894 .of_match = zynq_gem_ids,
895 .of_to_plat = zynq_gem_of_to_plat,
896 .probe = zynq_gem_probe,
897 .remove = zynq_gem_remove,
898 .ops = &zynq_gem_ops,
899 .priv_auto = sizeof(struct zynq_gem_priv),
900 .plat_auto = sizeof(struct eth_pdata),