// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <net.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <linux/errno.h>

DECLARE_GLOBAL_DATA_PTR;

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */
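
/*
 * Note: phy_setup_op() below ORs these together to build one Clause-22 MDIO
 * frame in the PHY maintenance register: OP_MASK sets the mandatory frame
 * bits, OP_R/OP_W select the operation, the PHY address and register number
 * are shifted into bits 27:23 and 22:18, and the 16-bit data sits in the
 * low half-word.
 */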

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by Hw */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII Enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */

#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1*max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *  0x1000: 10Mbps full duplex support
 *  0x0800: 10Mbps half duplex support
 *  0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL
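
/*
 * These are the GEM TX clock rates required by the MII interface for each
 * link speed (2.5 MHz for 10 Mbps, 25 MHz for 100 Mbps, 125 MHz for 1 Gbps);
 * zynq_gem_init() below programs the "tx_clk" clock to whichever rate matches
 * the speed the PHY negotiated.
 */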

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - Phy Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl; /* 0x200 - PCS control reg */
	u32 reserved7[36];
	u32 dcfg6; /* 0x294 Design config reg6 */
	u32 reserved8[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved10[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved11[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved12[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

/* Number of receive buffer descriptors */
#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so 1MB of bdspace is enough.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC	2
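
/*
 * Descriptor memory layout: the TX descriptors sit at the start of the
 * BD_SPACE allocation and the RX_BUF receive descriptors follow at offset
 * BD_SEPRN_SPACE (see zynq_gem_probe()). Two extra descriptors just past the
 * TX ring (tx_bd[TX_FREE_DESC] and tx_bd[TX_FREE_DESC + 2]) are set up by
 * zynq_gem_init() as one-entry dummy rings used to park priority queue 1.
 */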

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
	struct zynq_gem_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk clk;
};

static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->iobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs->nwctrl);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				   ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}

static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit DMA even though HW supports it\n",
		      __func__);
#endif

	/* Disable all interrupts */
	writel(0xFFFFFFFF, &regs->idr);

	/* Disable the receiver & transmitter */
	writel(0, &regs->nwctrl);
	writel(0, &regs->txsr);
	writel(0, &regs->rxsr);
	writel(0, &regs->phymntnc);

	/* Clear the Hash registers for the mac address
	 * pointed by AddressPtr
	 */
	writel(0x0, &regs->hashl);
	/* Write bits [63:32] in TOP */
	writel(0x0, &regs->hashh);

	/* Clear all counters */
	for (i = 0; i < STAT_SIZE; i++)
		readl(&regs->stat[i]);

	/* Setup RxBD space */
	memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

	for (i = 0; i < RX_BUF; i++) {
		priv->rx_bd[i].status = 0xF0000000;
		priv->rx_bd[i].addr =
			(lower_32_bits((ulong)(priv->rxbuffers)
					+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
		priv->rx_bd[i].addr_hi =
			(upper_32_bits((ulong)(priv->rxbuffers)
					+ (i * PKTSIZE_ALIGN)));
#endif
	}
	/* WRAP bit to last BD */
	priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
	/* Write RxBDs to IP */
	writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

	/* Setup for DMA Configuration register */
	writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

	/* Setup for Network Control register, MDIO, Rx and Tx enable */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

	/* Disable the second priority queue */
	dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
	dummy_tx_bd->addr_hi = 0;
#endif
	dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			      ZYNQ_GEM_TXBUF_LAST_MASK |
			      ZYNQ_GEM_TXBUF_USED_MASK;

	dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
			    ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
	dummy_rx_bd->addr_hi = 0;
#endif
	dummy_rx_bd->status = 0;

	writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
	writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);
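
	/*
	 * With queue 1 parked on these single wrap/used descriptors the
	 * controller never fetches real buffers from the second priority
	 * queue; all traffic goes through queue 0 (rxqbase/txqbase).
	 */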

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Enable SGMII mode and PCS selection only if the internal PCS/PMA
	 * core is used and the interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
		writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
		       &regs->pcscntrl);
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#if !defined(CONFIG_ARCH_VERSAL)
	ret = clk_set_rate(&priv->clk, clk_rate);
	if (IS_ERR_VALUE(ret) && ret != (unsigned long)-ENOSYS) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->clk);
	if (ret && ret != -ENOSYS) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}
#else
	debug("requested clk_rate %ld\n", clk_rate);
#endif

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
				    ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			      ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
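
	/*
	 * The flush above writes the frame contents back from the data cache
	 * so the GEM's DMA engine reads the bytes the CPU just prepared, not
	 * stale memory.
	 */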

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
	      (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
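
	/*
	 * The invalidate above drops any stale cache lines covering the
	 * buffer, so the CPU reads the frame the GEM just wrote via DMA.
	 */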

	return frame_len;
}

static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
			ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);

	return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);

	return phywrite(priv, addr, reg, value);
}

static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	u32 addr = (ulong)priv->rxbuffers;
	flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;

	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);
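
	/*
	 * The buffer descriptors are kept in an uncached region so that
	 * ownership/status updates made by the CPU and by the GEM are
	 * immediately visible to each other without per-descriptor cache
	 * maintenance.
	 */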

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to get clock\n");
		return ret;
	}

	priv->bus = mdio_alloc();
	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev->seq);
	if (ret)
		return ret;

	return zynq_phy_init(dev);
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start = zynq_gem_init,
	.send = zynq_gem_send,
	.recv = zynq_gem_recv,
	.free_pkt = zynq_gem_free_pkt,
	.stop = zynq_gem_halt,
	.write_hwaddr = zynq_gem_setup_mac,
	.read_rom_hwaddr = zynq_gem_read_rom_mac,
};

static int zynq_gem_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		debug("phy-handle exists for %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("ZYNQ GEM: %lx, phyaddr %x, interface %s\n", (ulong)priv->iobase,
	       priv->phyaddr, phy_string_for_interface(priv->interface));

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name = "zynq_gem",
	.id = UCLASS_ETH,
	.of_match = zynq_gem_ids,
	.ofdata_to_platdata = zynq_gem_ofdata_to_platdata,
	.probe = zynq_gem_probe,
	.remove = zynq_gem_remove,
	.ops = &zynq_gem_ops,
	.priv_auto_alloc_size = sizeof(struct zynq_gem_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
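
/*
 * Illustrative device-tree fragment (a sketch, not taken from any specific
 * board file) showing the properties zynq_gem_ofdata_to_platdata() reads:
 * "reg" for the controller base, "phy-mode", an optional "phy-handle" whose
 * target node provides the PHY "reg" address and "max-speed", and the
 * optional "is-internal-pcspma" flag. A "tx_clk" entry in clocks/clock-names
 * is also expected by zynq_gem_probe().
 *
 *	gem0: ethernet@e000b000 {
 *		compatible = "cdns,zynq-gem";
 *		reg = <0xe000b000 0x1000>;
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&phy0>;
 *		phy0: ethernet-phy@0 {
 *			reg = <0>;
 *			max-speed = <1000>;
 *		};
 *	};
 */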