1 // SPDX-License-Identifier: GPL-2.0+
3 * (C) Copyright 2011 Michal Simek
5 * Michal SIMEK <monstr@monstr.eu>
7 * Based on Xilinx gmac driver:
8 * (C) Copyright 2011 Xilinx
15 #include <generic-phy.h>
22 #include <asm/cache.h>
29 #include <asm/system.h>
30 #include <asm/arch/hardware.h>
31 #include <asm/arch/sys_proto.h>
32 #include <dm/device_compat.h>
33 #include <linux/bitfield.h>
34 #include <linux/bitops.h>
35 #include <linux/err.h>
36 #include <linux/errno.h>
38 #include <zynqmp_firmware.h>
40 /* Bit/mask specification */
/* PHY maintenance register (phymntnc, 0x34) fields for MDIO clause-22 frames */
41 #define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
42 #define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
43 #define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
44 #define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
45 #define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
/* RX buffer descriptor status-word bits */
47 #define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame. */
48 #define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */
49 #define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
/* RX buffer descriptor address-word bits */
51 #define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
52 #define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit.. */
53 #define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */
/* TX buffer descriptor status-word bits */
55 /* Wrap bit, last descriptor */
56 #define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
57 #define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
58 #define ZYNQ_GEM_TXBUF_USED_MASK 0x80000000 /* Used by Hw */
/* Network Control register (nwctrl, 0x0) bits */
60 #define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
61 #define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
62 #define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
63 #define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */
/* Network Config register (nwcfg, 0x4) bits */
65 #define ZYNQ_GEM_NWCFG_SPEED100 0x00000001 /* 100 Mbps operation */
66 #define ZYNQ_GEM_NWCFG_SPEED1000 0x00000400 /* 1Gbps operation */
67 #define ZYNQ_GEM_NWCFG_FDEN 0x00000002 /* Full Duplex mode */
68 #define ZYNQ_GEM_NWCFG_FSREM 0x00020000 /* FCS removal */
69 #define ZYNQ_GEM_NWCFG_SGMII_ENBL 0x08000000 /* SGMII Enable */
70 #define ZYNQ_GEM_NWCFG_PCS_SEL 0x00000800 /* PCS select */
/*
 * NOTE(review): the two ZYNQ_GEM_DBUS_WIDTH definitions below are the two
 * arms of a preprocessor conditional whose #if/#else/#endif lines are not
 * visible in this extraction (original numbering skips lines 72/74/76).
 */
73 # define ZYNQ_GEM_DBUS_WIDTH (1 << 21) /* 64 bit bus */
75 # define ZYNQ_GEM_DBUS_WIDTH (0 << 21) /* 32 bit bus */
/* Baseline nwcfg value; continuation line(s) of this macro are not visible */
78 #define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_DBUS_WIDTH | \
79 ZYNQ_GEM_NWCFG_FDEN | \
82 #define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */
/* DMA Control register (dmacr, 0x10) fields */
84 #define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */
85 /* Use full configured addressable space (8 Kb) */
86 #define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
87 /* Use full configured addressable space (4 Kb) */
88 #define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
89 /* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */
90 #define ZYNQ_GEM_DMACR_RXBUF 0x00180000
92 #if defined(CONFIG_PHYS_64BIT)
93 # define ZYNQ_GEM_DMA_BUS_WIDTH BIT(30) /* 64 bit bus */
/* NOTE(review): the #else between these two arms is not visible here */
95 # define ZYNQ_GEM_DMA_BUS_WIDTH (0 << 30) /* 32 bit bus */
98 #define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
99 ZYNQ_GEM_DMACR_RXSIZE | \
100 ZYNQ_GEM_DMACR_TXSIZE | \
101 ZYNQ_GEM_DMACR_RXBUF | \
102 ZYNQ_GEM_DMA_BUS_WIDTH)
104 #define ZYNQ_GEM_TSR_DONE 0x00000020 /* Tx done mask */
/* PCS control register: auto-negotiation enable bit */
106 #define ZYNQ_GEM_PCS_CTL_ANEG_ENBL 0x1000
108 #define ZYNQ_GEM_DCFG_DBG6_DMA_64B BIT(23)
/* Timeout for the MDIO-idle polls in phy_setup_op() */
110 #define MDIO_IDLE_TIMEOUT_MS 100
112 /* Use MII register 1 (MII status register) to detect PHY */
113 #define PHY_DETECT_REG 1
115 /* Mask used to verify certain PHY features (or register contents)
116 * in the register above:
117 * 0x1000: 10Mbps full duplex support
118 * 0x0800: 10Mbps half duplex support
119 * 0x0008: Auto-negotiation support
121 #define PHY_DETECT_MASK 0x1808
123 /* PCS (SGMII) Link Status */
124 #define ZYNQ_GEM_PCSSTATUS_LINK BIT(2)
125 #define ZYNQ_GEM_PCSSTATUS_ANEG_COMPL BIT(5)
127 /* TX BD status masks */
128 #define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
129 #define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
130 #define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000
132 /* Clock frequencies for different speeds */
133 #define ZYNQ_GEM_FREQUENCY_10 2500000UL
134 #define ZYNQ_GEM_FREQUENCY_100 25000000UL
135 #define ZYNQ_GEM_FREQUENCY_1000 125000000UL
/* Driver-data flag: device needs its rx clock enabled (see zynq_gem_ids) */
137 #define RXCLK_EN BIT(0)
139 /* GEM specific constants for CLK. */
/* MDC divisor selector values programmed via GEM_MDC_SET() */
140 #define GEM_CLK_DIV8 0
141 #define GEM_CLK_DIV16 1
142 #define GEM_CLK_DIV32 2
143 #define GEM_CLK_DIV48 3
144 #define GEM_CLK_DIV64 4
145 #define GEM_CLK_DIV96 5
146 #define GEM_CLK_DIV128 6
147 #define GEM_CLK_DIV224 7
/* Place an MDC divisor value into nwcfg bits [20:18] */
149 #define GEM_MDC_SET(val) FIELD_PREP(GENMASK(20, 18), val)
151 /* Device registers */
152 struct zynq_gem_regs {
153 u32 nwctrl; /* 0x0 - Network Control reg */
154 u32 nwcfg; /* 0x4 - Network Config reg */
155 u32 nwsr; /* 0x8 - Network Status reg */
157 u32 dmacr; /* 0x10 - DMA Control reg */
158 u32 txsr; /* 0x14 - TX Status reg */
159 u32 rxqbase; /* 0x18 - RX Q Base address reg */
160 u32 txqbase; /* 0x1c - TX Q Base address reg */
161 u32 rxsr; /* 0x20 - RX Status reg */
163 u32 idr; /* 0x2c - Interrupt Disable reg */
165 u32 phymntnc; /* 0x34 - Phy Maintaince reg */
167 u32 hashl; /* 0x80 - Hash Low address reg */
168 u32 hashh; /* 0x84 - Hash High address reg */
171 u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
172 u32 match[4]; /* 0xa8 - Type ID1 Match reg */
175 u32 stat[STAT_SIZE]; /* 0x100 - Octects transmitted Low reg */
180 u32 dcfg6; /* 0x294 Design config reg6 */
182 u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
184 u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
186 u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
188 u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
/*
 * DMA buffer descriptor shared with the GEM hardware.
 * NOTE(review): the "struct emac_bd {" opener, the status word, the optional
 * 64-bit addr_hi member and the closing brace are not visible in this
 * extraction — only the first member survives below.
 */
193 u32 addr; /* Next descriptor pointer */
195 #if defined(CONFIG_PHYS_64BIT)
201 /* Reduce amount of BUFs if you have limited amount of memory */
203 /* Page table entries are set to 1MB, or multiples of 1MB
204 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
/* Total memory reserved for all TX/RX buffer descriptors (1 MiB) */
206 #define BD_SPACE 0x100000
207 /* BD separation space */
208 #define BD_SEPRN_SPACE (RX_BUF * sizeof(struct emac_bd))
210 /* Setup the first free TX descriptor */
211 #define TX_FREE_DESC 2
213 /* Initialized, rxbd_current, rx_first_buf must be 0 after init */
/*
 * Per-device driver state.
 * NOTE(review): several members referenced elsewhere in this file (phyaddr,
 * bus, rxbuffers, rxbd_current, rx_first_buf, tx_clk, rx_clk, pclk, int_pcs,
 * dma_64bit, clk_en_info, max_speed, phy_of_node, ...) and the closing brace
 * are not visible in this extraction.
 */
214 struct zynq_gem_priv {
215 struct emac_bd *tx_bd;
216 struct emac_bd *rx_bd;
222 struct zynq_gem_regs *iobase;
223 struct zynq_gem_regs *mdiobase;
224 phy_interface_t interface;
225 struct phy_device *phydev;
235 struct reset_ctl_bulk resets;
/*
 * Perform one MDIO read or write via the GEM PHY maintenance register.
 * Waits for the MDIO interface to go idle, issues the framed operation
 * (op is ZYNQ_GEM_PHYMNTNC_OP_R_MASK or _W_MASK), waits for completion,
 * and for reads stores the result through @data.
 * NOTE(review): "®s" below is mojibake for "&regs" (HTML-entity damage
 * from extraction) — left byte-identical here; fix when restoring the file.
 */
238 static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
242 struct zynq_gem_regs *regs = priv->mdiobase;
/* Wait until any previous MDIO transaction has finished */
245 err = wait_for_bit_le32(®s->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
246 true, MDIO_IDLE_TIMEOUT_MS, false);
250 /* Construct mgtcr mask for the operation */
251 mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
252 (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
253 (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;
255 /* Write mgtcr and wait for completion */
256 writel(mgtcr, ®s->phymntnc)
258 err = wait_for_bit_le32(®s->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
259 true, MDIO_IDLE_TIMEOUT_MS, false);
/* Read operations return the data in the low half of phymntnc */
263 if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
264 *data = readl(®s->phymntnc);
/* Read PHY register @regnum of PHY @phy_addr over MDIO; result in *val. */
269 static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
270 u32 regnum, u16 *val)
274 ret = phy_setup_op(priv, phy_addr, regnum,
275 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
278 debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
279 phy_addr, regnum, *val);
/* Write @data to PHY register @regnum of PHY @phy_addr over MDIO. */
284 static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
285 u32 regnum, u16 data)
287 debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
290 return phy_setup_op(priv, phy_addr, regnum,
291 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
/*
 * eth_ops.write_hwaddr: program the MAC address from pdata->enetaddr into
 * specific-address slot 0 (laddr[0]); all four slots and the type-ID match
 * registers are cleared first. Writing the low word disables the slot until
 * the high word is written, hence the low/high write order.
 * NOTE(review): "®s" is mojibake for "&regs" — kept byte-identical here.
 */
294 static int zynq_gem_setup_mac(struct udevice *dev)
296 u32 i, macaddrlow, macaddrhigh;
297 struct eth_pdata *pdata = dev_get_plat(dev);
298 struct zynq_gem_priv *priv = dev_get_priv(dev);
299 struct zynq_gem_regs *regs = priv->iobase;
301 /* Set the MAC bits [31:0] in BOT */
302 macaddrlow = pdata->enetaddr[0];
303 macaddrlow |= pdata->enetaddr[1] << 8;
304 macaddrlow |= pdata->enetaddr[2] << 16;
305 macaddrlow |= pdata->enetaddr[3] << 24;
307 /* Set MAC bits [47:32] in TOP */
308 macaddrhigh = pdata->enetaddr[4];
309 macaddrhigh |= pdata->enetaddr[5] << 8;
311 for (i = 0; i < 4; i++) {
312 writel(0, ®s->laddr[i][LADDR_LOW]);
313 writel(0, ®s->laddr[i][LADDR_HIGH]);
314 /* Do not use MATCHx register */
315 writel(0, ®s->match[i]);
318 writel(macaddrlow, ®s->laddr[0][LADDR_LOW]);
319 writel(macaddrhigh, ®s->laddr[0][LADDR_HIGH]);
/*
 * Connect to and configure the ethernet PHY: enables only the MDIO port,
 * resolves the PHY address (via DM_ETH_PHY when enabled), connects the
 * phylib device, clamps the supported/advertised link modes (optionally to
 * priv->max_speed), attaches the DT node, and runs phy_config().
 * Returns the phy_config() result.
 */
324 static int zynq_phy_init(struct udevice *dev)
327 struct zynq_gem_priv *priv = dev_get_priv(dev);
328 struct zynq_gem_regs *regs_mdio = priv->mdiobase;
329 const u32 supported = SUPPORTED_10baseT_Half |
330 SUPPORTED_10baseT_Full |
331 SUPPORTED_100baseT_Half |
332 SUPPORTED_100baseT_Full |
333 SUPPORTED_1000baseT_Half |
334 SUPPORTED_1000baseT_Full;
336 /* Enable only MDIO bus */
337 writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, ®s_mdio->nwctrl);
339 if (IS_ENABLED(CONFIG_DM_ETH_PHY))
340 priv->phyaddr = eth_phy_get_addr(dev);
342 priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
344 if (IS_ERR_OR_NULL(priv->phydev))
/* Optional DT "max-speed": restrict what the PHY may advertise */
347 if (priv->max_speed) {
348 ret = phy_set_supported(priv->phydev, priv->max_speed);
353 priv->phydev->supported &= supported | ADVERTISED_Pause |
354 ADVERTISED_Asym_Pause;
356 priv->phydev->advertising = priv->phydev->supported;
/* Prefer the node from phy_connect(); fall back to DT phy-handle node */
357 if (!ofnode_valid(priv->phydev->node))
358 priv->phydev->node = priv->phy_of_node;
360 return phy_config(priv->phydev);
/*
 * Pick the MDC clock divisor from the pclk rate so that the resulting MDC
 * frequency stays within spec, and return it encoded into the nwcfg
 * bit-field via GEM_MDC_SET(). Thresholds select the smallest divisor that
 * keeps MDC legal for the given pclk.
 */
363 static u32 gem_mdc_clk_div(struct zynq_gem_priv *priv)
366 unsigned long pclk_hz;
368 pclk_hz = clk_get_rate(&priv->pclk);
369 if (pclk_hz <= 20000000)
370 config = GEM_MDC_SET(GEM_CLK_DIV8);
371 else if (pclk_hz <= 40000000)
372 config = GEM_MDC_SET(GEM_CLK_DIV16);
373 else if (pclk_hz <= 80000000)
374 config = GEM_MDC_SET(GEM_CLK_DIV32);
375 else if (pclk_hz <= 120000000)
376 config = GEM_MDC_SET(GEM_CLK_DIV48);
377 else if (pclk_hz <= 160000000)
378 config = GEM_MDC_SET(GEM_CLK_DIV64);
379 else if (pclk_hz <= 240000000)
380 config = GEM_MDC_SET(GEM_CLK_DIV96);
381 else if (pclk_hz <= 320000000)
382 config = GEM_MDC_SET(GEM_CLK_DIV128);
384 config = GEM_MDC_SET(GEM_CLK_DIV224);
/*
 * eth_ops.start: full controller bring-up.
 * - Detects 64-bit DMA capability from dcfg6 and sanity-checks it against
 *   CONFIG_PHYS_64BIT.
 * - Quiesces the MAC (interrupts, nwctrl, status regs), clears hash and
 *   statistics registers.
 * - Builds the RX BD ring (WRAP on last BD) and programs rxqbase, then the
 *   DMA config register.
 * - Parks priority queue 1 with single "used/wrap" dummy TX/RX descriptors.
 * - Starts the PHY, derives nwcfg (MDC divisor, speed bits, optional
 *   SGMII/PCS enable) and the tx clock rate from the negotiated speed.
 * - For SGMII with internal PCS, enables/disables PCS autonegotiation and,
 *   for a real (non-fixed) PHY with link up, waits for PCS link+aneg.
 * - Sets/enables tx (and optionally rx) clocks, then enables RX and TX.
 * NOTE(review): "®s" is mojibake for "&regs"; several lines (braces,
 * error checks, #endif lines) are missing from this extraction — code kept
 * byte-identical.
 */
389 static int zynq_gem_init(struct udevice *dev)
393 unsigned long clk_rate = 0;
394 struct zynq_gem_priv *priv = dev_get_priv(dev);
395 struct zynq_gem_regs *regs = priv->iobase;
396 struct zynq_gem_regs *regs_mdio = priv->mdiobase;
397 struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
398 struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];
/* HW capability bit: design supports 64-bit DMA addressing */
400 if (readl(®s->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
401 priv->dma_64bit = true;
403 priv->dma_64bit = false;
405 #if defined(CONFIG_PHYS_64BIT)
406 if (!priv->dma_64bit) {
407 printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
413 debug("WARN: %s: Not using 64-bit dma even HW supports it\n",
418 /* Disable all interrupts */
419 writel(0xFFFFFFFF, ®s->idr);
421 /* Disable the receiver & transmitter */
422 writel(0, ®s->nwctrl);
423 writel(0, ®s->txsr);
424 writel(0, ®s->rxsr);
425 writel(0, ®s->phymntnc);
427 /* Clear the Hash registers for the mac address
428 * pointed by AddressPtr
430 writel(0x0, ®s->hashl);
431 /* Write bits [63:32] in TOP */
432 writel(0x0, ®s->hashh);
434 /* Clear all counters */
435 for (i = 0; i < STAT_SIZE; i++)
436 readl(®s->stat[i]);
438 /* Setup RxBD space */
439 memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));
441 for (i = 0; i < RX_BUF; i++) {
442 priv->rx_bd[i].status = 0xF0000000;
443 priv->rx_bd[i].addr =
444 (lower_32_bits((ulong)(priv->rxbuffers)
445 + (i * PKTSIZE_ALIGN)));
446 #if defined(CONFIG_PHYS_64BIT)
447 priv->rx_bd[i].addr_hi =
448 (upper_32_bits((ulong)(priv->rxbuffers)
449 + (i * PKTSIZE_ALIGN)));
452 /* WRAP bit to last BD */
453 priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
454 /* Write RxBDs to IP */
455 writel(lower_32_bits((ulong)priv->rx_bd), ®s->rxqbase);
456 #if defined(CONFIG_PHYS_64BIT)
457 writel(upper_32_bits((ulong)priv->rx_bd), ®s->upper_rxqbase);
460 /* Setup for DMA Configuration register */
461 writel(ZYNQ_GEM_DMACR_INIT, ®s->dmacr);
463 /* Setup for Network Control register, MDIO, Rx and Tx enable */
464 setbits_le32(®s_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);
466 /* Disable the second priority queue */
467 dummy_tx_bd->addr = 0;
468 #if defined(CONFIG_PHYS_64BIT)
469 dummy_tx_bd->addr_hi = 0;
471 dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
472 ZYNQ_GEM_TXBUF_LAST_MASK|
473 ZYNQ_GEM_TXBUF_USED_MASK;
475 dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
476 ZYNQ_GEM_RXBUF_NEW_MASK;
477 #if defined(CONFIG_PHYS_64BIT)
478 dummy_rx_bd->addr_hi = 0;
480 dummy_rx_bd->status = 0;
482 writel((ulong)dummy_tx_bd, ®s->transmit_q1_ptr);
483 writel((ulong)dummy_rx_bd, ®s->receive_q1_ptr);
/* Negotiate/raise the PHY link before choosing speed-dependent config */
488 ret = phy_startup(priv->phydev);
492 if (!priv->phydev->link) {
493 printf("%s: No link.\n", priv->phydev->dev->name);
497 nwconfig = gem_mdc_clk_div(priv);
498 nwconfig |= ZYNQ_GEM_NWCFG_INIT;
501 * Set SGMII enable PCS selection only if internal PCS/PMA
502 * core is used and interface is SGMII.
504 if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
506 nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
507 ZYNQ_GEM_NWCFG_PCS_SEL;
/* Program speed bits and pick the matching tx clock frequency */
510 switch (priv->phydev->speed) {
512 writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
514 clk_rate = ZYNQ_GEM_FREQUENCY_1000;
517 writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
519 clk_rate = ZYNQ_GEM_FREQUENCY_100;
522 clk_rate = ZYNQ_GEM_FREQUENCY_10;
527 if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
530 * Disable AN for fixed link configuration, enable otherwise.
531 * Must be written after PCS_SEL is set in nwconfig,
532 * otherwise writes will not take effect.
534 if (priv->phydev->phy_id != PHY_FIXED_ID) {
535 writel(readl(®s->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
538 * When the PHY link is already up, the PCS link needs
541 if (priv->phydev->link) {
544 pcsstatus = ZYNQ_GEM_PCSSTATUS_LINK |
545 ZYNQ_GEM_PCSSTATUS_ANEG_COMPL;
546 ret = wait_for_bit_le32(®s->pcsstatus,
551 "no PCS (SGMII) link\n");
554 * Some additional minimal delay seems
555 * to be needed so that the first
556 * packet will be sent correctly
562 writel(readl(®s->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
/* Only reprogram the tx clock if it is not already at the wanted rate */
568 ret = clk_get_rate(&priv->tx_clk);
569 if (ret != clk_rate) {
570 ret = clk_set_rate(&priv->tx_clk, clk_rate);
571 if (IS_ERR_VALUE(ret)) {
572 dev_err(dev, "failed to set tx clock rate %ld\n", clk_rate);
577 ret = clk_enable(&priv->tx_clk);
579 dev_err(dev, "failed to enable tx clock\n");
/* rx clock is only present/needed on parts flagged with RXCLK_EN */
583 if (priv->clk_en_info & RXCLK_EN) {
584 ret = clk_enable(&priv->rx_clk);
586 dev_err(dev, "failed to enable rx clock\n");
590 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
591 ZYNQ_GEM_NWCTRL_TXEN_MASK);
/*
 * eth_ops.send: transmit one frame synchronously.
 * Builds a two-descriptor TX chain (BD0 = the frame, BD1 = used+wrap dummy
 * terminator), programs txqbase, flushes the packet's cache lines for DMA,
 * kicks STARTTX and waits for the TX-done bit in txsr.
 * NOTE(review): "®s" is mojibake for "&regs" — kept byte-identical.
 */
596 static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
600 struct zynq_gem_priv *priv = dev_get_priv(dev);
601 struct zynq_gem_regs *regs = priv->iobase;
602 struct emac_bd *current_bd = &priv->tx_bd[1];
605 memset(priv->tx_bd, 0, sizeof(struct emac_bd));
607 priv->tx_bd->addr = lower_32_bits((ulong)ptr);
608 #if defined(CONFIG_PHYS_64BIT)
609 priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
611 priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
612 ZYNQ_GEM_TXBUF_LAST_MASK;
613 /* Dummy descriptor to mark it as the last in descriptor chain */
614 current_bd->addr = 0x0;
615 #if defined(CONFIG_PHYS_64BIT)
616 current_bd->addr_hi = 0x0;
618 current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
619 ZYNQ_GEM_TXBUF_LAST_MASK|
620 ZYNQ_GEM_TXBUF_USED_MASK;
623 writel(lower_32_bits((ulong)priv->tx_bd), ®s->txqbase);
624 #if defined(CONFIG_PHYS_64BIT)
625 writel(upper_32_bits((ulong)priv->tx_bd), ®s->upper_txqbase);
/* Flush the (cache-line aligned, rounded-up) packet region before DMA */
629 addr &= ~(ARCH_DMA_MINALIGN - 1);
630 size = roundup(len, ARCH_DMA_MINALIGN);
631 flush_dcache_range(addr, addr + size);
635 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);
637 /* Read TX BD status */
638 if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
639 printf("TX buffers exhausted in mid frame\n");
641 return wait_for_bit_le32(®s->txsr, ZYNQ_GEM_TSR_DONE,
645 /* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
/*
 * eth_ops.recv: poll the current RX BD for a completed frame.
 * Requires the NEW (used) bit set by hardware; sanity-checks SOF/EOF and a
 * non-zero length, invalidates the buffer's cache lines, and hands back a
 * pointer into the DMA buffer via *packetp (buffer is recycled later by
 * zynq_gem_free_pkt).
 */
646 static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
650 struct zynq_gem_priv *priv = dev_get_priv(dev);
651 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
/* NEW bit clear => hardware has not filled this BD yet */
653 if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
656 if (!(current_bd->status &
657 (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
658 printf("GEM: SOF or EOF not set for last buffer received!\n");
662 frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
664 printf("%s: Zero size packet?\n", __func__);
/* Recover the buffer address (64-bit builds combine addr_hi:addr) */
668 #if defined(CONFIG_PHYS_64BIT)
669 addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
670 | ((dma_addr_t)current_bd->addr_hi << 32));
672 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
674 addr &= ~(ARCH_DMA_MINALIGN - 1);
676 *packetp = (uchar *)(uintptr_t)addr;
/* Discard stale cache contents so the CPU sees the DMA'd frame */
678 invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
/*
 * eth_ops.free_pkt: return the just-consumed RX BD(s) to hardware.
 * Clears the NEW bit and resets the status word (for SOF, and for the
 * frame's first BD once EOF is seen), flushes the buffer's cache lines,
 * and advances rxbd_current around the ring.
 */
684 static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
686 struct zynq_gem_priv *priv = dev_get_priv(dev);
687 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
688 struct emac_bd *first_bd;
/* Remember where the frame started so EOF can recycle the first BD too */
691 if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
692 priv->rx_first_buf = priv->rxbd_current;
694 current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
695 current_bd->status = 0xF0000000; /* FIXME */
698 if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
699 first_bd = &priv->rx_bd[priv->rx_first_buf];
700 first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
701 first_bd->status = 0xF0000000;
704 /* Flush the cache for the packet as well */
705 #if defined(CONFIG_PHYS_64BIT)
706 addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
707 | ((dma_addr_t)current_bd->addr_hi << 32));
709 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
711 flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
/* Advance ring index with wrap-around */
715 if ((++priv->rxbd_current) >= RX_BUF)
716 priv->rxbd_current = 0;
/*
 * eth_ops.stop: disable the receiver and transmitter.
 * NOTE(review): "®s" is mojibake for "&regs" — kept byte-identical.
 */
721 static void zynq_gem_halt(struct udevice *dev)
723 struct zynq_gem_priv *priv = dev_get_priv(dev);
724 struct zynq_gem_regs *regs = priv->iobase;
726 clrsetbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
727 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
/* mii_dev.read adapter: forward an MDIO read to phyread(). */
730 static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
733 struct zynq_gem_priv *priv = bus->priv;
737 ret = phyread(priv, addr, reg, &val);
738 debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
/* mii_dev.write adapter: forward an MDIO write to phywrite(). */
742 static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
745 struct zynq_gem_priv *priv = bus->priv;
747 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
748 return phywrite(priv, addr, reg, value);
/*
 * Acquire and deassert the device's reset lines (bulk API).
 * Missing reset support (-ENOTSUPP/-ENOENT) is tolerated; on deassert
 * failure the bulk handle is released again.
 */
751 static int zynq_gem_reset_init(struct udevice *dev)
753 struct zynq_gem_priv *priv = dev_get_priv(dev);
756 ret = reset_get_bulk(dev, &priv->resets);
757 if (ret == -ENOTSUPP || ret == -ENOENT)
762 ret = reset_deassert_bulk(&priv->resets);
764 reset_release_bulk(&priv->resets);
/*
 * On ZynqMP with PMU firmware: read the "power-domains" node id and use the
 * IOCTL_SET_GEM_CONFIG firmware call to switch the GEM into fixed/SGMII
 * configuration. No-op unless ARCH_ZYNQMP and ZYNQMP_FIRMWARE are enabled.
 * NOTE(review): the zynqmp_pm_is_function_supported() test reads inverted
 * ("!supported" proceeds) — presumably the helper returns 0 when supported;
 * verify against zynqmp_firmware.h before changing.
 */
771 static int gem_zynqmp_set_dynamic_config(struct udevice *dev)
776 if (IS_ENABLED(CONFIG_ARCH_ZYNQMP) && IS_ENABLED(CONFIG_ZYNQMP_FIRMWARE)) {
777 if (!zynqmp_pm_is_function_supported(PM_IOCTL,
778 IOCTL_SET_GEM_CONFIG)) {
779 ret = ofnode_read_u32_array(dev_ofnode(dev),
782 ARRAY_SIZE(pm_info));
785 "Failed to read power-domains info\n");
789 ret = zynqmp_pm_set_gem_config(pm_info[1],
790 GEM_CONFIG_FIXED, 0);
794 ret = zynqmp_pm_set_gem_config(pm_info[1],
795 GEM_CONFIG_SGMII_MODE,
/*
 * Driver probe: allocate DMA resources, set up clocks, MDIO bus and PHY.
 * - For SGMII, optionally obtains and inits a generic PHY (serdes lane);
 *   -ENOENT (no phy in DT) is tolerated.
 * - Deasserts resets, allocates cache-aligned RX buffers and an
 *   MMU-section-aligned, uncached BD area split into TX and RX rings.
 * - Gets tx/pclk (and rx when RXCLK_EN), registers an MDIO bus backed by
 *   phyread/phywrite, runs zynq_phy_init(), applies ZynqMP SGMII firmware
 *   config where applicable, and powers on the serdes PHY.
 * Error paths (partially visible below) unregister the MDIO bus and free
 * the RX buffers.
 */
805 static int zynq_gem_probe(struct udevice *dev)
808 struct zynq_gem_priv *priv = dev_get_priv(dev);
812 if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
813 ret = generic_phy_get_by_index(dev, 0, &phy);
815 ret = generic_phy_init(&phy);
818 } else if (ret != -ENOENT) {
819 debug("could not get phy (err %d)\n", ret);
824 ret = zynq_gem_reset_init(dev);
828 /* Align rxbuffers to ARCH_DMA_MINALIGN */
829 priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
830 if (!priv->rxbuffers)
833 memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
834 ulong addr = (ulong)priv->rxbuffers;
835 flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
838 /* Align bd_space to MMU_SECTION_SHIFT */
839 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
/* BDs are shared with the DMA engine: map the region uncached */
845 mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
846 BD_SPACE, DCACHE_OFF);
848 /* Initialize the bd spaces for tx and rx bd's */
849 priv->tx_bd = (struct emac_bd *)bd_space;
850 priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);
852 ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
854 dev_err(dev, "failed to get tx_clock\n");
858 if (priv->clk_en_info & RXCLK_EN) {
859 ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
861 dev_err(dev, "failed to get rx_clock\n");
866 ret = clk_get_by_name(dev, "pclk", &priv->pclk);
868 dev_err(dev, "failed to get pclk clock\n");
872 if (IS_ENABLED(CONFIG_DM_ETH_PHY))
873 priv->bus = eth_phy_get_mdio_bus(dev);
/* No shared bus available: register our own MDIO bus */
876 priv->bus = mdio_alloc();
877 priv->bus->read = zynq_gem_miiphy_read;
878 priv->bus->write = zynq_gem_miiphy_write;
879 priv->bus->priv = priv;
881 ret = mdio_register_seq(priv->bus, dev_seq(dev));
886 if (IS_ENABLED(CONFIG_DM_ETH_PHY))
887 eth_phy_set_mdio_bus(dev, priv->bus);
889 ret = zynq_phy_init(dev);
893 if (priv->interface == PHY_INTERFACE_MODE_SGMII && phy.dev) {
894 if (IS_ENABLED(CONFIG_DM_ETH_PHY)) {
895 if (device_is_compatible(dev, "cdns,zynqmp-gem") ||
896 device_is_compatible(dev, "xlnx,zynqmp-gem")) {
897 ret = gem_zynqmp_set_dynamic_config(dev);
901 "Failed to set gem dynamic config\n");
906 ret = generic_phy_power_on(&phy);
911 printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
912 (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phydev->addr,
913 phy_string_for_interface(priv->interface));
/* Error unwinding (labels not visible in this extraction) */
918 mdio_unregister(priv->bus);
922 free(priv->rxbuffers);
/* Driver remove: tear down the MDIO bus registered in probe. */
926 static int zynq_gem_remove(struct udevice *dev)
928 struct zynq_gem_priv *priv = dev_get_priv(dev);
931 mdio_unregister(priv->bus);
932 mdio_free(priv->bus);
/* U-Boot DM ethernet operations table for this driver */
937 static const struct eth_ops zynq_gem_ops = {
938 .start = zynq_gem_init,
939 .send = zynq_gem_send,
940 .recv = zynq_gem_recv,
941 .free_pkt = zynq_gem_free_pkt,
942 .stop = zynq_gem_halt,
943 .write_hwaddr = zynq_gem_setup_mac,
/*
 * of_to_plat: extract platform data from the device tree.
 * Reads the register base, resolves the "phy-handle" (PHY address,
 * max-speed, node), locates a separate MDIO-provider GEM instance when the
 * PHY hangs off another controller's mdio node, and records the PHY
 * interface mode, internal-PCS flag and per-compatible driver data.
 */
946 static int zynq_gem_of_to_plat(struct udevice *dev)
948 struct eth_pdata *pdata = dev_get_plat(dev);
949 struct zynq_gem_priv *priv = dev_get_priv(dev);
950 struct ofnode_phandle_args phandle_args;
952 pdata->iobase = (phys_addr_t)dev_read_addr(dev);
953 priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
954 priv->mdiobase = priv->iobase;
955 /* Hardcode for now */
958 if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
963 debug("phy-handle does exist %s\n", dev->name);
964 if (!(IS_ENABLED(CONFIG_DM_ETH_PHY)))
965 priv->phyaddr = ofnode_read_u32_default
966 (phandle_args.node, "reg", -1);
968 priv->phy_of_node = phandle_args.node;
969 priv->max_speed = ofnode_read_u32_default(phandle_args.node,
/* Walk up past an intermediate "mdio" node to the owning controller */
973 parent = ofnode_get_parent(phandle_args.node);
974 if (ofnode_name_eq(parent, "mdio"))
975 parent = ofnode_get_parent(parent);
/*
 * NOTE(review): the debug string "MDIO bus not found" reads inverted for
 * the ADDR_T_NONE check as shown; surrounding lines are missing from this
 * extraction — verify against the full source before changing.
 */
977 addr = ofnode_get_addr(parent);
978 if (addr != FDT_ADDR_T_NONE) {
979 debug("MDIO bus not found %s\n", dev->name);
980 priv->mdiobase = (struct zynq_gem_regs *)addr;
984 pdata->phy_interface = dev_read_phy_mode(dev);
985 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
987 priv->interface = pdata->phy_interface;
989 priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");
991 priv->clk_en_info = dev_get_driver_data(dev);
/* Compatible strings; .data carries RXCLK_EN for parts with a gated rx clock */
996 static const struct udevice_id zynq_gem_ids[] = {
997 { .compatible = "xlnx,versal-gem", .data = RXCLK_EN },
998 { .compatible = "cdns,versal-gem", .data = RXCLK_EN },
999 { .compatible = "xlnx,zynqmp-gem" },
1000 { .compatible = "cdns,zynqmp-gem" },
1001 { .compatible = "xlnx,zynq-gem" },
1002 { .compatible = "cdns,zynq-gem" },
1003 { .compatible = "cdns,gem" },
1007 U_BOOT_DRIVER(zynq_gem) = {
1010 .of_match = zynq_gem_ids,
1011 .of_to_plat = zynq_gem_of_to_plat,
1012 .probe = zynq_gem_probe,
1013 .remove = zynq_gem_remove,
1014 .ops = &zynq_gem_ops,
1015 .priv_auto = sizeof(struct zynq_gem_priv),
1016 .plat_auto = sizeof(struct eth_pdata),