// SPDX-License-Identifier: GPL-2.0+
/*
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <memalign.h>
#include <pci.h>
#include <reset.h>
#include <asm/io.h>
#include <asm/gpio.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <power/regulator.h>
#include "designware.h"
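
/*
 * MDIO bus accessors. A PHY register access is started by programming the
 * GMAC MII address register (PHY address, register number, clock range and
 * the BUSY bit) and then polling until the hardware clears BUSY or
 * CONFIG_MDIO_TIMEOUT expires.
 */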
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
	}

	return -ETIMEDOUT;
}
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
	}

	return ret;
}
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_platdata(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* Toggle the PHY reset GPIO with the configured delays */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;
	udelay(pdata->reset_delays[2]);

	return 0;
}
#endif
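
/*
 * Allocate and register an MII bus named after the ethernet device and wire
 * it to the accessors above; the caller's "priv" pointer is stored in
 * bus->priv for use by the read/write/reset callbacks.
 */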
static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}
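
/*
 * Set up the TX descriptors as a circular chain: each descriptor points at
 * its static packet buffer and at the next descriptor, the last one points
 * back to the first, and the list base address is handed to the DMA engine.
 */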
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}
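
/*
 * Set up the RX descriptor chain the same way as the TX one, except that
 * every descriptor is immediately handed to the DMA engine (OWN bit set) so
 * the GMAC can start filling the receive buffers.
 */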
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to GMAC we need to make sure zeros
	 * written there right after "priv" structure allocation were
	 * flushed into RAM.
	 * Otherwise there's a chance some of them get flushed into RAM only
	 * when GMAC is already pushing data to RAM via DMA, corrupting the
	 * data incoming from GMAC.
	 */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}
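
/* Program the 6-byte MAC address into the MAC address 0 high/low registers. */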
static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}
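
/*
 * Mirror the negotiated PHY link parameters (speed, duplex) into the MAC
 * configuration register once the PHY reports link up.
 */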
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}
static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}
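
/*
 * Bring up the MAC: soft-reset the DMA engine, restore the MAC address that
 * the reset clears, rebuild the descriptor rings, configure the DMA bus and
 * operation modes, then start the PHY and match the MAC to the negotiated
 * link.
 */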
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	ulong start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}
	}

	/*
	 * Soft reset above clears HW address registers.
	 * So we have to set it here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}
int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}
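
/*
 * Queue one frame on the current TX descriptor: copy (and zero-pad) the
 * packet into the descriptor's buffer, flush it to RAM, hand the descriptor
 * to the DMA engine and kick the transmit poll demand register.
 */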
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);

	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}
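
/*
 * Check the current RX descriptor; if the DMA engine has released it, return
 * a pointer to the received frame (after invalidating its cache lines) and
 * its length, otherwise return -EAGAIN.
 */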
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)desc_p->dmamac_addr;
	}

	return length;
}
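
/*
 * Give the just-processed RX descriptor back to the DMA engine and advance
 * to the next descriptor in the ring.
 */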
static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush only status field - others weren't changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}
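
/*
 * Connect to the PHY (at CONFIG_PHY_ADDR if set, otherwise by scanning the
 * bus), limit the supported/advertised features to gigabit and the optional
 * "max-speed" value, and remember the phy_device for later start-up.
 */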
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int phy_addr = -1, ret;

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}
#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	int ret;

	ret = designware_eth_init(dev->priv, dev->enetaddr);
	if (!ret)
		ret = designware_eth_enable(dev->priv);

	return ret;
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;
	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	if ((phys_addr_t)priv + sizeof(*priv) > (1ULL << 32)) {
		printf("designware: buffers are outside DMA memory\n");
		return -EINVAL;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}
static int designware_eth_bind(struct udevice *dev)
{
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}

	return 0;
}
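
/*
 * Driver-model probe: enable any clocks and the optional PHY supply
 * regulator, release the reset lines, pick up the register base (from PCI
 * config space when the device sits on a PCI bus), then register the MDIO
 * bus and connect the PHY, undoing the clock setup if no PHY is found.
 */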
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	ulong ioaddr;
	int ret, err;
	int i, clock_nb;
	struct reset_ctl_bulk reset_bulk;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				clk_free(&priv->clocks[i]);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in platdata before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

	ret = dw_mdio_init(dev->name, dev);
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

mdio_err:
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

	return err;
}
static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return clk_release_all(priv->clocks, priv->clock_count);
}
const struct eth_ops designware_eth_ops = {
	.start = designware_eth_start,
	.send = designware_eth_send,
	.recv = designware_eth_recv,
	.free_pkt = designware_eth_free_pkt,
	.stop = designware_eth_stop,
	.write_hwaddr = designware_eth_write_hwaddr,
};
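
/*
 * Parse the device tree node: register base, "phy-mode", optional
 * "max-speed", and the optional "snps,reset-gpio" with its polarity and
 * "snps,reset-delays-us" triplet used by dw_mdio_reset().
 */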
int designware_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_platdata(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
	const char *phy_mode;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}
static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "amlogic,meson-gx-dwmac" },
	{ .compatible = "amlogic,meson-gxbb-dwmac" },
	{ .compatible = "amlogic,meson-axg-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};
U_BOOT_DRIVER(eth_designware) = {
	.name = "eth_designware",
	.id = UCLASS_ETH,
	.of_match = designware_eth_ids,
	.ofdata_to_platdata = designware_eth_ofdata_to_platdata,
	.bind = designware_eth_bind,
	.probe = designware_eth_probe,
	.remove = designware_eth_remove,
	.ops = &designware_eth_ops,
	.priv_auto_alloc_size = sizeof(struct dw_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
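
/*
 * Illustrative device tree binding (hypothetical node; the addresses,
 * phandles and values are only an example of the properties parsed by
 * designware_eth_ofdata_to_platdata() above, not taken from a real board):
 *
 *	gmac: ethernet@ff700000 {
 *		compatible = "snps,arc-dwmac-3.70a";
 *		reg = <0xff700000 0x2000>;
 *		phy-mode = "rgmii";
 *		max-speed = <1000>;
 *		snps,reset-gpio = <&gpio1 0 0>;
 *		snps,reset-active-low;
 *		snps,reset-delays-us = <0 10000 1000000>;
 *		clocks = <&clkmgr 25>;
 *	};
 */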