// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <env.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include "sh_eth.h"
33 #ifndef CFG_SH_ETHER_USE_PORT
34 # error "Please define CFG_SH_ETHER_USE_PORT"
36 #ifndef CFG_SH_ETHER_PHY_ADDR
37 # error "Please define CFG_SH_ETHER_PHY_ADDR"
40 #if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
41 !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
42 #define flush_cache_wback(addr, len) \
43 flush_dcache_range((unsigned long)addr, \
44 (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
46 #define flush_cache_wback(...)
/*
 * Invalidate the dcache over [addr, addr + len), with both ends aligned out
 * to a full cache line so no partial line is invalidated; no-op on non-ARM
 * or when cache invalidation is not requested by the board config.
 */
#if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)		\
	{	\
		unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;	\
		\
		start = (unsigned long)addr;	\
		end = start + len;		\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000
68 static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
71 struct sh_eth_info *port_info = ð->port_info[eth->port];
73 if (!packet || len > 0xffff) {
74 printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
79 /* packet must be a 4 byte boundary */
80 if ((uintptr_t)packet & 3) {
81 printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
87 /* Update tx descriptor */
88 flush_cache_wback(packet, len);
89 port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
90 port_info->tx_desc_cur->td1 = len << 16;
91 /* Must preserve the end of descriptor list indication */
92 if (port_info->tx_desc_cur->td0 & TD_TDLE)
93 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
95 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
97 flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
99 /* Restart the transmitter if disabled */
100 if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
101 sh_eth_write(port_info, EDTRR_TRNS, EDTRR);
103 /* Wait until packet is transmitted */
104 timeout = TIMEOUT_CNT;
106 invalidate_cache(port_info->tx_desc_cur,
107 sizeof(struct tx_desc_s));
109 } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
112 printf(SHETHER_NAME ": transmit timeout\n");
117 port_info->tx_desc_cur++;
118 if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
119 port_info->tx_desc_cur = port_info->tx_desc_base;
125 static int sh_eth_recv_start(struct sh_eth_dev *eth)
127 struct sh_eth_info *port_info = ð->port_info[eth->port];
129 /* Check if the rx descriptor is ready */
130 invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
131 if (port_info->rx_desc_cur->rd0 & RD_RACT)
134 /* Check for errors */
135 if (port_info->rx_desc_cur->rd0 & RD_RFE)
138 return port_info->rx_desc_cur->rd1 & 0xffff;
141 static void sh_eth_recv_finish(struct sh_eth_dev *eth)
143 struct sh_eth_info *port_info = ð->port_info[eth->port];
145 invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);
147 /* Make current descriptor available again */
148 if (port_info->rx_desc_cur->rd0 & RD_RDLE)
149 port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
151 port_info->rx_desc_cur->rd0 = RD_RACT;
153 flush_cache_wback(port_info->rx_desc_cur,
154 sizeof(struct rx_desc_s));
156 /* Point to the next descriptor */
157 port_info->rx_desc_cur++;
158 if (port_info->rx_desc_cur >=
159 port_info->rx_desc_base + NUM_RX_DESC)
160 port_info->rx_desc_cur = port_info->rx_desc_base;
163 static int sh_eth_reset(struct sh_eth_dev *eth)
165 struct sh_eth_info *port_info = ð->port_info[eth->port];
166 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
169 /* Start e-dmac transmitter and receiver */
170 sh_eth_write(port_info, EDSR_ENALL, EDSR);
172 /* Perform a software reset and wait for it to complete */
173 sh_eth_write(port_info, EDMR_SRST, EDMR);
174 for (i = 0; i < TIMEOUT_CNT; i++) {
175 if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
180 if (i == TIMEOUT_CNT) {
181 printf(SHETHER_NAME ": Software reset timeout\n");
187 sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
189 sh_eth_write(port_info,
190 sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);
196 static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
199 u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
200 struct sh_eth_info *port_info = ð->port_info[eth->port];
201 struct tx_desc_s *cur_tx_desc;
204 * Allocate rx descriptors. They must be aligned to size of struct
207 port_info->tx_desc_alloc =
208 memalign(sizeof(struct tx_desc_s), alloc_desc_size);
209 if (!port_info->tx_desc_alloc) {
210 printf(SHETHER_NAME ": memalign failed\n");
215 /* Make sure we use a P2 address (non-cacheable) */
216 port_info->tx_desc_base =
217 (struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
218 port_info->tx_desc_cur = port_info->tx_desc_base;
220 /* Initialize all descriptors */
221 for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
222 cur_tx_desc++, i++) {
223 cur_tx_desc->td0 = 0x00;
224 cur_tx_desc->td1 = 0x00;
225 cur_tx_desc->td2 = 0x00;
228 /* Mark the end of the descriptors */
230 cur_tx_desc->td0 |= TD_TDLE;
232 flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
234 * Point the controller to the tx descriptor list. Must use physical
237 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
238 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
239 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
240 sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
241 sh_eth_write(port_info, 0x01, TDFFR);/* Last discriptor bit */
248 static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
251 u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
252 struct sh_eth_info *port_info = ð->port_info[eth->port];
253 struct rx_desc_s *cur_rx_desc;
257 * Allocate rx descriptors. They must be aligned to size of struct
260 port_info->rx_desc_alloc =
261 memalign(sizeof(struct rx_desc_s), alloc_desc_size);
262 if (!port_info->rx_desc_alloc) {
263 printf(SHETHER_NAME ": memalign failed\n");
268 /* Make sure we use a P2 address (non-cacheable) */
269 port_info->rx_desc_base =
270 (struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);
272 port_info->rx_desc_cur = port_info->rx_desc_base;
275 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
276 * aligned and in P2 area.
278 port_info->rx_buf_alloc =
279 memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
280 if (!port_info->rx_buf_alloc) {
281 printf(SHETHER_NAME ": alloc failed\n");
286 port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);
288 /* Initialize all descriptors */
289 for (cur_rx_desc = port_info->rx_desc_base,
290 rx_buf = port_info->rx_buf_base, i = 0;
291 i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
292 cur_rx_desc->rd0 = RD_RACT;
293 cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
294 cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
297 /* Mark the end of the descriptors */
299 cur_rx_desc->rd0 |= RD_RDLE;
301 invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
302 flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);
304 /* Point the controller to the rx descriptor list */
305 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
306 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
307 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
308 sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
309 sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
315 free(port_info->rx_desc_alloc);
316 port_info->rx_desc_alloc = NULL;
322 static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
324 struct sh_eth_info *port_info = ð->port_info[eth->port];
326 if (port_info->tx_desc_alloc) {
327 free(port_info->tx_desc_alloc);
328 port_info->tx_desc_alloc = NULL;
332 static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
334 struct sh_eth_info *port_info = ð->port_info[eth->port];
336 if (port_info->rx_desc_alloc) {
337 free(port_info->rx_desc_alloc);
338 port_info->rx_desc_alloc = NULL;
341 if (port_info->rx_buf_alloc) {
342 free(port_info->rx_buf_alloc);
343 port_info->rx_buf_alloc = NULL;
/*
 * Set up both descriptor rings; on rx failure the already-initialized
 * tx ring is freed again. Returns 0 on success or a negative errno.
 */
static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;

err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}
367 static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
372 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
373 sh_eth_write(port_info, val, MAHR);
375 val = (mac[4] << 8) | mac[5];
376 sh_eth_write(port_info, val, MALR);
379 static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
381 struct sh_eth_info *port_info = ð->port_info[eth->port];
384 /* Configure e-dmac registers */
385 edmr = sh_eth_read(port_info, EDMR);
386 edmr &= ~EMDR_DESC_R;
387 edmr |= EMDR_DESC | EDMR_EL;
388 #if defined(CONFIG_R8A77980)
391 sh_eth_write(port_info, edmr, EDMR);
393 sh_eth_write(port_info, 0, EESIPR);
394 sh_eth_write(port_info, 0, TRSCER);
395 sh_eth_write(port_info, 0, TFTR);
396 sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
397 sh_eth_write(port_info, RMCR_RST, RMCR);
398 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
399 sh_eth_write(port_info, 0, RPADIR);
401 sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);
403 /* Configure e-mac registers */
404 sh_eth_write(port_info, 0, ECSIPR);
406 /* Set Mac address */
407 sh_eth_write_hwaddr(port_info, mac);
409 sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
410 #if defined(SH_ETH_TYPE_GETHER)
411 sh_eth_write(port_info, 0, PIPR);
413 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
414 sh_eth_write(port_info, APR_AP, APR);
415 sh_eth_write(port_info, MPR_MP, MPR);
416 sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
419 #if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
420 sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
421 #elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
422 sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
426 static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
428 struct sh_eth_info *port_info = ð->port_info[eth->port];
429 struct phy_device *phy = port_info->phydev;
433 /* Set the transfer speed */
434 if (phy->speed == 100) {
435 printf(SHETHER_NAME ": 100Base/");
436 #if defined(SH_ETH_TYPE_GETHER)
437 sh_eth_write(port_info, GECMR_100B, GECMR);
438 #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
439 sh_eth_write(port_info, 1, RTRATE);
440 #elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
443 } else if (phy->speed == 10) {
444 printf(SHETHER_NAME ": 10Base/");
445 #if defined(SH_ETH_TYPE_GETHER)
446 sh_eth_write(port_info, GECMR_10B, GECMR);
447 #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
448 sh_eth_write(port_info, 0, RTRATE);
451 #if defined(SH_ETH_TYPE_GETHER)
452 else if (phy->speed == 1000) {
453 printf(SHETHER_NAME ": 1000Base/");
454 sh_eth_write(port_info, GECMR_1000B, GECMR);
458 /* Check if full duplex mode is supported by the phy */
461 sh_eth_write(port_info,
462 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
466 sh_eth_write(port_info,
467 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
474 static void sh_eth_start(struct sh_eth_dev *eth)
476 struct sh_eth_info *port_info = ð->port_info[eth->port];
479 * Enable the e-dmac receiver only. The transmitter will be enabled when
480 * we have something to transmit
482 sh_eth_write(port_info, EDRRR_R, EDRRR);
485 static void sh_eth_stop(struct sh_eth_dev *eth)
487 struct sh_eth_info *port_info = ð->port_info[eth->port];
489 sh_eth_write(port_info, ~EDRRR_R, EDRRR);
/*
 * Common hardware bring-up: reset, descriptor rings, MAC register setup.
 * Returns 0 on success or a negative errno.
 */
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}
509 static int sh_eth_start_common(struct sh_eth_dev *eth)
511 struct sh_eth_info *port_info = ð->port_info[eth->port];
514 ret = phy_startup(port_info->phydev);
516 printf(SHETHER_NAME ": phy startup failure\n");
520 ret = sh_eth_phy_regs_config(eth);
529 struct sh_ether_priv {
530 struct sh_eth_dev shdev;
537 static int sh_ether_send(struct udevice *dev, void *packet, int len)
539 struct sh_ether_priv *priv = dev_get_priv(dev);
540 struct sh_eth_dev *eth = &priv->shdev;
542 return sh_eth_send_common(eth, packet, len);
545 static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
547 struct sh_ether_priv *priv = dev_get_priv(dev);
548 struct sh_eth_dev *eth = &priv->shdev;
549 struct sh_eth_info *port_info = ð->port_info[eth->port];
550 uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
553 len = sh_eth_recv_start(eth);
555 invalidate_cache(packet, len);
561 /* Restart the receiver if disabled */
562 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
563 sh_eth_write(port_info, EDRRR_R, EDRRR);
568 static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
570 struct sh_ether_priv *priv = dev_get_priv(dev);
571 struct sh_eth_dev *eth = &priv->shdev;
572 struct sh_eth_info *port_info = ð->port_info[eth->port];
574 sh_eth_recv_finish(eth);
576 /* Restart the receiver if disabled */
577 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
578 sh_eth_write(port_info, EDRRR_R, EDRRR);
583 static int sh_ether_write_hwaddr(struct udevice *dev)
585 struct sh_ether_priv *priv = dev_get_priv(dev);
586 struct sh_eth_dev *eth = &priv->shdev;
587 struct sh_eth_info *port_info = ð->port_info[eth->port];
588 struct eth_pdata *pdata = dev_get_plat(dev);
590 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
595 static int sh_eth_phy_config(struct udevice *dev)
597 struct sh_ether_priv *priv = dev_get_priv(dev);
598 struct eth_pdata *pdata = dev_get_plat(dev);
599 struct sh_eth_dev *eth = &priv->shdev;
601 struct sh_eth_info *port_info = ð->port_info[eth->port];
602 struct phy_device *phydev;
604 phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
608 port_info->phydev = phydev;
614 static int sh_ether_start(struct udevice *dev)
616 struct sh_ether_priv *priv = dev_get_priv(dev);
617 struct eth_pdata *pdata = dev_get_plat(dev);
618 struct sh_eth_dev *eth = &priv->shdev;
621 ret = sh_eth_init_common(eth, pdata->enetaddr);
625 ret = sh_eth_start_common(eth);
632 sh_eth_tx_desc_free(eth);
633 sh_eth_rx_desc_free(eth);
637 static void sh_ether_stop(struct udevice *dev)
639 struct sh_ether_priv *priv = dev_get_priv(dev);
640 struct sh_eth_dev *eth = &priv->shdev;
641 struct sh_eth_info *port_info = ð->port_info[eth->port];
643 phy_shutdown(port_info->phydev);
644 sh_eth_stop(&priv->shdev);
647 static int sh_ether_probe(struct udevice *udev)
649 struct eth_pdata *pdata = dev_get_plat(udev);
650 struct sh_ether_priv *priv = dev_get_priv(udev);
651 struct sh_eth_dev *eth = &priv->shdev;
652 struct mii_dev *mdiodev;
655 priv->iobase = pdata->iobase;
657 #if CONFIG_IS_ENABLED(CLK)
658 ret = clk_get_by_index(udev, 0, &priv->clk);
662 mdiodev = mdio_alloc();
668 mdiodev->read = bb_miiphy_read;
669 mdiodev->write = bb_miiphy_write;
670 bb_miiphy_buses[0].priv = eth;
671 snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
673 ret = mdio_register(mdiodev);
675 goto err_mdio_register;
677 priv->bus = miiphy_get_dev_by_name(udev->name);
679 eth->port = CFG_SH_ETHER_USE_PORT;
680 eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
681 eth->port_info[eth->port].iobase =
682 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
684 #if CONFIG_IS_ENABLED(CLK)
685 ret = clk_enable(&priv->clk);
687 goto err_mdio_register;
690 ret = sh_eth_init_common(eth, pdata->enetaddr);
694 ret = sh_eth_phy_config(udev);
696 printf(SHETHER_NAME ": phy config timeout\n");
703 #if CONFIG_IS_ENABLED(CLK)
704 clk_disable(&priv->clk);
711 static int sh_ether_remove(struct udevice *udev)
713 struct sh_ether_priv *priv = dev_get_priv(udev);
714 struct sh_eth_dev *eth = &priv->shdev;
715 struct sh_eth_info *port_info = ð->port_info[eth->port];
717 #if CONFIG_IS_ENABLED(CLK)
718 clk_disable(&priv->clk);
720 free(port_info->phydev);
721 mdio_unregister(priv->bus);
722 mdio_free(priv->bus);
727 static const struct eth_ops sh_ether_ops = {
728 .start = sh_ether_start,
729 .send = sh_ether_send,
730 .recv = sh_ether_recv,
731 .free_pkt = sh_ether_free_pkt,
732 .stop = sh_ether_stop,
733 .write_hwaddr = sh_ether_write_hwaddr,
736 int sh_ether_of_to_plat(struct udevice *dev)
738 struct eth_pdata *pdata = dev_get_plat(dev);
741 pdata->iobase = dev_read_addr(dev);
743 pdata->phy_interface = dev_read_phy_mode(dev);
744 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
747 pdata->max_speed = 1000;
748 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
750 pdata->max_speed = fdt32_to_cpu(*cell);
752 sprintf(bb_miiphy_buses[0].name, dev->name);
757 static const struct udevice_id sh_ether_ids[] = {
758 { .compatible = "renesas,ether-r7s72100" },
759 { .compatible = "renesas,ether-r8a7790" },
760 { .compatible = "renesas,ether-r8a7791" },
761 { .compatible = "renesas,ether-r8a7793" },
762 { .compatible = "renesas,ether-r8a7794" },
763 { .compatible = "renesas,gether-r8a77980" },
767 U_BOOT_DRIVER(eth_sh_ether) = {
770 .of_match = sh_ether_ids,
771 .of_to_plat = sh_ether_of_to_plat,
772 .probe = sh_ether_probe,
773 .remove = sh_ether_remove,
774 .ops = &sh_ether_ops,
775 .priv_auto = sizeof(struct sh_ether_priv),
776 .plat_auto = sizeof(struct eth_pdata),
777 .flags = DM_FLAG_ALLOC_PRIV_DMA,
/******* for bb_miiphy *******/
781 static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
786 static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
788 struct sh_eth_dev *eth = bus->priv;
789 struct sh_eth_info *port_info = ð->port_info[eth->port];
791 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
796 static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
798 struct sh_eth_dev *eth = bus->priv;
799 struct sh_eth_info *port_info = ð->port_info[eth->port];
801 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
806 static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
808 struct sh_eth_dev *eth = bus->priv;
809 struct sh_eth_info *port_info = ð->port_info[eth->port];
812 sh_eth_write(port_info,
813 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
815 sh_eth_write(port_info,
816 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
821 static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
823 struct sh_eth_dev *eth = bus->priv;
824 struct sh_eth_info *port_info = ð->port_info[eth->port];
826 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
831 static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
833 struct sh_eth_dev *eth = bus->priv;
834 struct sh_eth_info *port_info = ð->port_info[eth->port];
837 sh_eth_write(port_info,
838 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
840 sh_eth_write(port_info,
841 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
846 static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
853 struct bb_miiphy_bus bb_miiphy_buses[] = {
856 .init = sh_eth_bb_init,
857 .mdio_active = sh_eth_bb_mdio_active,
858 .mdio_tristate = sh_eth_bb_mdio_tristate,
859 .set_mdio = sh_eth_bb_set_mdio,
860 .get_mdio = sh_eth_bb_get_mdio,
861 .set_mdc = sh_eth_bb_set_mdc,
862 .delay = sh_eth_bb_delay,
866 int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);