1 // SPDX-License-Identifier: GPL-2.0+
3 * sh_eth.c - Driver for Renesas Ethernet controller.
5 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
6 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
7 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
8 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
19 #include <asm/cache.h>
20 #include <linux/delay.h>
21 #include <linux/errno.h>
22 #include <asm/global_data.h>
27 #include <linux/mii.h>
32 #ifndef CFG_SH_ETHER_USE_PORT
33 # error "Please define CFG_SH_ETHER_USE_PORT"
35 #ifndef CFG_SH_ETHER_PHY_ADDR
36 # error "Please define CFG_SH_ETHER_PHY_ADDR"
39 #if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
40 !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
41 #define flush_cache_wback(addr, len) \
42 flush_dcache_range((unsigned long)addr, \
43 (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
45 #define flush_cache_wback(...)
48 #if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
49 #define invalidate_cache(addr, len) \
51 unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE; \
52 unsigned long start, end; \
54 start = (unsigned long)addr; \
56 start &= ~(line_size - 1); \
57 end = ((end + line_size - 1) & ~(line_size - 1)); \
59 invalidate_dcache_range(start, end); \
62 #define invalidate_cache(...)
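/*
 * When CFG_SH_ETHER_CACHE_WRITEBACK/CFG_SH_ETHER_CACHE_INVALIDATE are not
 * enabled, both cache helpers above compile to no-ops. Otherwise,
 * flush_cache_wback() writes dirty lines back before the E-DMAC reads a
 * buffer or descriptor, and invalidate_cache() discards stale lines (with
 * the range rounded out to CFG_SH_ETHER_ALIGNE_SIZE boundaries) before the
 * CPU reads data the E-DMAC has written.
 */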
65 #define TIMEOUT_CNT 1000
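/*
 * Transmit path: sh_eth_send_common() fills the current tx descriptor with
 * the physical address and length of the packet, flushes both the packet
 * buffer and the descriptor from the data cache, kicks the E-DMAC
 * transmitter via EDTRR if it is idle, and then polls the TD_TACT bit (for
 * at most TIMEOUT_CNT iterations) until the hardware clears it to signal
 * that the frame has been sent.
 */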
67 static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
70 struct sh_eth_info *port_info = ð->port_info[eth->port];
72 if (!packet || len > 0xffff) {
73 printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
78 /* The packet must be aligned on a 4-byte boundary */
79 if ((uintptr_t)packet & 3) {
80 printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
86 /* Update tx descriptor */
87 flush_cache_wback(packet, len);
88 port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
89 port_info->tx_desc_cur->td1 = len << 16;
90 /* Must preserve the end of descriptor list indication */
91 if (port_info->tx_desc_cur->td0 & TD_TDLE)
92 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
94 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
96 flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
98 /* Restart the transmitter if disabled */
99 if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
100 sh_eth_write(port_info, EDTRR_TRNS, EDTRR);
102 /* Wait until packet is transmitted */
103 timeout = TIMEOUT_CNT;
105 invalidate_cache(port_info->tx_desc_cur,
106 sizeof(struct tx_desc_s));
108 } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
111 printf(SHETHER_NAME ": transmit timeout\n");
116 port_info->tx_desc_cur++;
117 if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
118 port_info->tx_desc_cur = port_info->tx_desc_base;
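/*
 * Receive path: reception is split across two helpers. sh_eth_recv_start()
 * invalidates and inspects the current rx descriptor; it bails out while the
 * hardware still owns the entry (RD_RACT) or when a receive error is flagged
 * (RD_RFE), and otherwise returns the frame length held in the low 16 bits
 * of rd1. sh_eth_recv_finish() then hands the descriptor back to the
 * hardware (preserving RD_RDLE on the last entry) and advances to the next
 * ring slot.
 */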
124 static int sh_eth_recv_start(struct sh_eth_dev *eth)
126 struct sh_eth_info *port_info = ð->port_info[eth->port];
128 /* Check if the rx descriptor is ready */
129 invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
130 if (port_info->rx_desc_cur->rd0 & RD_RACT)
133 /* Check for errors */
134 if (port_info->rx_desc_cur->rd0 & RD_RFE)
137 return port_info->rx_desc_cur->rd1 & 0xffff;
140 static void sh_eth_recv_finish(struct sh_eth_dev *eth)
142 struct sh_eth_info *port_info = ð->port_info[eth->port];
144 invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);
146 /* Make current descriptor available again */
147 if (port_info->rx_desc_cur->rd0 & RD_RDLE)
148 port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
150 port_info->rx_desc_cur->rd0 = RD_RACT;
152 flush_cache_wback(port_info->rx_desc_cur,
153 sizeof(struct rx_desc_s));
155 /* Point to the next descriptor */
156 port_info->rx_desc_cur++;
157 if (port_info->rx_desc_cur >=
158 port_info->rx_desc_base + NUM_RX_DESC)
159 port_info->rx_desc_cur = port_info->rx_desc_base;
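/*
 * The reset sequence differs per SoC family: GETHER/RZ controllers take a
 * software reset through EDMR_SRST and are polled until the bit self-clears
 * (bounded by TIMEOUT_CNT), while the other variants simply set and then
 * clear EDMR_SRST.
 */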
162 static int sh_eth_reset(struct sh_eth_dev *eth)
164 struct sh_eth_info *port_info = ð->port_info[eth->port];
165 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
168 /* Start e-dmac transmitter and receiver */
169 sh_eth_write(port_info, EDSR_ENALL, EDSR);
171 /* Perform a software reset and wait for it to complete */
172 sh_eth_write(port_info, EDMR_SRST, EDMR);
173 for (i = 0; i < TIMEOUT_CNT; i++) {
174 if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
179 if (i == TIMEOUT_CNT) {
180 printf(SHETHER_NAME ": Software reset timeout\n");
186 sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
188 sh_eth_write(port_info,
189 sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);
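/*
 * Descriptor ring setup: both rings are allocated with memalign() so that
 * every descriptor is aligned to its own size, and the driver then accesses
 * them through the P2 (non-cacheable) alias where the architecture provides
 * one, so the CPU and the E-DMAC see consistent descriptor contents. The
 * physical base address of the tx ring is programmed into TDLAR (plus the
 * GETHER/RZ-only TDFAR/TDFXR/TDFFR registers).
 */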
195 static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
198 u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
199 struct sh_eth_info *port_info = ð->port_info[eth->port];
200 struct tx_desc_s *cur_tx_desc;
203 * Allocate tx descriptors. They must be aligned to the size of struct
206 port_info->tx_desc_alloc =
207 memalign(sizeof(struct tx_desc_s), alloc_desc_size);
208 if (!port_info->tx_desc_alloc) {
209 printf(SHETHER_NAME ": memalign failed\n");
214 /* Make sure we use a P2 address (non-cacheable) */
215 port_info->tx_desc_base =
216 (struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
217 port_info->tx_desc_cur = port_info->tx_desc_base;
219 /* Initialize all descriptors */
220 for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
221 cur_tx_desc++, i++) {
222 cur_tx_desc->td0 = 0x00;
223 cur_tx_desc->td1 = 0x00;
224 cur_tx_desc->td2 = 0x00;
227 /* Mark the end of the descriptors */
229 cur_tx_desc->td0 |= TD_TDLE;
231 flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
233 * Point the controller to the tx descriptor list. Must use physical
236 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
237 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
238 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
239 sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
240 sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
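/*
 * The receive ring pairs each descriptor with a MAX_BUF_SIZE data buffer:
 * the buffer size goes into the upper 16 bits of rd1, the buffer's physical
 * address into rd2, and RD_RACT in rd0 hands ownership of the entry to the
 * E-DMAC. The last descriptor additionally carries RD_RDLE so the controller
 * wraps back to the start of the ring.
 */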
247 static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
250 u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
251 struct sh_eth_info *port_info = ð->port_info[eth->port];
252 struct rx_desc_s *cur_rx_desc;
256 * Allocate rx descriptors. They must be aligned to the size of struct
259 port_info->rx_desc_alloc =
260 memalign(sizeof(struct rx_desc_s), alloc_desc_size);
261 if (!port_info->rx_desc_alloc) {
262 printf(SHETHER_NAME ": memalign failed\n");
267 /* Make sure we use a P2 address (non-cacheable) */
268 port_info->rx_desc_base =
269 (struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);
271 port_info->rx_desc_cur = port_info->rx_desc_base;
274 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
275 * aligned and placed in the P2 (non-cacheable) area.
277 port_info->rx_buf_alloc =
278 memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
279 if (!port_info->rx_buf_alloc) {
280 printf(SHETHER_NAME ": alloc failed\n");
285 port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);
287 /* Initialize all descriptors */
288 for (cur_rx_desc = port_info->rx_desc_base,
289 rx_buf = port_info->rx_buf_base, i = 0;
290 i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
291 cur_rx_desc->rd0 = RD_RACT;
292 cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
293 cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
296 /* Mark the end of the descriptors */
298 cur_rx_desc->rd0 |= RD_RDLE;
300 invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
301 flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);
303 /* Point the controller to the rx descriptor list */
304 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
305 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
306 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
307 sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
308 sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
314 free(port_info->rx_desc_alloc);
315 port_info->rx_desc_alloc = NULL;
321 static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
323 struct sh_eth_info *port_info = ð->port_info[eth->port];
325 if (port_info->tx_desc_alloc) {
326 free(port_info->tx_desc_alloc);
327 port_info->tx_desc_alloc = NULL;
331 static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
333 struct sh_eth_info *port_info = ð->port_info[eth->port];
335 if (port_info->rx_desc_alloc) {
336 free(port_info->rx_desc_alloc);
337 port_info->rx_desc_alloc = NULL;
340 if (port_info->rx_buf_alloc) {
341 free(port_info->rx_buf_alloc);
342 port_info->rx_buf_alloc = NULL;
346 static int sh_eth_desc_init(struct sh_eth_dev *eth)
350 ret = sh_eth_tx_desc_init(eth);
354 ret = sh_eth_rx_desc_init(eth);
360 sh_eth_tx_desc_free(eth);
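/*
 * The MAC address is split across two registers: MAHR takes the four most
 * significant bytes and MALR the remaining two. For example, a hypothetical
 * address 02:11:22:33:44:55 would be programmed as MAHR = 0x02112233 and
 * MALR = 0x00004455.
 */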
366 static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
371 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
372 sh_eth_write(port_info, val, MAHR);
374 val = (mac[4] << 8) | mac[5];
375 sh_eth_write(port_info, val, MALR);
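/*
 * sh_eth_mac_regs_config() first programs the E-DMAC side (descriptor format
 * via EDMR, all interrupt sources masked via EESIPR, FIFO sizing and
 * thresholds via FDR/TFTR/FCFTR) and then the E-MAC side (ECSIPR masked,
 * station address, maximum frame length via RFLR). The trailing #if blocks
 * select the per-SoC MII/RMII glue.
 */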
378 static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
380 struct sh_eth_info *port_info = ð->port_info[eth->port];
383 /* Configure e-dmac registers */
384 edmr = sh_eth_read(port_info, EDMR);
385 edmr &= ~EMDR_DESC_R;
386 edmr |= EMDR_DESC | EDMR_EL;
387 #if defined(CONFIG_R8A77980)
390 sh_eth_write(port_info, edmr, EDMR);
392 sh_eth_write(port_info, 0, EESIPR);
393 sh_eth_write(port_info, 0, TRSCER);
394 sh_eth_write(port_info, 0, TFTR);
395 sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
396 sh_eth_write(port_info, RMCR_RST, RMCR);
397 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
398 sh_eth_write(port_info, 0, RPADIR);
400 sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);
402 /* Configure e-mac registers */
403 sh_eth_write(port_info, 0, ECSIPR);
405 /* Set the MAC address */
406 sh_eth_write_hwaddr(port_info, mac);
408 sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
409 #if defined(SH_ETH_TYPE_GETHER)
410 sh_eth_write(port_info, 0, PIPR);
412 #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
413 sh_eth_write(port_info, APR_AP, APR);
414 sh_eth_write(port_info, MPR_MP, MPR);
415 sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
418 #if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
419 sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
420 #elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
421 sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
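/*
 * Once the PHY has negotiated a link, the MAC is told the result: the link
 * speed is reflected into GECMR (GETHER) or other SoC-specific rate
 * registers such as RTRATE, and the duplex setting is applied through
 * ECMR_DM together with ECMR_RE/ECMR_TE to enable the receiver and
 * transmitter.
 */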
425 static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
427 struct sh_eth_info *port_info = ð->port_info[eth->port];
428 struct phy_device *phy = port_info->phydev;
432 /* Set the transfer speed */
433 if (phy->speed == 100) {
434 printf(SHETHER_NAME ": 100Base/");
435 #if defined(SH_ETH_TYPE_GETHER)
436 sh_eth_write(port_info, GECMR_100B, GECMR);
437 #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
438 sh_eth_write(port_info, 1, RTRATE);
439 #elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
442 } else if (phy->speed == 10) {
443 printf(SHETHER_NAME ": 10Base/");
444 #if defined(SH_ETH_TYPE_GETHER)
445 sh_eth_write(port_info, GECMR_10B, GECMR);
446 #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
447 sh_eth_write(port_info, 0, RTRATE);
450 #if defined(SH_ETH_TYPE_GETHER)
451 else if (phy->speed == 1000) {
452 printf(SHETHER_NAME ": 1000Base/");
453 sh_eth_write(port_info, GECMR_1000B, GECMR);
457 /* Check if full duplex mode is supported by the phy */
460 sh_eth_write(port_info,
461 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
465 sh_eth_write(port_info,
466 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
473 static void sh_eth_start(struct sh_eth_dev *eth)
475 struct sh_eth_info *port_info = ð->port_info[eth->port];
478 * Enable the e-dmac receiver only. The transmitter will be enabled when
479 * we have something to transmit.
481 sh_eth_write(port_info, EDRRR_R, EDRRR);
484 static void sh_eth_stop(struct sh_eth_dev *eth)
486 struct sh_eth_info *port_info = ð->port_info[eth->port];
488 sh_eth_write(port_info, ~EDRRR_R, EDRRR);
491 static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
495 ret = sh_eth_reset(eth);
499 ret = sh_eth_desc_init(eth);
503 sh_eth_mac_regs_config(eth, mac);
508 static int sh_eth_start_common(struct sh_eth_dev *eth)
510 struct sh_eth_info *port_info = ð->port_info[eth->port];
513 ret = phy_startup(port_info->phydev);
515 printf(SHETHER_NAME ": phy startup failure\n");
519 ret = sh_eth_phy_regs_config(eth);
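/*
 * Everything below is the driver-model (DM_ETH) glue: struct sh_ether_priv
 * wraps the legacy sh_eth_dev state (and, judging by the fields referenced
 * later, the register base, the MDIO bus pointer and an optional clock),
 * while the sh_ether_* callbacks map the eth_ops interface onto the common
 * helpers above.
 */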
528 struct sh_ether_priv {
529 struct sh_eth_dev shdev;
536 static int sh_ether_send(struct udevice *dev, void *packet, int len)
538 struct sh_ether_priv *priv = dev_get_priv(dev);
539 struct sh_eth_dev *eth = &priv->shdev;
541 return sh_eth_send_common(eth, packet, len);
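/*
 * In the eth_ops model, .recv hands the stack a pointer to the received
 * frame (here the P2 alias of the buffer referenced by the current rx
 * descriptor) together with its length, and .free_pkt is called afterwards
 * to recycle that buffer; sh_ether_free_pkt() therefore re-arms the
 * descriptor via sh_eth_recv_finish() and restarts the receiver if it has
 * stopped.
 */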
544 static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
546 struct sh_ether_priv *priv = dev_get_priv(dev);
547 struct sh_eth_dev *eth = &priv->shdev;
548 struct sh_eth_info *port_info = ð->port_info[eth->port];
549 uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
552 len = sh_eth_recv_start(eth);
554 invalidate_cache(packet, len);
560 /* Restart the receiver if disabled */
561 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
562 sh_eth_write(port_info, EDRRR_R, EDRRR);
567 static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
569 struct sh_ether_priv *priv = dev_get_priv(dev);
570 struct sh_eth_dev *eth = &priv->shdev;
571 struct sh_eth_info *port_info = ð->port_info[eth->port];
573 sh_eth_recv_finish(eth);
575 /* Restart the receiver if disabled */
576 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
577 sh_eth_write(port_info, EDRRR_R, EDRRR);
582 static int sh_ether_write_hwaddr(struct udevice *dev)
584 struct sh_ether_priv *priv = dev_get_priv(dev);
585 struct sh_eth_dev *eth = &priv->shdev;
586 struct sh_eth_info *port_info = ð->port_info[eth->port];
587 struct eth_pdata *pdata = dev_get_plat(dev);
589 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
594 static int sh_eth_phy_config(struct udevice *dev)
596 struct sh_ether_priv *priv = dev_get_priv(dev);
597 struct eth_pdata *pdata = dev_get_plat(dev);
598 struct sh_eth_dev *eth = &priv->shdev;
600 struct sh_eth_info *port_info = ð->port_info[eth->port];
601 struct phy_device *phydev;
603 phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
607 port_info->phydev = phydev;
613 static int sh_ether_start(struct udevice *dev)
615 struct sh_ether_priv *priv = dev_get_priv(dev);
616 struct eth_pdata *pdata = dev_get_plat(dev);
617 struct sh_eth_dev *eth = &priv->shdev;
620 ret = sh_eth_init_common(eth, pdata->enetaddr);
624 ret = sh_eth_start_common(eth);
631 sh_eth_tx_desc_free(eth);
632 sh_eth_rx_desc_free(eth);
636 static void sh_ether_stop(struct udevice *dev)
638 struct sh_ether_priv *priv = dev_get_priv(dev);
639 struct sh_eth_dev *eth = &priv->shdev;
640 struct sh_eth_info *port_info = ð->port_info[eth->port];
642 phy_shutdown(port_info->phydev);
643 sh_eth_stop(&priv->shdev);
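/*
 * Probe order matters: the bit-banged MDIO bus is registered first so the
 * PHY can later be looked up by name, the optional clock is enabled, the
 * controller is reset and its descriptor rings are set up through
 * sh_eth_init_common(), and only then is the PHY connected in
 * sh_eth_phy_config(). Failures unwind through the error labels, disabling
 * the clock again where applicable.
 */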
646 static int sh_ether_probe(struct udevice *udev)
648 struct eth_pdata *pdata = dev_get_plat(udev);
649 struct sh_ether_priv *priv = dev_get_priv(udev);
650 struct sh_eth_dev *eth = &priv->shdev;
651 struct mii_dev *mdiodev;
654 priv->iobase = pdata->iobase;
656 #if CONFIG_IS_ENABLED(CLK)
657 ret = clk_get_by_index(udev, 0, &priv->clk);
661 mdiodev = mdio_alloc();
667 mdiodev->read = bb_miiphy_read;
668 mdiodev->write = bb_miiphy_write;
669 bb_miiphy_buses[0].priv = eth;
670 snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", udev->name);
672 ret = mdio_register(mdiodev);
674 goto err_mdio_register;
676 priv->bus = miiphy_get_dev_by_name(udev->name);
678 eth->port = CFG_SH_ETHER_USE_PORT;
679 eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
680 eth->port_info[eth->port].iobase =
681 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
683 #if CONFIG_IS_ENABLED(CLK)
684 ret = clk_enable(&priv->clk);
686 goto err_mdio_register;
689 ret = sh_eth_init_common(eth, pdata->enetaddr);
693 ret = sh_eth_phy_config(udev);
695 printf(SHETHER_NAME ": phy config timeout\n");
702 #if CONFIG_IS_ENABLED(CLK)
703 clk_disable(&priv->clk);
710 static int sh_ether_remove(struct udevice *udev)
712 struct sh_ether_priv *priv = dev_get_priv(udev);
713 struct sh_eth_dev *eth = &priv->shdev;
714 struct sh_eth_info *port_info = ð->port_info[eth->port];
716 #if CONFIG_IS_ENABLED(CLK)
717 clk_disable(&priv->clk);
719 free(port_info->phydev);
720 mdio_unregister(priv->bus);
721 mdio_free(priv->bus);
726 static const struct eth_ops sh_ether_ops = {
727 .start = sh_ether_start,
728 .send = sh_ether_send,
729 .recv = sh_ether_recv,
730 .free_pkt = sh_ether_free_pkt,
731 .stop = sh_ether_stop,
732 .write_hwaddr = sh_ether_write_hwaddr,
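/*
 * of_to_plat pulls the register base, the PHY interface mode and the
 * optional "max-speed" property out of the device tree, defaulting the
 * maximum speed to 1000 Mbit/s when the property is absent.
 */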
735 int sh_ether_of_to_plat(struct udevice *dev)
737 struct eth_pdata *pdata = dev_get_plat(dev);
740 pdata->iobase = dev_read_addr(dev);
742 pdata->phy_interface = dev_read_phy_mode(dev);
743 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
746 pdata->max_speed = 1000;
747 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
749 pdata->max_speed = fdt32_to_cpu(*cell);
751 sprintf(bb_miiphy_buses[0].name, "%s", dev->name);
756 static const struct udevice_id sh_ether_ids[] = {
757 { .compatible = "renesas,ether-r7s72100" },
758 { .compatible = "renesas,ether-r8a7790" },
759 { .compatible = "renesas,ether-r8a7791" },
760 { .compatible = "renesas,ether-r8a7793" },
761 { .compatible = "renesas,ether-r8a7794" },
762 { .compatible = "renesas,gether-r8a77980" },
766 U_BOOT_DRIVER(eth_sh_ether) = {
769 .of_match = sh_ether_ids,
770 .of_to_plat = sh_ether_of_to_plat,
771 .probe = sh_ether_probe,
772 .remove = sh_ether_remove,
773 .ops = &sh_ether_ops,
774 .priv_auto = sizeof(struct sh_ether_priv),
775 .plat_auto = sizeof(struct eth_pdata),
776 .flags = DM_FLAG_ALLOC_PRIV_DMA,
779 /******* for bb_miiphy *******/
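/*
 * MDIO is bit-banged through the PIR register: PIR_MMD selects the drive
 * (output) direction, PIR_MDO sets the data line, PIR_MDC toggles the clock
 * and PIR_MDI reflects the data line on read (hence the shift by three in
 * sh_eth_bb_get_mdio()).
 */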
780 static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
785 static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
787 struct sh_eth_dev *eth = bus->priv;
788 struct sh_eth_info *port_info = ð->port_info[eth->port];
790 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
795 static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
797 struct sh_eth_dev *eth = bus->priv;
798 struct sh_eth_info *port_info = ð->port_info[eth->port];
800 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
805 static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
807 struct sh_eth_dev *eth = bus->priv;
808 struct sh_eth_info *port_info = ð->port_info[eth->port];
811 sh_eth_write(port_info,
812 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
814 sh_eth_write(port_info,
815 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
820 static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
822 struct sh_eth_dev *eth = bus->priv;
823 struct sh_eth_info *port_info = ð->port_info[eth->port];
825 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
830 static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
832 struct sh_eth_dev *eth = bus->priv;
833 struct sh_eth_info *port_info = ð->port_info[eth->port];
836 sh_eth_write(port_info,
837 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
839 sh_eth_write(port_info,
840 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
845 static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
852 struct bb_miiphy_bus bb_miiphy_buses[] = {
855 .init = sh_eth_bb_init,
856 .mdio_active = sh_eth_bb_mdio_active,
857 .mdio_tristate = sh_eth_bb_mdio_tristate,
858 .set_mdio = sh_eth_bb_set_mdio,
859 .get_mdio = sh_eth_bb_get_mdio,
860 .set_mdc = sh_eth_bb_set_mdc,
861 .delay = sh_eth_bb_delay,
865 int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);