 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
 * (C) Copyright 2008 Armadeus Systems nc
 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
 * SPDX-License-Identifier: GPL-2.0+
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/errno.h>
#include <linux/compiler.h>
DECLARE_GLOBAL_DATA_PTR;
 * Timeout the transfer after 5 ms. This is usually a bit more, since
 * the tight loops this timeout is used in add some overhead.
#define FEC_XFER_TIMEOUT 5000
 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
 * 64-byte alignment in the DMA RX FEC buffer.
 * Introduce FEC_DMA_RX_MINALIGN, which covers the mx6solox requirement and
 * also satisfies the alignment on other SoCs (32 bytes).
#define FEC_DMA_RX_MINALIGN 64
#error "CONFIG_MII has to be defined!"
#ifndef CONFIG_FEC_XCV_TYPE
#define CONFIG_FEC_XCV_TYPE MII100
 * The i.MX28 handles packets in big-endian format. We need to swap them
 * before sending and after receiving.
#define CONFIG_FEC_MXC_SWAP_PACKET
#define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
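/*
 * Illustrative note (not from the original sources): struct fec_bd is an
 * 8-byte descriptor (two 16-bit fields plus a 32-bit pointer), so assuming a
 * typical 32-byte ARCH_DMA_MINALIGN this works out to 4 RX descriptors per
 * cache line. fec_recv() below relies on this: it only marks descriptors free
 * again once a whole cache line of them has been processed, so the line can
 * be flushed without corrupting its neighbours.
 */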
/* Check various alignment issues at compile time */
#if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
#error "ARCH_DMA_MINALIGN must be multiple of 16!"
#if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
        (PKTALIGN % ARCH_DMA_MINALIGN != 0))
#error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
        uint8_t data[1500]; /**< actual data */
        int length;         /**< actual length */
        int used;           /**< buffer in use or not */
        uint8_t head[16];   /**< MAC header(6 + 6 + 2) + 2(aligned) */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
static void swap_packet(uint32_t *packet, int length)
        for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
                packet[i] = __swab32(packet[i]);
 * MII-interface related functions
static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
        uint32_t reg; /* convenient holder for the PHY register */
        uint32_t phy; /* convenient holder for the PHY */
         * reading from any PHY's register is done by properly
         * programming the FEC's MII data register.
        writel(FEC_IEVENT_MII, &eth->ievent);
        reg = regAddr << FEC_MII_DATA_RA_SHIFT;
        phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
        writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
                        phy | reg, &eth->mii_data);
         * wait for the related interrupt
        start = get_timer(0);
        while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
                if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
                        printf("Read MDIO failed...\n");
         * clear MII interrupt bit
        writel(FEC_IEVENT_MII, &eth->ievent);
         * it's now safe to read the PHY's register
        val = (unsigned short)readl(&eth->mii_data);
        debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
static void fec_mii_setspeed(struct ethernet_regs *eth)
         * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
         * and do not drop the Preamble.
        register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
#ifdef FEC_QUIRK_ENET_MAC
        writel(speed, &eth->mii_speed);
        debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
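/*
 * Worked example added for illustration (the clock value is an assumption,
 * not taken from the code above): with a 66 MHz FEC module clock,
 * DIV_ROUND_UP(66000000, 5000000) = 14, which gives an MDC of roughly
 * 66 MHz / (14 * 2) = 2.36 MHz and so stays below the 2.5 MHz maximum that
 * IEEE 802.3 allows for the MDIO management clock.
 */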
static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
                uint8_t regAddr, uint16_t data)
        uint32_t reg; /* convenient holder for the PHY register */
        uint32_t phy; /* convenient holder for the PHY */
        reg = regAddr << FEC_MII_DATA_RA_SHIFT;
        phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
        writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
                FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);
         * wait for the MII interrupt
        start = get_timer(0);
        while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
                if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
                        printf("Write MDIO failed...\n");
         * clear MII interrupt bit
        writel(FEC_IEVENT_MII, &eth->ievent);
        debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyAddr,
static int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr,
        return fec_mdio_read(bus->priv, phyAddr, regAddr);
static int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr,
                int regAddr, u16 data)
        return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
#ifndef CONFIG_PHYLIB
static int miiphy_restart_aneg(struct eth_device *dev)
#if !defined(CONFIG_FEC_MXC_NO_ANEG)
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        struct ethernet_regs *eth = fec->bus->priv;
         * Wake up from sleep if necessary
         * Reset PHY, then delay 300ns
        fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
        fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
         * Set the auto-negotiation advertisement register bits
        fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
                        LPA_100FULL | LPA_100HALF | LPA_10FULL |
                        LPA_10HALF | PHY_ANLPAR_PSB_802_3);
        fec_mdio_write(eth, fec->phy_id, MII_BMCR,
                        BMCR_ANENABLE | BMCR_ANRESTART);
        if (fec->mii_postcall)
                ret = fec->mii_postcall(fec->phy_id);
static int miiphy_wait_aneg(struct eth_device *dev)
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        struct ethernet_regs *eth = fec->bus->priv;
         * Wait for AN completion
        start = get_timer(0);
                if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
                        printf("%s: Autonegotiation timeout\n", dev->name);
                status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
                        printf("%s: Autonegotiation failed. status: %d\n",
        } while (!(status & BMSR_LSTATUS));
static int fec_rx_task_enable(struct fec_priv *fec)
        writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
static int fec_rx_task_disable(struct fec_priv *fec)
static int fec_tx_task_enable(struct fec_priv *fec)
        writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
static int fec_tx_task_disable(struct fec_priv *fec)
 * Initialize receive task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * @param[in] count receive buffer count to be allocated
 * @param[in] dsize desired size of each receive buffer
 *
 * Init all RX descriptors to default values.
static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
         * Reload the RX descriptors with default values and wipe
        size = roundup(dsize, ARCH_DMA_MINALIGN);
        for (i = 0; i < count; i++) {
                data = (uint8_t *)fec->rbd_base[i].data_pointer;
                memset(data, 0, dsize);
                flush_dcache_range((uint32_t)data, (uint32_t)data + size);
                fec->rbd_base[i].status = FEC_RBD_EMPTY;
                fec->rbd_base[i].data_length = 0;
        /* Mark the last RBD to close the ring. */
        fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
        flush_dcache_range((unsigned)fec->rbd_base,
                        (unsigned)fec->rbd_base + size);
 * Initialize transmit task's buffer descriptors
 * @param[in] fec all we know about the device yet
 * Transmit buffers are created externally. We only have to init the BDs here.\n
 * Note: There is a race condition in the hardware. When only one BD is in
 * use it must be marked with the WRAP bit to use it for every transmit.
 * This bit in combination with the READY bit results in a double transmit
 * of each data buffer. It seems the state machine checks READY earlier than
 * it resets it after the first transfer.
 * Using two BDs solves this issue.
static void fec_tbd_init(struct fec_priv *fec)
        unsigned addr = (unsigned)fec->tbd_base;
        unsigned size = roundup(2 * sizeof(struct fec_bd),
        memset(fec->tbd_base, 0, size);
        fec->tbd_base[0].status = 0;
        fec->tbd_base[1].status = FEC_TBD_WRAP;
        flush_dcache_range(addr, addr + size);
 * Mark the given read buffer descriptor as free
 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
 * @param[in] pRbd buffer descriptor to mark free again
static void fec_rbd_clean(int last, struct fec_bd *pRbd)
        unsigned short flags = FEC_RBD_EMPTY;
                flags |= FEC_RBD_WRAP;
        writew(flags, &pRbd->status);
        writew(0, &pRbd->data_length);
static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
        imx_get_mac_from_fuse(dev_id, mac);
        return !is_valid_ether_addr(mac);
static int fec_set_hwaddr(struct eth_device *dev)
        uchar *mac = dev->enetaddr;
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        writel(0, &fec->eth->iaddr1);
        writel(0, &fec->eth->iaddr2);
        writel(0, &fec->eth->gaddr1);
        writel(0, &fec->eth->gaddr2);
         * Set physical address
        writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
        writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
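        /*
         * Note added for clarity: the 0x8808 written above into the low half
         * of PADDR2 is the MAC control (pause) EtherType; per the FEC
         * reference manual this TYPE field is used when generating pause
         * frames, so only the upper half carries station address bytes 4-5.
         */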
 * Do initial configuration of the FEC registers
static void fec_reg_setup(struct fec_priv *fec)
         * Set interrupt mask register
        writel(0x00000000, &fec->eth->imask);
         * Clear FEC-Lite interrupt event register (IEVENT)
        writel(0xffffffff, &fec->eth->ievent);
         * Set FEC-Lite receive control register (R_CNTRL):
        /* Start with frame length = 1518, common for all modes. */
        rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
        if (fec->xcv_type != SEVENWIRE) /* xMII modes */
                rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
        if (fec->xcv_type == RGMII)
                rcntrl |= FEC_RCNTRL_RGMII;
        else if (fec->xcv_type == RMII)
                rcntrl |= FEC_RCNTRL_RMII;
        writel(rcntrl, &fec->eth->r_cntrl);
 * Start the FEC engine
 * @param[in] dev Our device to handle
static int fec_open(struct eth_device *edev)
        struct fec_priv *fec = (struct fec_priv *)edev->priv;
        debug("fec_open: fec_open(dev)\n");
        /* full-duplex, heartbeat disabled */
        writel(1 << 2, &fec->eth->x_cntrl);
        /* Invalidate all descriptors */
        for (i = 0; i < FEC_RBD_NUM - 1; i++)
                fec_rbd_clean(0, &fec->rbd_base[i]);
        fec_rbd_clean(1, &fec->rbd_base[i]);
        /* Flush the descriptors into RAM */
        size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
        addr = (uint32_t)fec->rbd_base;
        flush_dcache_range(addr, addr + size);
#ifdef FEC_QUIRK_ENET_MAC
        /* Enable ENET HW endian SWAP */
        writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
        /* Enable ENET store and forward mode */
        writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
         * Enable FEC-Lite controller
        writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
         * setup the MII gasket for RMII mode
        /* disable the gasket */
        writew(0, &fec->eth->miigsk_enr);
        /* wait for the gasket to be disabled */
        while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
        /* configure gasket for RMII, 50 MHz, no loopback, and no echo */
        writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
        /* re-enable the gasket */
        writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
        /* wait until MII gasket is ready */
        while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
                if (--max_loops <= 0) {
                        printf("WAIT for MII Gasket ready timed out\n");
        /* Start up the PHY */
        int ret = phy_startup(fec->phydev);
                printf("Could not initialize PHY %s\n",
                       fec->phydev->dev->name);
        speed = fec->phydev->speed;
        miiphy_wait_aneg(edev);
        speed = miiphy_speed(edev->name, fec->phy_id);
        miiphy_duplex(edev->name, fec->phy_id);
#ifdef FEC_QUIRK_ENET_MAC
        u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
        u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
        if (speed == _1000BASET)
                ecr |= FEC_ECNTRL_SPEED;
        else if (speed != _100BASET)
                rcr |= FEC_RCNTRL_RMII_10T;
        writel(ecr, &fec->eth->ecntrl);
        writel(rcr, &fec->eth->r_cntrl);
        debug("%s:Speed=%i\n", __func__, speed);
         * Enable SmartDMA receive task
        fec_rx_task_enable(fec);
static int fec_init(struct eth_device *dev, bd_t *bd)
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
        /* Initialize MAC address */
         * Setup transmit descriptors, there are two in total.
        /* Setup receive descriptors. */
        fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);
        if (fec->xcv_type != SEVENWIRE)
                fec_mii_setspeed(fec->bus->priv);
         * Set Opcode/Pause Duration Register
        writel(0x00010020, &fec->eth->op_pause); /* FIXME 0xffff0020; */
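        /*
         * Note added for clarity (interpretation based on the FEC reference
         * manual, not on the original comment): in the value written above,
         * the upper half 0x0001 corresponds to the PAUSE opcode and the lower
         * half 0x0020 is the pause duration advertised in transmitted pause
         * frames, expressed in 512-bit-time pause quanta.
         */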
        writel(0x2, &fec->eth->x_wmrk);
         * Set multicast address filter
        writel(0x00000000, &fec->eth->gaddr1);
        writel(0x00000000, &fec->eth->gaddr2);
        for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
        /* FIFO receive start register */
        writel(0x520, &fec->eth->r_fstart);
        /* size and address of each buffer */
        writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
        writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
        writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
#ifndef CONFIG_PHYLIB
        if (fec->xcv_type != SEVENWIRE)
                miiphy_restart_aneg(dev);
 * Halt the FEC engine
 * @param[in] dev Our device to handle
static void fec_halt(struct eth_device *dev)
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        int counter = 0xffff;
         * issue graceful stop command to the FEC transmitter if necessary
        writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
        debug("eth_halt: wait for stop regs\n");
         * wait for graceful stop to register
        while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
         * Disable SmartDMA tasks
        fec_tx_task_disable(fec);
        fec_rx_task_disable(fec);
         * Disable the Ethernet Controller
         * Note: this will also reset the BD index counter!
        writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
        debug("eth_halt: done\n");
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
static int fec_send(struct eth_device *dev, void *packet, int length)
        int timeout = FEC_XFER_TIMEOUT;
         * This routine transmits one frame. It only accepts
         * 6-byte Ethernet addresses.
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
         * Check for valid length of data.
        if ((length > 1500) || (length <= 0)) {
                printf("Payload (%d) too large\n", length);
         * Set up the transmit buffer. We always use the first buffer for
         * transmission; the second is empty and only used to stop the DMA
         * engine. We also flush the packet to RAM here to avoid cache trouble.
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
        swap_packet((uint32_t *)packet, length);
        addr = (uint32_t)packet;
        end = roundup(addr + length, ARCH_DMA_MINALIGN);
        addr &= ~(ARCH_DMA_MINALIGN - 1);
        flush_dcache_range(addr, end);
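        /*
         * Worked example with hypothetical numbers, added for illustration:
         * with ARCH_DMA_MINALIGN = 32, a 60-byte frame at address 0x87f00010
         * gives end = roundup(0x87f0004c, 32) = 0x87f00060 and addr rounded
         * down to 0x87f00000, so the flush covers every cache line the frame
         * touches.
         */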
        writew(length, &fec->tbd_base[fec->tbd_index].data_length);
        writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);
         * update BD's status now
         * - is always the last in a chain (means no chain)
         * - should transmit the CRC
         * - might be the last BD in the list, so the address counter should
         *   wrap (-> keep the WRAP flag)
        status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
        status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
        writew(status, &fec->tbd_base[fec->tbd_index].status);
         * Flush data cache. This code flushes both TX descriptors to RAM.
         * After this code, the descriptors will be safely in RAM and we
        size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
        addr = (uint32_t)fec->tbd_base;
        flush_dcache_range(addr, addr + size);
         * Below we read the DMA descriptor's last four bytes back from the
         * DRAM. This is important in order to make sure that all WRITE
         * operations on the bus that were triggered by previous cache FLUSH
         * Otherwise, on MX28, it is possible to observe a corruption of the
         * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
         * for the bus structure of MX28. The scenario is as follows:
         * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
         *    to DRAM due to flush_dcache_range()
         * 2) ARM core writes the FEC registers via AHB_ARB2
         * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
         * Note that 2) sometimes finishes before 1) due to reordering of
         * WRITE accesses on the AHB bus, therefore triggering 3) before the
         * DMA descriptor is fully written into DRAM. This results in occasional
         * corruption of the DMA descriptor.
        readl(addr + size - 4);
         * Enable SmartDMA transmit task
        fec_tx_task_enable(fec);
         * Wait until frame is sent. On each turn of the wait cycle, we must
         * invalidate data cache to see what's really in RAM. Also, we need
                if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
         * The TDAR bit is cleared when the descriptors are all out from TX
         * but on mx6solox we noticed that the READY bit is still not cleared
         * These are two distinct signals, and in IC simulation, we found that
         * TDAR always gets cleared before the READY bit of the last BD becomes
         * In mx6solox, we use a later version of the FEC IP. It looks like
         * this intrinsic behaviour of the TDAR bit has changed in this newer FEC
         * Fix this by polling the READY bit of BD after the TDAR polling,
         * which covers the mx6solox case and does not harm the other SoCs.
        timeout = FEC_XFER_TIMEOUT;
                invalidate_dcache_range(addr, addr + size);
                if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
        debug("fec_send: status 0x%x index %d ret %i\n",
              readw(&fec->tbd_base[fec->tbd_index].status),
              fec->tbd_index, ret);
        /* for next transmission use the other buffer */
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
static int fec_recv(struct eth_device *dev)
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
        unsigned long ievent;
        int frame_length, len = 0;
        uint32_t addr, size, end;
        ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
         * Check if any critical events have happened
        ievent = readl(&fec->eth->ievent);
        writel(ievent, &fec->eth->ievent);
        debug("fec_recv: ievent 0x%lx\n", ievent);
        if (ievent & FEC_IEVENT_BABR) {
                fec_init(dev, fec->bd);
                printf("some error: 0x%08lx\n", ievent);
        if (ievent & FEC_IEVENT_HBERR) {
                /* Heartbeat error */
                writel(0x00000001 | readl(&fec->eth->x_cntrl),
        if (ievent & FEC_IEVENT_GRA) {
                /* Graceful stop complete */
                if (readl(&fec->eth->x_cntrl) & 0x00000001) {
                        writel(~0x00000001 & readl(&fec->eth->x_cntrl),
                        fec_init(dev, fec->bd);
         * Read the buffer status. Before the status can be read, the data cache
         * must be invalidated, because the data in RAM might have been changed
         * by DMA. The descriptors are properly aligned to cachelines so there's
         * no need to worry they'd overlap.
         * WARNING: By invalidating the descriptor here, we also invalidate
         * the descriptors surrounding this one. Therefore we can NOT change the
         * contents of this descriptor nor the surrounding ones. The problem is
         * that in order to mark the descriptor as processed, we need to change
         * the descriptor. The solution is to mark the whole cache line when all
         * descriptors in the cache line are processed.
        addr = (uint32_t)rbd;
        addr &= ~(ARCH_DMA_MINALIGN - 1);
        size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
        invalidate_dcache_range(addr, addr + size);
        bd_status = readw(&rbd->status);
        debug("fec_recv: status 0x%x\n", bd_status);
        if (!(bd_status & FEC_RBD_EMPTY)) {
                if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
                    ((readw(&rbd->data_length) - 4) > 14)) {
                         * Get buffer address and size
                        frame = (struct nbuf *)readl(&rbd->data_pointer);
                        frame_length = readw(&rbd->data_length) - 4;
                         * Invalidate data cache over the buffer
                        addr = (uint32_t)frame;
                        end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
                        addr &= ~(ARCH_DMA_MINALIGN - 1);
                        invalidate_dcache_range(addr, end);
                         * Fill the buffer and pass it to upper layers
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
                        swap_packet((uint32_t *)frame->data, frame_length);
                        memcpy(buff, frame->data, frame_length);
                        NetReceive(buff, frame_length);
                if (bd_status & FEC_RBD_ERR)
                        printf("error frame: 0x%08lx 0x%08x\n",
                               (ulong)rbd->data_pointer,
                 * Free the current buffer, restart the engine and move forward
                 * to the next buffer. Here we check if the whole cacheline of
                 * descriptors was already processed and if so, we mark it free
                size = RXDESC_PER_CACHELINE - 1;
                if ((fec->rbd_index & size) == size) {
                        i = fec->rbd_index - size;
                        addr = (uint32_t)&fec->rbd_base[i];
                        for (; i <= fec->rbd_index ; i++) {
                                fec_rbd_clean(i == (FEC_RBD_NUM - 1),
                        flush_dcache_range(addr,
                                           addr + ARCH_DMA_MINALIGN);
                fec_rx_task_enable(fec);
                fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
        debug("fec_recv: stop\n");
static void fec_set_dev_name(char *dest, int dev_id)
        sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
static int fec_alloc_descs(struct fec_priv *fec)
        /* Allocate TX descriptors. */
        size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
        fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
        /* Allocate RX descriptors. */
        size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
        fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
        memset(fec->rbd_base, 0, size);
        /* Allocate RX buffers. */
        /* Maximum RX buffer size. */
        size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
        for (i = 0; i < FEC_RBD_NUM; i++) {
                data = memalign(FEC_DMA_RX_MINALIGN, size);
                        printf("%s: error allocating rxbuf %d\n", __func__, i);
                memset(data, 0, size);
                fec->rbd_base[i].data_pointer = (uint32_t)data;
                fec->rbd_base[i].status = FEC_RBD_EMPTY;
                fec->rbd_base[i].data_length = 0;
                /* Flush the buffer to memory. */
                flush_dcache_range((uint32_t)data, (uint32_t)data + size);
        /* Mark the last RBD to close the ring. */
        fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
                free((void *)fec->rbd_base[i].data_pointer);
static void fec_free_descs(struct fec_priv *fec)
        for (i = 0; i < FEC_RBD_NUM; i++)
                free((void *)fec->rbd_base[i].data_pointer);
int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
                struct mii_dev *bus, struct phy_device *phydev)
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
                struct mii_dev *bus, int phy_id)
        struct eth_device *edev;
        struct fec_priv *fec;
        unsigned char ethaddr[6];
        /* create and fill edev struct */
        edev = (struct eth_device *)malloc(sizeof(struct eth_device));
                puts("fec_mxc: not enough malloc memory for eth_device\n");
        fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
                puts("fec_mxc: not enough malloc memory for fec_priv\n");
        memset(edev, 0, sizeof(*edev));
        memset(fec, 0, sizeof(*fec));
        ret = fec_alloc_descs(fec);
        edev->init = fec_init;
        edev->send = fec_send;
        edev->recv = fec_recv;
        edev->halt = fec_halt;
        edev->write_hwaddr = fec_set_hwaddr;
        fec->eth = (struct ethernet_regs *)base_addr;
        fec->xcv_type = CONFIG_FEC_XCV_TYPE;
        writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
        start = get_timer(0);
        while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
                if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
                        printf("FEC MXC: Timeout resetting chip\n");
        fec_set_dev_name(edev->name, dev_id);
        fec->dev_id = (dev_id == -1) ? 0 : dev_id;
        fec_mii_setspeed(bus->priv);
#ifdef CONFIG_PHYLIB
        fec->phydev = phydev;
        phy_connect_dev(phydev, edev);
        fec->phy_id = phy_id;
        if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
                debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
                memcpy(edev->enetaddr, ethaddr, 6);
                if (!getenv("ethaddr"))
                        eth_setenv_enetaddr("ethaddr", ethaddr);
        fec_free_descs(fec);
struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
        struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
        struct mii_dev *bus;
                printf("mdio_alloc failed\n");
        bus->read = fec_phy_read;
        bus->write = fec_phy_write;
        fec_set_dev_name(bus->name, dev_id);
        ret = mdio_register(bus);
                printf("mdio_register failed\n");
        fec_mii_setspeed(eth);
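/*
 * Usage sketch added for illustration (not part of the original driver): a
 * board's board_eth_init() would typically register the controller with a
 * call along these lines, where the PHY address and the IMX_FEC_BASE register
 * base are board/SoC-specific assumptions:
 *
 *      int board_eth_init(bd_t *bis)
 *      {
 *              return fecmxc_initialize_multi(bis, 0, CONFIG_FEC_MXC_PHYADDR,
 *                                             IMX_FEC_BASE);
 *      }
 */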
int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
        struct mii_dev *bus = NULL;
#ifdef CONFIG_PHYLIB
        struct phy_device *phydev = NULL;
         * The i.MX28 has two ethernet interfaces, but they are not equal.
         * Only the first one can access the MDIO bus.
        base_mii = MXS_ENET0_BASE;
        debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
        bus = fec_get_miibus(base_mii, dev_id);
#ifdef CONFIG_PHYLIB
        phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
        ret = fec_probe(bd, dev_id, addr, bus, phydev);
        ret = fec_probe(bd, dev_id, addr, bus, phy_id);
#ifdef CONFIG_PHYLIB
#ifdef CONFIG_FEC_MXC_PHYADDR
int fecmxc_initialize(bd_t *bd)
        return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
#ifndef CONFIG_PHYLIB
int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
        struct fec_priv *fec = (struct fec_priv *)dev->priv;
        fec->mii_postcall = cb;