1 /*-----------------------------------------------------------------------------+
3 * This source code has been made available to you by IBM on an AS-IS
4 * basis. Anyone receiving this source is licensed under IBM
5 * copyrights to use it in any way he or she deems fit, including
6 * copying it, modifying it, compiling it, and redistributing it either
7 * with or without modifications. No license under IBM patents or
8 * patent applications is to be implied by the copyright license.
10 * Any user of this software should understand that IBM cannot provide
11 * technical support for this software and will not be responsible for
12 * any consequences resulting from the use of this software.
14 * Any person who transfers this source code or any derivative work
15 * must include the IBM copyright notice, this paragraph, and the
16 * preceding two paragraphs in the transferred software.
18 * COPYRIGHT I B M CORPORATION 1995
19 * LICENSED MATERIAL - PROGRAM PROPERTY OF I B M
20 *-----------------------------------------------------------------------------*/
21 /*-----------------------------------------------------------------------------+
23 * File Name: enetemac.c
25 * Function: Device driver for the ethernet EMAC3 macro on the 405GP.
31 * Date Description of Change BY
32 * --------- --------------------- ---
33 * 05-May-99 Created MKW
34 * 27-Jun-99 Clean up JWB
35 * 16-Jul-99 Added MAL error recovery and better IP packet handling MKW
36 * 29-Jul-99 Added Full duplex support MKW
37 * 06-Aug-99 Changed names for Mal CR reg MKW
38 * 23-Aug-99 Turned off SYE when running at 10Mbps MKW
39 * 24-Aug-99 Marked descriptor empty after call_xlc MKW
40 * 07-Sep-99 Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16 MCG
41 * to avoid chaining maximum sized packets. Push starting
42 * RX descriptor address up to the next cache line boundary.
43 * 16-Jan-00 Added support for booting with IP of 0x0 MKW
44 * 15-Mar-00 Updated enetInit() to enable broadcast addresses in the
45 * EMAC_RXM register. JWB
46 * 12-Mar-01 anne-sophie.harnois@nextream.fr
47 * - Variables are compatible with those already defined in
49 * - Receive buffer descriptor ring is used to send buffers
51 * - Info print about send/received/handled packet number if
52 * INFO_405_ENET is set
53 * 17-Apr-01 stefan.roese@esd-electronics.com
54 * - MAL reset in "eth_halt" included
55 * - Enet speed and duplex output now in one line
56 * 08-May-01 stefan.roese@esd-electronics.com
57 * - MAL error handling added (eth_init called again)
58 * 13-Nov-01 stefan.roese@esd-electronics.com
59 * - Set IST bit in EMAC_M1 reg upon 100MBit or full duplex
60 * 04-Jan-02 stefan.roese@esd-electronics.com
61 * - Wait for PHY auto negotiation to complete added
62 * 06-Feb-02 stefan.roese@esd-electronics.com
63 * - Bug fixed in waiting for auto negotiation to complete
64 * 26-Feb-02 stefan.roese@esd-electronics.com
65 * - rx and tx buffer descriptors now allocated (no fixed address
67 * 17-Jun-02 stefan.roese@esd-electronics.com
68 * - MAL error debug printf 'M' removed (rx de interrupt may
69 * occur upon many incoming packets with only 4 rx buffers).
70 *-----------------------------------------------------------------------------*
71 * 17-Nov-03 travis.sawyer@sandburst.com
72 * - ported from 405gp_enet.c to utilize up to 4 EMAC ports
73 * in the 440GX. This port should work with the 440GP
75 * 15-Aug-05 sr@denx.de
76 * - merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
77 *   now handling all 4xx CPUs.
78 *-----------------------------------------------------------------------------*/
83 #include <asm/processor.h>
86 #include <ppc4xx_enet.h>
93 * Only compile for platform with AMCC EMAC ethernet controller and
94 * network support enabled.
95 * Remark: CONFIG_405 describes Xilinx PPC405 FPGA without EMAC controller!
97 #if (CONFIG_COMMANDS & CFG_CMD_NET) && !defined(CONFIG_405) && !defined(CONFIG_IOP480)
99 #if !(defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII))
100 #error "CONFIG_MII has to be defined!"
103 #if defined(CONFIG_NETCONSOLE) && !defined(CONFIG_NET_MULTI)
104 #error "CONFIG_NET_MULTI has to be defined for NetConsole"
107 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
108 #define PHY_AUTONEGOTIATE_TIMEOUT 4000 /* 4000 ms autonegotiate timeout */
110 /* Ethernet Transmit and Receive Buffers */
112 * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
113 * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
115 #define ENET_MAX_MTU PKTSIZE
116 #define ENET_MAX_MTU_ALIGNED PKTSIZE_ALIGN
118 /*-----------------------------------------------------------------------------+
119 * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
120 * Interrupt Controller).
121 *-----------------------------------------------------------------------------*/
122 #define MAL_UIC_ERR ( UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
123 #define MAL_UIC_DEF (UIC_MAL_RXEOB | MAL_UIC_ERR)
124 #define EMAC_UIC_DEF UIC_ENET
125 #define EMAC_UIC_DEF1 UIC_ENET1
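/* Select the EMAC interrupt mask for a given port: port 0 uses UIC_ENET, any other port UIC_ENET1 */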
126 #define SEL_UIC_DEF(p) (p ? UIC_ENET1 : UIC_ENET )
130 #define BI_PHYMODE_NONE 0
131 #define BI_PHYMODE_ZMII 1
132 #define BI_PHYMODE_RGMII 2
135 /*-----------------------------------------------------------------------------+
136 * Global variables. TX and RX descriptors and buffers.
137 *-----------------------------------------------------------------------------*/
139 static uint32_t mal_ier;
141 #if !defined(CONFIG_NET_MULTI)
142 struct eth_device *emac0_dev = NULL;
146 * Get count of EMAC devices (doesn't have to be the max. possible number
147 * supported by the cpu)
149 #if defined(CONFIG_HAS_ETH3)
150 #define LAST_EMAC_NUM 4
151 #elif defined(CONFIG_HAS_ETH2)
152 #define LAST_EMAC_NUM 3
153 #elif defined(CONFIG_HAS_ETH1)
154 #define LAST_EMAC_NUM 2
156 #define LAST_EMAC_NUM 1
159 /*-----------------------------------------------------------------------------+
160 * Prototypes and externals.
161 *-----------------------------------------------------------------------------*/
162 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
164 int enetInt (struct eth_device *dev);
165 static void mal_err (struct eth_device *dev, unsigned long isr,
166 unsigned long uic, unsigned long maldef,
167 unsigned long mal_errr);
168 static void emac_err (struct eth_device *dev, unsigned long isr);
170 extern int phy_setup_aneg (char *devname, unsigned char addr);
171 extern int emac4xx_miiphy_read (char *devname, unsigned char addr,
172 unsigned char reg, unsigned short *value);
173 extern int emac4xx_miiphy_write (char *devname, unsigned char addr,
174 unsigned char reg, unsigned short value);
176 /*-----------------------------------------------------------------------------+
178 | Disable MAL channel, and EMACn
179 +-----------------------------------------------------------------------------*/
180 static void ppc_4xx_eth_halt (struct eth_device *dev)
182 EMAC_4XX_HW_PST hw_p = dev->priv;
183 uint32_t failsafe = 10000;
184 #if defined(CONFIG_440SPE)
188 out32 (EMAC_IER + hw_p->hw_addr, 0x00000000); /* disable emac interrupts */
190 /* 1st reset MAL channel */
191 /* Note: writing a 0 to a channel has no effect */
192 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
193 mtdcr (maltxcarr, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
195 mtdcr (maltxcarr, (MAL_CR_MMSR >> hw_p->devnum));
197 mtdcr (malrxcarr, (MAL_CR_MMSR >> hw_p->devnum));
200 while (mfdcr (malrxcasr) & (MAL_CR_MMSR >> hw_p->devnum)) {
201 udelay (1000); /* Delay 1 MS so as not to hammer the register */
208 #if defined(CONFIG_440SPE)
209 /* provide clocks for EMAC internal loopback */
210 mfsdr (sdr_mfr, mfr);
215 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
217 #if defined(CONFIG_440SPE)
218 /* remove clocks for EMAC internal loopback */
219 mfsdr (sdr_mfr, mfr);
225 #ifndef CONFIG_NETCONSOLE
226 hw_p->print_speed = 1; /* print speed message again next time */
232 #if defined (CONFIG_440GX)
233 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
236 unsigned long zmiifer;
237 unsigned long rmiifer;
239 mfsdr(sdr_pfc1, pfc1);
240 pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
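/* The decoded Ethernet Pin Select (EPS) value determines which ZMII (RMII/SMII)
 * or RGMII bridge configuration is programmed for the four EMAC ports below. */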
247 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
248 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
249 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
250 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
251 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
252 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
253 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
254 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
257 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
258 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
259 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
260 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
261 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
262 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
263 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
264 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
267 zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
268 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
269 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
270 bis->bi_phymode[1] = BI_PHYMODE_NONE;
271 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
272 bis->bi_phymode[3] = BI_PHYMODE_NONE;
275 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
276 zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
277 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
278 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
279 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
280 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
281 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
282 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
285 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
286 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
287 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
288 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
289 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
290 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
291 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
292 bis->bi_phymode[3] = BI_PHYMODE_RGMII;
295 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
296 zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
297 rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
298 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
299 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
300 bis->bi_phymode[2] = BI_PHYMODE_RGMII;
304 zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
306 bis->bi_phymode[0] = BI_PHYMODE_ZMII;
307 bis->bi_phymode[1] = BI_PHYMODE_ZMII;
308 bis->bi_phymode[2] = BI_PHYMODE_ZMII;
309 bis->bi_phymode[3] = BI_PHYMODE_ZMII;
313 /* Ensure we set up MDIO for this devnum and ONLY this devnum */
314 zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
316 out32 (ZMII_FER, zmiifer);
317 out32 (RGMII_FER, rmiifer);
322 #endif /* CONFIG_440_GX */
324 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
327 unsigned long reg = 0;
330 unsigned long duplex;
331 unsigned long failsafe;
333 unsigned short devnum;
334 unsigned short reg_short;
335 #if defined(CONFIG_440GX) || defined(CONFIG_440SP) || defined(CONFIG_440SPE)
337 #if defined(CONFIG_440GX) || defined(CONFIG_440SPE)
341 #if defined(CONFIG_440SPE)
346 EMAC_4XX_HW_PST hw_p = dev->priv;
348 /* before doing anything, figure out if we have a MAC address */
350 if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
351 printf("ERROR: ethaddr not set!\n");
355 #if defined(CONFIG_440GX) || defined(CONFIG_440SP) || defined(CONFIG_440SPE)
356 /* Need to get the OPB frequency so we can access the PHY */
357 get_sys_info (&sysinfo);
361 mtmsr (msr & ~(MSR_EE)); /* disable interrupts */
363 devnum = hw_p->devnum;
368 * hw_p->stats.pkts_handled <= hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
369 * In the most cases hw_p->stats.pkts_handled = hw_p->stats.pkts_rx, but it
370 * is possible that new packets (without relationship with
371 * current transfer) have got the time to arrived before
372 * netloop calls eth_halt
374 printf ("About preceeding transfer (eth%d):\n"
375 "- Sent packet number %d\n"
376 "- Received packet number %d\n"
377 "- Handled packet number %d\n",
380 hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
382 hw_p->stats.pkts_tx = 0;
383 hw_p->stats.pkts_rx = 0;
384 hw_p->stats.pkts_handled = 0;
385 hw_p->print_speed = 1; /* print speed message again next time */
388 hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
389 hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
391 hw_p->rx_slot = 0; /* MAL Receive Slot */
392 hw_p->rx_i_index = 0; /* Receive Interrupt Queue Index */
393 hw_p->rx_u_index = 0; /* Receive User Queue Index */
395 hw_p->tx_slot = 0; /* MAL Transmit Slot */
396 hw_p->tx_i_index = 0; /* Transmit Interrupt Queue Index */
397 hw_p->tx_u_index = 0; /* Transmit User Queue Index */
399 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
401 /* NOTE: 440GX spec states that mode is mutually exclusive */
402 /* NOTE: Therefore, disable all other EMACS, since we handle */
403 /* NOTE: only one emac at a time */
408 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
409 out32 (ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
410 #elif defined(CONFIG_440GX)
411 ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
412 #elif defined(CONFIG_440GP)
414 out32 (ZMII_FER, ZMII_RMII | ZMII_MDI0);
416 if ((devnum == 0) || (devnum == 1)) {
417 out32 (ZMII_FER, (ZMII_FER_SMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
419 else { /* ((devnum == 2) || (devnum == 3)) */
420 out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
421 out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
422 (RGMII_FER_RGMII << RGMII_FER_V (3))));
426 out32 (ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
427 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
429 __asm__ volatile ("eieio");
431 /* reset emac so we have access to the phy */
432 #if defined(CONFIG_440SPE)
433 /* provide clocks for EMAC internal loopback */
434 mfsdr (sdr_mfr, mfr);
439 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
440 __asm__ volatile ("eieio");
443 while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
448 #if defined(CONFIG_440SPE)
449 /* remove clocks for EMAC internal loopback */
450 mfsdr (sdr_mfr, mfr);
455 #if defined(CONFIG_440GX) || defined(CONFIG_440SP) || defined(CONFIG_440SPE)
456 /* Whack the M1 register */
458 mode_reg &= ~0x00000038;
459 if (sysinfo.freqOPB <= 50000000);
460 else if (sysinfo.freqOPB <= 66666667)
461 mode_reg |= EMAC_M1_OBCI_66;
462 else if (sysinfo.freqOPB <= 83333333)
463 mode_reg |= EMAC_M1_OBCI_83;
464 else if (sysinfo.freqOPB <= 100000000)
465 mode_reg |= EMAC_M1_OBCI_100;
467 mode_reg |= EMAC_M1_OBCI_GT100;
469 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
470 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
472 /* wait for PHY to complete auto negotiation */
474 #ifndef CONFIG_CS8952_PHY
477 reg = CONFIG_PHY_ADDR;
479 #if defined (CONFIG_PHY1_ADDR)
481 reg = CONFIG_PHY1_ADDR;
484 #if defined (CONFIG_440GX)
486 reg = CONFIG_PHY2_ADDR;
489 reg = CONFIG_PHY3_ADDR;
493 reg = CONFIG_PHY_ADDR;
497 bis->bi_phynum[devnum] = reg;
499 #if defined(CONFIG_PHY_RESET)
501 * Reset the PHY only if it's the first time through;
502 * otherwise, just check the speeds & feeds
504 if (hw_p->first_init == 0) {
505 miiphy_reset (dev->name, reg);
507 #if defined(CONFIG_440GX) || defined(CONFIG_440SP) || defined(CONFIG_440SPE)
508 #if defined(CONFIG_CIS8201_PHY)
510 * Cicada 8201 PHY needs to have an extended register whacked
513 if ( ((devnum == 2) || (devnum ==3)) && (4 == ethgroup) ) {
514 #if defined(CONFIG_CIS8201_SHORT_ETCH)
515 miiphy_write (dev->name, reg, 23, 0x1300);
517 miiphy_write (dev->name, reg, 23, 0x1000);
520 * Vitesse VSC8201/Cicada CIS8201 errata:
521 * Interoperability problem with Intel 82547EI phys
522 * This work around (provided by Vitesse) changes
523 * the default timer convergence from 8ms to 12ms
525 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
526 miiphy_write (dev->name, reg, 0x08, 0x0200);
527 miiphy_write (dev->name, reg, 0x1f, 0x52b5);
528 miiphy_write (dev->name, reg, 0x02, 0x0004);
529 miiphy_write (dev->name, reg, 0x01, 0x0671);
530 miiphy_write (dev->name, reg, 0x00, 0x8fae);
531 miiphy_write (dev->name, reg, 0x1f, 0x2a30);
532 miiphy_write (dev->name, reg, 0x08, 0x0000);
533 miiphy_write (dev->name, reg, 0x1f, 0x0000);
534 /* end Vitesse/Cicada errata */
538 /* Start/Restart autonegotiation */
539 phy_setup_aneg (dev->name, reg);
542 #endif /* defined(CONFIG_PHY_RESET) */
544 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
547 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
549 if ((reg_short & PHY_BMSR_AUTN_ABLE)
550 && !(reg_short & PHY_BMSR_AUTN_COMP)) {
551 puts ("Waiting for PHY auto negotiation to complete");
553 while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
557 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
558 puts (" TIMEOUT !\n");
562 if ((i++ % 1000) == 0) {
565 udelay (1000); /* 1 ms */
566 miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
570 udelay (500000); /* another 500 ms (results in faster booting) */
572 #endif /* #ifndef CONFIG_CS8952_PHY */
574 speed = miiphy_speed (dev->name, reg);
575 duplex = miiphy_duplex (dev->name, reg);
577 if (hw_p->print_speed) {
578 hw_p->print_speed = 0;
579 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
580 (int) speed, (duplex == HALF) ? "HALF" : "FULL");
583 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
584 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
587 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
589 reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
594 /* Set ZMII/RGMII speed according to the phy link speed */
595 reg = in32 (ZMII_SSR);
596 if ( (speed == 100) || (speed == 1000) )
597 out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
599 out32 (ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));
601 if ((devnum == 2) || (devnum == 3)) {
603 reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
604 else if (speed == 100)
605 reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
607 reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
609 out32 (RGMII_SSR, reg);
611 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
613 /* set the Mal configuration reg */
614 #if defined(CONFIG_440GX) || defined(CONFIG_440SP) || defined(CONFIG_440SPE)
615 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
616 MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
618 mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
619 /* Errata 1.12: MAL_1 -- Disable MAL bursting */
620 if (get_pvr() == PVR_440GP_RB) {
621 mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
625 /* Free "old" buffers */
626 if (hw_p->alloc_tx_buf)
627 free (hw_p->alloc_tx_buf);
628 if (hw_p->alloc_rx_buf)
629 free (hw_p->alloc_rx_buf);
632 * Malloc MAL buffer descriptors, make sure they are
633 * aligned on cache line boundary size
634 * (401/403/IOP480 = 16, 405 = 32)
635 * and don't cross cache block boundaries.
638 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
639 ((2 * CFG_CACHELINE_SIZE) - 2));
640 if (NULL == hw_p->alloc_tx_buf)
642 if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
644 (mal_desc_t *) ((int) hw_p->alloc_tx_buf +
647 alloc_tx_buf & CACHELINE_MASK));
649 hw_p->tx = hw_p->alloc_tx_buf;
653 (mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
654 ((2 * CFG_CACHELINE_SIZE) - 2));
655 if (NULL == hw_p->alloc_rx_buf) {
656 free(hw_p->alloc_tx_buf);
657 hw_p->alloc_tx_buf = NULL;
661 if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
663 (mal_desc_t *) ((int) hw_p->alloc_rx_buf +
666 alloc_rx_buf & CACHELINE_MASK));
668 hw_p->rx = hw_p->alloc_rx_buf;
671 for (i = 0; i < NUM_TX_BUFF; i++) {
672 hw_p->tx[i].ctrl = 0;
673 hw_p->tx[i].data_len = 0;
674 if (hw_p->first_init == 0) {
676 (char *) malloc (ENET_MAX_MTU_ALIGNED);
677 if (NULL == hw_p->txbuf_ptr) {
678 free(hw_p->alloc_rx_buf);
679 free(hw_p->alloc_tx_buf);
680 hw_p->alloc_rx_buf = NULL;
681 hw_p->alloc_tx_buf = NULL;
682 for(j = 0; j < i; j++) {
683 free(hw_p->tx[j].data_ptr);
684 hw_p->tx[j].data_ptr = NULL;
688 hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
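/* the last descriptor wraps the MAL back to the start of the TX ring */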
689 if ((NUM_TX_BUFF - 1) == i)
690 hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
691 hw_p->tx_run[i] = -1;
693 printf ("TX_BUFF %d @ 0x%08lx\n", i,
694 (ulong) hw_p->tx[i].data_ptr);
698 for (i = 0; i < NUM_RX_BUFF; i++) {
699 hw_p->rx[i].ctrl = 0;
700 hw_p->rx[i].data_len = 0;
701 /* rx[i].data_ptr = (char *) &rx_buff[i]; */
702 hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
703 if ((NUM_RX_BUFF - 1) == i)
704 hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
705 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
706 hw_p->rx_ready[i] = -1;
708 printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) rx[i].data_ptr);
714 reg |= dev->enetaddr[0]; /* set high address */
716 reg |= dev->enetaddr[1];
718 out32 (EMAC_IAH + hw_p->hw_addr, reg);
721 reg |= dev->enetaddr[2]; /* set low address */
723 reg |= dev->enetaddr[3];
725 reg |= dev->enetaddr[4];
727 reg |= dev->enetaddr[5];
729 out32 (EMAC_IAL + hw_p->hw_addr, reg);
733 /* setup MAL tx & rx channel pointers */
734 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
735 mtdcr (maltxctp2r, hw_p->tx);
737 mtdcr (maltxctp1r, hw_p->tx);
739 #if defined(CONFIG_440)
740 mtdcr (maltxbattr, 0x0);
741 mtdcr (malrxbattr, 0x0);
743 mtdcr (malrxctp1r, hw_p->rx);
744 /* set RX buffer size */
745 mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
747 #if defined (CONFIG_440GX)
749 /* setup MAL tx & rx channel pointers */
750 mtdcr (maltxbattr, 0x0);
751 mtdcr (malrxbattr, 0x0);
752 mtdcr (maltxctp2r, hw_p->tx);
753 mtdcr (malrxctp2r, hw_p->rx);
754 /* set RX buffer size */
755 mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
758 /* setup MAL tx & rx channel pointers */
759 mtdcr (maltxbattr, 0x0);
760 mtdcr (maltxctp3r, hw_p->tx);
761 mtdcr (malrxbattr, 0x0);
762 mtdcr (malrxctp3r, hw_p->rx);
763 /* set RX buffer size */
764 mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
766 #endif /* CONFIG_440GX */
769 /* setup MAL tx & rx channel pointers */
770 #if defined(CONFIG_440)
771 mtdcr (maltxbattr, 0x0);
772 mtdcr (malrxbattr, 0x0);
774 mtdcr (maltxctp0r, hw_p->tx);
775 mtdcr (malrxctp0r, hw_p->rx);
776 /* set RX buffer size */
777 mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
781 /* Enable MAL transmit and receive channels */
782 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
783 mtdcr (maltxcasr, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
785 mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
787 mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
789 /* set transmit enable & receive enable */
790 out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);
792 /* set receive fifo to 4k and tx fifo to 2k */
793 mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
794 mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;
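/* program the EMAC data rate in mode register 1 according to the negotiated link speed */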
797 if (speed == _1000BASET) {
798 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
800 mfsdr (sdr_pfc1, pfc1);
801 pfc1 |= SDR0_PFC1_EM_1000;
802 mtsdr (sdr_pfc1, pfc1);
804 mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
805 } else if (speed == _100BASET)
806 mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
808 mode_reg = mode_reg & ~0x00C00000; /* 10 MBPS */
810 mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;
812 out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
814 /* Enable broadcast and individual address */
815 /* TBS: enabling runts as some misbehaved nics will send runts */
816 out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
818 /* we probably need to set the tx mode1 reg? maybe at tx time */
820 /* set transmit request threshold register */
821 out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000); /* 256 byte threshold */
823 /* set receive low/high water mark register */
824 #if defined(CONFIG_440)
825 /* 440s have a 64 byte burst length */
826 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
828 /* 405s have a 16 byte burst length */
829 out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
830 #endif /* defined(CONFIG_440) */
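/* set transmit mode register 1 (transmit request thresholds) */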
831 out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);
833 /* Set fifo limit entry in tx mode 0 */
834 out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
836 out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
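/* enable the EMAC error interrupts of interest; SYE is only added when running at 100 Mbps */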
839 hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
840 if (speed == _100BASET)
841 hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
843 out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff); /* clear pending interrupts */
844 out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);
846 if (hw_p->first_init == 0) {
848 * Connect interrupt service routines
850 irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
851 (interrupt_handler_t *) enetInt, dev);
854 mtmsr (msr); /* enable interrupts again */
857 hw_p->first_init = 1;
863 static int ppc_4xx_eth_send (struct eth_device *dev, volatile void *ptr,
866 struct enet_frame *ef_ptr;
867 ulong time_start, time_now;
868 unsigned long temp_txm0;
869 EMAC_4XX_HW_PST hw_p = dev->priv;
871 ef_ptr = (struct enet_frame *) ptr;
873 /*-----------------------------------------------------------------------+
874 * Copy our address into the frame.
875 *-----------------------------------------------------------------------*/
876 (void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
878 /*-----------------------------------------------------------------------+
879 * If frame is too long or too short, modify length.
880 *-----------------------------------------------------------------------*/
881 /* TBS: where does the fragment go???? */
882 if (len > ENET_MAX_MTU)
885 /* memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
886 memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
888 /*-----------------------------------------------------------------------+
889 * set TX Buffer busy, and send it
890 *-----------------------------------------------------------------------*/
891 hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
892 EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
893 ~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
894 if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
895 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
897 hw_p->tx[hw_p->tx_slot].data_len = (short) len;
898 hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
900 __asm__ volatile ("eieio");
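/* kick the transmitter: setting GNP0 tells the EMAC to fetch the descriptor just marked ready */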
902 out32 (EMAC_TXM0 + hw_p->hw_addr,
903 in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
905 hw_p->stats.pkts_tx++;
908 /*-----------------------------------------------------------------------+
909 * poll until the packet is sent and then make sure it is OK
910 *-----------------------------------------------------------------------*/
911 time_start = get_timer (0);
913 temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
914 /* loop until either TINT turns on or 3 seconds elapse */
915 if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
916 /* transmit is done, so now check for errors
917 * If there is an error, an interrupt should
918 * happen when we return
920 time_now = get_timer (0);
921 if ((time_now - time_start) > 3000) {
931 #if defined (CONFIG_440)
933 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
935 * Hack: On 440SP all enet irq sources are located on UIC1
936 * Needs some cleanup. --sr
938 #define UIC0MSR uic1msr
939 #define UIC0SR uic1sr
941 #define UIC0MSR uic0msr
942 #define UIC0SR uic0sr
945 int enetInt (struct eth_device *dev)
948 int rc = -1; /* default to not us */
949 unsigned long mal_isr;
950 unsigned long emac_isr = 0;
951 unsigned long mal_rx_eob;
952 unsigned long my_uic0msr, my_uic1msr;
954 #if defined(CONFIG_440GX)
955 unsigned long my_uic2msr;
957 EMAC_4XX_HW_PST hw_p;
960 * Because the mal is generic, we need to get the current
963 #if defined(CONFIG_NET_MULTI)
971 /* enter loop that stays in interrupt code until nothing to service */
975 my_uic0msr = mfdcr (UIC0MSR);
976 my_uic1msr = mfdcr (uic1msr);
977 #if defined(CONFIG_440GX)
978 my_uic2msr = mfdcr (uic2msr);
980 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
981 && !(my_uic1msr & (UIC_ETH0 | UIC_ETH1 | UIC_MS | UIC_MTDE | UIC_MRDE))) {
985 #if defined (CONFIG_440GX)
986 if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
987 && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
992 /* get and clear controller status interrupts */
993 /* look at Mal and EMAC interrupts */
994 if ((my_uic0msr & (UIC_MRE | UIC_MTE))
995 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
996 /* we have a MAL interrupt */
997 mal_isr = mfdcr (malesr);
998 /* look for mal error */
999 if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
1000 mal_err (dev, mal_isr, my_uic0msr,
1001 MAL_UIC_DEF, MAL_UIC_ERR);
1007 /* port by port dispatch of emac interrupts */
1008 if (hw_p->devnum == 0) {
1009 if (UIC_ETH0 & my_uic1msr) { /* look for EMAC errors */
1010 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1011 if ((hw_p->emac_ier & emac_isr) != 0) {
1012 emac_err (dev, emac_isr);
1017 if ((hw_p->emac_ier & emac_isr)
1018 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1019 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1020 mtdcr (uic1sr, UIC_ETH0 | UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1021 return (rc); /* we had errors so get out */
1025 #if !defined(CONFIG_440SP)
1026 if (hw_p->devnum == 1) {
1027 if (UIC_ETH1 & my_uic1msr) { /* look for EMAC errors */
1028 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1029 if ((hw_p->emac_ier & emac_isr) != 0) {
1030 emac_err (dev, emac_isr);
1035 if ((hw_p->emac_ier & emac_isr)
1036 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1037 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1038 mtdcr (uic1sr, UIC_ETH1 | UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1039 return (rc); /* we had errors so get out */
1042 #if defined (CONFIG_440GX)
1043 if (hw_p->devnum == 2) {
1044 if (UIC_ETH2 & my_uic2msr) { /* look for EMAC errors */
1045 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1046 if ((hw_p->emac_ier & emac_isr) != 0) {
1047 emac_err (dev, emac_isr);
1052 if ((hw_p->emac_ier & emac_isr)
1053 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1054 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1055 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1056 mtdcr (uic2sr, UIC_ETH2);
1057 return (rc); /* we had errors so get out */
1061 if (hw_p->devnum == 3) {
1062 if (UIC_ETH3 & my_uic2msr) { /* look for EMAC errors */
1063 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1064 if ((hw_p->emac_ier & emac_isr) != 0) {
1065 emac_err (dev, emac_isr);
1070 if ((hw_p->emac_ier & emac_isr)
1071 || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
1072 mtdcr (UIC0SR, UIC_MRE | UIC_MTE); /* Clear */
1073 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1074 mtdcr (uic2sr, UIC_ETH3);
1075 return (rc); /* we had errors so get out */
1078 #endif /* CONFIG_440GX */
1079 #endif /* !CONFIG_440SP */
1081 /* handle MAL TX EOB interrupt from a tx */
1082 if (my_uic0msr & UIC_MTE) {
1083 mal_rx_eob = mfdcr (maltxeobisr);
1084 mtdcr (maltxeobisr, mal_rx_eob);
1085 mtdcr (UIC0SR, UIC_MTE);
1087 /* handle MAL RX EOB interrupt from a receive */
1088 /* check for EOB on valid channels */
1089 if (my_uic0msr & UIC_MRE) {
1090 mal_rx_eob = mfdcr (malrxeobisr);
1091 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1093 mtdcr(malrxeobisr, mal_rx_eob); */
1094 enet_rcv (dev, emac_isr);
1095 /* indicate that we serviced an interrupt */
1101 mtdcr (UIC0SR, UIC_MRE); /* Clear */
1102 mtdcr (uic1sr, UIC_MS | UIC_MTDE | UIC_MRDE); /* Clear */
1103 switch (hw_p->devnum) {
1105 mtdcr (uic1sr, UIC_ETH0);
1108 mtdcr (uic1sr, UIC_ETH1);
1110 #if defined (CONFIG_440GX)
1112 mtdcr (uic2sr, UIC_ETH2);
1115 mtdcr (uic2sr, UIC_ETH3);
1117 #endif /* CONFIG_440GX */
1126 #else /* CONFIG_440 */
1128 int enetInt (struct eth_device *dev)
1131 int rc = -1; /* default to not us */
1132 unsigned long mal_isr;
1133 unsigned long emac_isr = 0;
1134 unsigned long mal_rx_eob;
1135 unsigned long my_uicmsr;
1137 EMAC_4XX_HW_PST hw_p;
1140 * Because the mal is generic, we need to get the current
1143 #if defined(CONFIG_NET_MULTI)
1144 dev = eth_get_dev();
1151 /* enter loop that stays in interrupt code until nothing to service */
1155 my_uicmsr = mfdcr (uicmsr);
1157 if ((my_uicmsr & (MAL_UIC_DEF | EMAC_UIC_DEF)) == 0) { /* not for us */
1160 /* get and clear controller status interrupts */
1161 /* look at Mal and EMAC interrupts */
1162 if ((MAL_UIC_DEF & my_uicmsr) != 0) { /* we have a MAL interrupt */
1163 mal_isr = mfdcr (malesr);
1164 /* look for mal error */
1165 if ((my_uicmsr & MAL_UIC_ERR) != 0) {
1166 mal_err (dev, mal_isr, my_uicmsr, MAL_UIC_DEF, MAL_UIC_ERR);
1172 /* port by port dispatch of emac interrupts */
1174 if ((SEL_UIC_DEF(hw_p->devnum) & my_uicmsr) != 0) { /* look for EMAC errors */
1175 emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
1176 if ((hw_p->emac_ier & emac_isr) != 0) {
1177 emac_err (dev, emac_isr);
1182 if (((hw_p->emac_ier & emac_isr) != 0) || ((MAL_UIC_ERR & my_uicmsr) != 0)) {
1183 mtdcr (uicsr, MAL_UIC_DEF | SEL_UIC_DEF(hw_p->devnum)); /* Clear */
1184 return (rc); /* we had errors so get out */
1187 /* handle MAL TX EOB interrupt from a tx */
1188 if (my_uicmsr & UIC_MAL_TXEOB) {
1189 mal_rx_eob = mfdcr (maltxeobisr);
1190 mtdcr (maltxeobisr, mal_rx_eob);
1191 mtdcr (uicsr, UIC_MAL_TXEOB);
1193 /* handle MAL RX EOB interrupt from a receive */
1194 /* check for EOB on valid channels */
1195 if (my_uicmsr & UIC_MAL_RXEOB)
1197 mal_rx_eob = mfdcr (malrxeobisr);
1198 if ((mal_rx_eob & (0x80000000 >> hw_p->devnum)) != 0) { /* call emac routine for channel x */
1200 mtdcr(malrxeobisr, mal_rx_eob); */
1201 enet_rcv (dev, emac_isr);
1202 /* indicate that we serviced an interrupt */
1207 mtdcr (uicsr, MAL_UIC_DEF|EMAC_UIC_DEF|EMAC_UIC_DEF1); /* Clear */
1214 #endif /* CONFIG_440 */
1216 /*-----------------------------------------------------------------------------+
1218 *-----------------------------------------------------------------------------*/
1219 static void mal_err (struct eth_device *dev, unsigned long isr,
1220 unsigned long uic, unsigned long maldef,
1221 unsigned long mal_errr)
1223 EMAC_4XX_HW_PST hw_p = dev->priv;
1225 mtdcr (malesr, isr); /* clear interrupt */
1227 /* clear DE interrupt */
1228 mtdcr (maltxdeir, 0xC0000000);
1229 mtdcr (malrxdeir, 0x80000000);
1231 #ifdef INFO_4XX_ENET
1232 printf ("\nMAL error occured.... ISR = %lx UIC = = %lx MAL_DEF = %lx MAL_ERR= %lx \n", isr, uic, maldef, mal_errr);
1235 eth_init (hw_p->bis); /* start again... */
1238 /*-----------------------------------------------------------------------------+
1239 * EMAC Error Routine
1240 *-----------------------------------------------------------------------------*/
1241 static void emac_err (struct eth_device *dev, unsigned long isr)
1243 EMAC_4XX_HW_PST hw_p = dev->priv;
1245 printf ("EMAC%d error occured.... ISR = %lx\n", hw_p->devnum, isr);
1246 out32 (EMAC_ISR + hw_p->hw_addr, isr);
1249 /*-----------------------------------------------------------------------------+
1250 * enet_rcv() handles the ethernet receive data
1251 *-----------------------------------------------------------------------------*/
1252 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1254 struct enet_frame *ef_ptr;
1255 unsigned long data_len;
1256 unsigned long rx_eob_isr;
1257 EMAC_4XX_HW_PST hw_p = dev->priv;
1263 rx_eob_isr = mfdcr (malrxeobisr);
1264 if ((0x80000000 >> hw_p->devnum) & rx_eob_isr) {
1266 mtdcr (malrxeobisr, rx_eob_isr);
1269 while (1) { /* do all */
1272 if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1273 || (loop_count >= NUM_RX_BUFF))
1277 if (NUM_RX_BUFF == hw_p->rx_slot)
1280 data_len = (unsigned long) hw_p->rx[i].data_len; /* Get len */
1282 if (data_len > ENET_MAX_MTU) /* Check len */
1285 if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) { /* Check Errors */
1287 hw_p->stats.rx_err_log[hw_p->
1290 hw_p->rx_err_index++;
1291 if (hw_p->rx_err_index ==
1293 hw_p->rx_err_index =
1296 } /* data_len < max mtu */
1298 if (!data_len) { /* no data */
1299 hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY; /* Free Recv Buffer */
1301 hw_p->stats.data_len_err++; /* Error at Rx */
1306 /* Check if user has already eaten buffer */
1307 /* if not => ERROR */
1308 else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1309 if (hw_p->is_receiving)
1310 printf ("ERROR : Receive buffers are full!\n");
1313 hw_p->stats.rx_frames++;
1314 hw_p->stats.rx += data_len;
1315 ef_ptr = (struct enet_frame *) hw_p->rx[i].
1317 #ifdef INFO_4XX_ENET
1318 hw_p->stats.pkts_rx++;
1323 hw_p->rx_ready[hw_p->rx_i_index] = i;
1325 if (NUM_RX_BUFF == hw_p->rx_i_index)
1326 hw_p->rx_i_index = 0;
1329 * free receive buffer only when
1330 * buffer has been handled (eth_rx)
1331 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1335 } /* if EMACK_RXCHL */
1339 static int ppc_4xx_eth_rx (struct eth_device *dev)
1344 EMAC_4XX_HW_PST hw_p = dev->priv;
1346 hw_p->is_receiving = 1; /* tell driver */
1350 * use ring buffer and
1351 * get index from rx buffer descriptor queue
1353 user_index = hw_p->rx_ready[hw_p->rx_u_index];
1354 if (user_index == -1) {
1356 break; /* nothing received - leave for() loop */
1360 mtmsr (msr & ~(MSR_EE));
1362 length = hw_p->rx[user_index].data_len;
1364 /* Pass the packet up to the protocol layers. */
1365 /* NetReceive(NetRxPackets[rxIdx], length - 4); */
1366 /* NetReceive(NetRxPackets[i], length); */
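/* hand the frame to the stack, dropping the trailing 4-byte FCS */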
1367 NetReceive (NetRxPackets[user_index], length - 4);
1368 /* Free Recv Buffer */
1369 hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1370 /* Free rx buffer descriptor queue */
1371 hw_p->rx_ready[hw_p->rx_u_index] = -1;
1373 if (NUM_RX_BUFF == hw_p->rx_u_index)
1374 hw_p->rx_u_index = 0;
1376 #ifdef INFO_4XX_ENET
1377 hw_p->stats.pkts_handled++;
1380 mtmsr (msr); /* Enable IRQ's */
1383 hw_p->is_receiving = 0; /* tell driver */
1388 int ppc_4xx_eth_initialize (bd_t * bis)
1390 static int virgin = 0;
1391 struct eth_device *dev;
1393 EMAC_4XX_HW_PST hw = NULL;
1395 #if defined(CONFIG_440GX)
1398 mfsdr (sdr_pfc1, pfc1);
1399 pfc1 &= ~(0x01e00000);
1401 mtsdr (sdr_pfc1, pfc1);
1403 /* set phy num and mode */
1404 bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1405 bis->bi_phymode[0] = 0;
1407 #if defined(CONFIG_PHY1_ADDR)
1408 bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1409 bis->bi_phymode[1] = 0;
1411 #if defined(CONFIG_440GX)
1412 bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1413 bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
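/* EMAC2 and EMAC3 on the 440GX run behind the RGMII bridge (BI_PHYMODE_RGMII) */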
1414 bis->bi_phymode[2] = 2;
1415 bis->bi_phymode[3] = 2;
1417 ppc_4xx_eth_setup_bridge(0, bis);
1420 for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1422 /* See if we can actually bring up the interface; otherwise, skip it */
1424 default: /* fall through */
1426 if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
1427 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1431 #ifdef CONFIG_HAS_ETH1
1433 if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
1434 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1439 #ifdef CONFIG_HAS_ETH2
1441 if (memcmp (bis->bi_enet2addr, "\0\0\0\0\0\0", 6) == 0) {
1442 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1447 #ifdef CONFIG_HAS_ETH3
1449 if (memcmp (bis->bi_enet3addr, "\0\0\0\0\0\0", 6) == 0) {
1450 bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1457 /* Allocate device structure */
1458 dev = (struct eth_device *) malloc (sizeof (*dev));
1460 printf ("ppc_4xx_eth_initialize: "
1461 "Cannot allocate eth_device %d\n", eth_num);
1464 memset(dev, 0, sizeof(*dev));
1466 /* Allocate our private use data */
1467 hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1469 printf ("ppc_4xx_eth_initialize: "
1470 "Cannot allocate private hw data for eth_device %d",
1475 memset(hw, 0, sizeof(*hw));
1478 default: /* fall through */
1481 memcpy (dev->enetaddr, bis->bi_enetaddr, 6);
1483 #ifdef CONFIG_HAS_ETH1
1485 hw->hw_addr = 0x100;
1486 memcpy (dev->enetaddr, bis->bi_enet1addr, 6);
1489 #ifdef CONFIG_HAS_ETH2
1491 hw->hw_addr = 0x400;
1492 memcpy (dev->enetaddr, bis->bi_enet2addr, 6);
1495 #ifdef CONFIG_HAS_ETH3
1497 hw->hw_addr = 0x600;
1498 memcpy (dev->enetaddr, bis->bi_enet3addr, 6);
1503 hw->devnum = eth_num;
1504 hw->print_speed = 1;
1506 sprintf (dev->name, "ppc_4xx_eth%d", eth_num);
1507 dev->priv = (void *) hw;
1508 dev->init = ppc_4xx_eth_init;
1509 dev->halt = ppc_4xx_eth_halt;
1510 dev->send = ppc_4xx_eth_send;
1511 dev->recv = ppc_4xx_eth_rx;
1514 /* set the MAL IER ??? names may change with new spec ??? */
1515 #if defined(CONFIG_440SPE)
1517 MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
1518 MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
1521 MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
1522 MAL_IER_OPBE | MAL_IER_PLBE;
1524 mtdcr (malesr, 0xffffffff); /* clear pending interrupts */
1525 mtdcr (maltxdeir, 0xffffffff); /* clear pending interrupts */
1526 mtdcr (malrxdeir, 0xffffffff); /* clear pending interrupts */
1527 mtdcr (malier, mal_ier);
1529 /* install MAL interrupt handler */
1530 irq_install_handler (VECNUM_MS,
1531 (interrupt_handler_t *) enetInt,
1533 irq_install_handler (VECNUM_MTE,
1534 (interrupt_handler_t *) enetInt,
1536 irq_install_handler (VECNUM_MRE,
1537 (interrupt_handler_t *) enetInt,
1539 irq_install_handler (VECNUM_TXDE,
1540 (interrupt_handler_t *) enetInt,
1542 irq_install_handler (VECNUM_RXDE,
1543 (interrupt_handler_t *) enetInt,
1548 #if defined(CONFIG_NET_MULTI)
1554 #if defined(CONFIG_NET_MULTI)
1555 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1556 miiphy_register (dev->name,
1557 emac4xx_miiphy_read, emac4xx_miiphy_write);
1560 } /* end for each supported device */
1565 #if !defined(CONFIG_NET_MULTI)
1566 void eth_halt (void) {
1568 ppc_4xx_eth_halt(emac0_dev);
1574 int eth_init (bd_t *bis)
1576 ppc_4xx_eth_initialize(bis);
1578 return ppc_4xx_eth_init(emac0_dev, bis);
1580 printf("ERROR: ethaddr not set!\n");
1585 int eth_send(volatile void *packet, int length)
1587 return (ppc_4xx_eth_send(emac0_dev, packet, length));
1592 return (ppc_4xx_eth_rx(emac0_dev));
1595 int emac4xx_miiphy_initialize (bd_t * bis)
1597 #if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
1598 miiphy_register ("ppc_4xx_eth0",
1599 emac4xx_miiphy_read, emac4xx_miiphy_write);
1604 #endif /* !defined(CONFIG_NET_MULTI) */
1606 #endif /* #if (CONFIG_COMMANDS & CFG_CMD_NET) */