/*
 * Copyright (C) 2006-2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>

#include <asm/of_platform.h>
#include <asm/uaccess.h>
#include <asm/immap_qe.h>
#include <asm/ucc_fast.h>

#include "ucc_geth_mii.h"
#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)
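
/*
 * These wrappers only prepend the log level and append a newline;
 * e.g. ugeth_err("bad ring %d", i) expands to
 * printk(KERN_ERR "bad ring %d\n", i).
 */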
#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)
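/* (NETIF_MSG_IFUP << 1) - 1 is the usual netif-message idiom: shifting
 * the IFUP bit left once and subtracting one yields a mask with IFUP
 * and every lower-order message bit set. */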
static DEFINE_SPINLOCK(ugeth_lock);
static struct {
	int msg_enable;
} debug = { .msg_enable = UGETH_MSG_DEFAULT };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
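
/* Usage sketch (assuming the module is built as ucc_geth):
 *	modprobe ucc_geth debug=0xffff
 * per the description above, 0xffff enables all message categories. */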
static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)), *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));

	printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
}
#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif /* CONFIG_UGETH_FILTERING */
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	}
	spin_unlock_irqrestore(&ugeth_lock, flags);
	return NULL;
}
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));
	skb->dev = ugeth->dev;

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(NULL, skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));
	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));

	return skb;
}
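
/*
 * Sketch of the skb_reserve() math above, assuming the alignment
 * constant is a power of two such as 64: if skb->data ends in 0x10,
 * then 64 - (0x10 & 63) = 48 bytes are reserved and the data pointer
 * lands on the next 64-byte boundary.
 */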
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If we cannot allocate a data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    enum qe_risc_allocation risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
		}
		p_start++;
	}

	return 0;
}
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  enum qe_risc_allocation risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
		}
		p_start++;
	}

	return 0;
}
#ifdef CONFIG_UGETH_FILTERING
static struct enet_addr_container *get_enet_addr_container(void)
{
	struct enet_addr_container *enet_addr_cont;

	/* allocate memory */
	enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
	if (!enet_addr_cont) {
		ugeth_err("%s: No memory for enet_addr_container object.",
			  __FUNCTION__);
		return NULL;
	}

	return enet_addr_cont;
}
#endif /* CONFIG_UGETH_FILTERING */

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}
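
/*
 * E.g. for mac[] = {0x00, 0x04, 0x9f, 0x01, 0x02, 0x03} the three
 * half-words written above are 0x0302, 0x019f and 0x0400 - the
 * byte-reversed order the address-recognition hardware expects.
 */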
#ifdef CONFIG_UGETH_FILTERING
static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Ethernet frames are defined in Little Endian mode, */
	/* therefore to insert the address we reverse the bytes. */
	set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);

	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode, therefore to
	insert the address into the hash (Big Endian mode), we reverse the
	bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}
#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm |= UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Enable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 |= MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}

static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	struct ucc_geth __iomem *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm &= ~UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Disable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}
#endif /* MAGIC_PACKET */
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
	int i;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
	/* Tx firmware only if user handed pointer and driver actually
	gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}
	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}
	/* Hardware only if user handed pointer and driver actually
	gathers hardware statistics */
	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}
static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}
static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		   (u32) & ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		   (u32) & ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
			   ugeth->p_tx_glbl_pram->iphoffset[0]);
		ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
			   ugeth->p_tx_glbl_pram->iphoffset[1]);
		ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
			   ugeth->p_tx_glbl_pram->iphoffset[2]);
		ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
			   ugeth->p_tx_glbl_pram->iphoffset[3]);
		ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
			   ugeth->p_tx_glbl_pram->iphoffset[4]);
		ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
			   ugeth->p_tx_glbl_pram->iphoffset[5]);
		ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
			   ugeth->p_tx_glbl_pram->iphoffset[6]);
		ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
			   ugeth->p_tx_glbl_pram->iphoffset[7]);
		ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
			   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			   (u32) & ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
			   in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			   (u32) & ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			   (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) & ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr : addr - 0x%08x, val - 0x%08x",
			     (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}
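
/*
 * Illustration only: if the IPGIFG_*_SHIFT constants place part 1 in
 * bits 31-24, part 2 in bits 23-16, minimum IFG enforcement in bits
 * 15-8 and the back-to-back IPG in bits 7-0 (an assumption about the
 * header, not a fact established in this file), the defaults above
 * (0x40, 0x60, 0x50, 0x60 in ugeth_primary_info) would pack into a
 * register value of 0x40605060.
 */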
int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	value = in_be32(upsmr_register);
	value |= automatic_flow_control_mode;
	out_be32(upsmr_register, value);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     u32 __iomem *upsmr_register,
					     u16 __iomem *uescr_register)
{
	u32 upsmr_value = 0;
	u16 uescr_value = 0;

	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics) {
		upsmr_value = in_be32(upsmr_register);
		upsmr_value |= UPSMR_HSE;
		out_be32(upsmr_register, upsmr_value);
	}

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}
static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		u32 __iomem *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		u32 __iomem *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		u16 __iomem *temoder_register,
		u32 __iomem *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */
	u16 temoder_value;
	u32 remoder_value;

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		temoder_value = in_be16(temoder_register);
		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
		out_be16(temoder_register, temoder_value);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		remoder_value = in_be32(remoder_register);
		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
		out_be32(remoder_register, remoder_value);
	}

	return 0;
}
static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */
	/* station address byte 5  station address byte 4 */
	/* station address byte 3  station address byte 2 */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */
	value = 0;
	/* station address byte 1  station address byte 0 */
	/* reserved                reserved */
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}
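
/*
 * Worked example of the layout described above: station address
 * 0x12345678ABCD yields MACSTNADDR1 = 0xCDAB7856 (bytes 5..2) and
 * MACSTNADDR2 = 0x34120000 (bytes 1..0 in the upper half-word).
 */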
static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	value = in_be32(maccfg2_register);
	value &= ~MACCFG2_PREL_MASK;
	value |= (preamble_length << MACCFG2_PREL_SHIFT);
	out_be32(maccfg2_register, value);
	return 0;
}
static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, u32 __iomem *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UPSMR_BRO;
	else
		value &= ~UPSMR_BRO;

	if (receive_short_frames)
		value |= UPSMR_RSH;
	else
		value &= ~UPSMR_RSH;

	if (promiscuous)
		value |= UPSMR_PRO;
	else
		value &= ~UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}
static int init_max_rx_buff_len(u16 max_rx_buf_len,
				u16 __iomem *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}
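
/*
 * Given the 128-byte granularity noted above: the driver default of
 * 1536 (= 12 * 128) passes this check, while e.g. 1500 would be
 * rejected with -EINVAL.
 */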
static int init_min_frame_len(u16 min_frame_length,
			      u16 __iomem *minflr_register,
			      u16 __iomem *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}
static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
	struct ucc_geth_info *ug_info;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_fast __iomem *uf_regs;
	int ret_val;
	u32 upsmr, maccfg2, tbiBaseAddress;
	u16 value;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_info = ugeth->ug_info;
	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	/* Set MACCFG2 */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if ((ugeth->max_speed == SPEED_10) ||
	    (ugeth->max_speed == SPEED_100))
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
	else if (ugeth->max_speed == SPEED_1000)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
	maccfg2 |= ug_info->padAndCrc;
	out_be32(&ug_regs->maccfg2, maccfg2);

	/* Set UPSMR */
	upsmr = in_be32(&uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		upsmr |= UPSMR_RPM;
		switch (ugeth->max_speed) {
		case SPEED_10:
			upsmr |= UPSMR_R10M;
			/* FALLTHROUGH */
		case SPEED_100:
			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
				upsmr |= UPSMR_RMM;
		}
	}
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		upsmr |= UPSMR_TBIM;
	}
	out_be32(&uf_regs->upsmr, upsmr);

	/* Disable autonegotiation in tbi mode, because by default it
	comes up in autonegotiation mode. */
	/* Note that this depends on proper setting in utbipar register. */
	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
		tbiBaseAddress = in_be32(&ug_regs->utbipar);
		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value = ugeth->phydev->bus->read(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		ugeth->phydev->bus->write(ugeth->phydev->bus,
				(u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
	}

	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);

	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
	if (ret_val != 0) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
				  __FUNCTION__);
		return ret_val;
	}

	return 0;
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the ugeth structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_geth __iomem *ug_regs;
	struct ucc_fast __iomem *uf_regs;
	struct phy_device *phydev = ugeth->phydev;
	unsigned long flags;
	int new_state = 0;

	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	spin_lock_irqsave(&ugeth->lock, flags);

	if (phydev->link) {
		u32 tempval = in_be32(&ug_regs->maccfg2);
		u32 upsmr = in_be32(&uf_regs->upsmr);
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != ugeth->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FDX);
			else
				tempval |= MACCFG2_FDX;
			ugeth->oldduplex = phydev->duplex;
		}

		if (phydev->speed != ugeth->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_1000:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_BYTE);
				break;
			case SPEED_100:
			case SPEED_10:
				tempval = ((tempval &
					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
					   MACCFG2_INTERFACE_MODE_NIBBLE);
				/* if reduced mode, re-set UPSMR.R10M */
				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
					if (phydev->speed == SPEED_10)
						upsmr |= UPSMR_R10M;
					else
						upsmr &= ~(UPSMR_R10M);
				}
				break;
			default:
				if (netif_msg_link(ugeth))
					ugeth_warn(
						"%s: Ack! Speed (%d) is not 10/100/1000!",
						dev->name, phydev->speed);
				break;
			}
			ugeth->oldspeed = phydev->speed;
		}

		out_be32(&ug_regs->maccfg2, tempval);
		out_be32(&uf_regs->upsmr, upsmr);

		if (!ugeth->oldlink) {
			new_state = 1;
			ugeth->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (ugeth->oldlink) {
		new_state = 1;
		ugeth->oldlink = 0;
		ugeth->oldspeed = 0;
		ugeth->oldduplex = -1;
	}

	if (new_state && netif_msg_link(ugeth))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&ugeth->lock, flags);
}
/* Configure the PHY for dev.
 * returns 0 if success. -1 if failure
 */
static int init_phy(struct net_device *dev)
{
	struct ucc_geth_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus,
		 priv->ug_info->phy_address);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);

	if (IS_ERR(phydev)) {
		printk("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	phydev->supported &= (ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full |
			      ADVERTISED_100baseT_Half |
			      ADVERTISED_100baseT_Full);

	if (priv->max_speed == SPEED_1000)
		phydev->supported |= ADVERTISED_1000baseT_Full;

	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u32 temp;

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	temp = in_be32(uccf->p_uccm);
	temp &= ~UCCE_GRA;
	out_be32(uccf->p_uccm, temp);
	out_be32(uccf->p_ucce, UCCE_GRA);	/* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCCE_GRA));

	uccf->stopped_tx = 1;

	return 0;
}
static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;
	u8 temp;

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);

	/* Keep issuing command and checking acknowledge bit until
	it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     QE_CR_PROTOCOL_ETHERNET, 0);

		temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uccf->stopped_rx = 1;

	return 0;
}
static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
	uccf->stopped_tx = 0;

	return 0;
}

static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
	struct ucc_fast_private *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_rx = 0;

	return 0;
}
static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}
static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
{
	struct ucc_fast_private *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		if (netif_msg_probe(ugeth))
			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
		ugeth_graceful_stop_tx(ugeth);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
		ugeth_graceful_stop_rx(ugeth);

	ucc_fast_disable(ugeth->uccf, mode);	/* OK to do even if not enabled */

	return 0;
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
{
	ucc_fast_dump_regs(ugeth->uccf);
	dump_regs(ugeth);
	dump_bds(ugeth);
}
#ifdef CONFIG_UGETH_FILTERING
static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
					     p_UccGethTadParams,
					     struct qe_fltr_tad *qe_fltr_tad)
{
	u16 temp;

	/* Zero serialized TAD */
	memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);

	qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V;	/* Must have this */
	if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
	    (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
	    || (p_UccGethTadParams->vnontag_op !=
		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
	    )
		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
	if (p_UccGethTadParams->reject_frame)
		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
	temp =
	    (u16) (((u16) p_UccGethTadParams->
		    vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
	qe_fltr_tad->serialized[0] |= (u8) (temp >> 8);	/* upper bits */

	qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff);	/* lower bits */
	if (p_UccGethTadParams->vnontag_op ==
	    UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
		qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
	qe_fltr_tad->serialized[1] |=
	    p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;

	qe_fltr_tad->serialized[2] |=
	    p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
	qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
	qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);

	return 0;
}
static struct enet_addr_container_t
    *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
						 struct enet_addr *p_enet_addr)
{
	struct enet_addr_container *enet_addr_cont;
	struct list_head *p_lh;
	u16 i, num;
	int j;
	u8 *p_counter;

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	num = *p_counter;

	for (i = 0; i < num; i++) {
		enet_addr_cont =
		    (struct enet_addr_container *)
		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
		for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
			if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
				break;
			if (j == 0)
				return enet_addr_cont;	/* Found */
		}
		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	}

	return NULL;
}
static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
						 struct enet_addr *p_enet_addr)
{
	enum ucc_geth_enet_address_recognition_location location;
	struct enet_addr_container *enet_addr_cont;
	struct list_head *p_lh;
	u8 i;
	u32 limit;
	u8 *p_counter;

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		p_lh = &ugeth->group_hash_q;
		limit = ugeth->ug_info->maxGroupAddrInHash;
		location =
		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		p_lh = &ugeth->ind_hash_q;
		limit = ugeth->ug_info->maxIndAddrInHash;
		location =
		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	if ((enet_addr_cont =
	     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
		list_add(p_lh, &enet_addr_cont->node);	/* Put it back */
		return 0;
	}

	if ((!p_lh) || (!(*p_counter < limit)))
		return -EBUSY;
	if (!(enet_addr_cont = get_enet_addr_container()))
		return -ENOMEM;
	for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
		(enet_addr_cont->address)[i] = (*p_enet_addr)[i];
	enet_addr_cont->location = location;
	enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	++(*p_counter);

	hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
	return 0;
}
static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
						   struct enet_addr *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
	struct enet_addr_container *enet_addr_cont;
	struct ucc_fast_private *uccf;
	enum comm_dir comm_dir;
	u16 i, num;
	struct list_head *p_lh;
	u32 *addr_h, *addr_l;
	u8 *p_counter;

	uccf = ugeth->uccf;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	if (!(enet_addr_cont =
	      ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
		return -ENOENT;

	/* It's been found and removed from the CQ. */
	/* Now destroy its container */
	put_enet_addr_container(enet_addr_cont);

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		addr_h = &(p_82xx_addr_filt->gaddr_h);
		addr_l = &(p_82xx_addr_filt->gaddr_l);
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		addr_h = &(p_82xx_addr_filt->iaddr_h);
		addr_l = &(p_82xx_addr_filt->iaddr_l);
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	comm_dir = 0;
	if (uccf->enabled_tx)
		comm_dir |= COMM_DIR_TX;
	if (uccf->enabled_rx)
		comm_dir |= COMM_DIR_RX;
	if (comm_dir)
		ugeth_disable(ugeth, comm_dir);

	/* Clear the hash table. */
	out_be32(addr_h, 0x00000000);
	out_be32(addr_l, 0x00000000);

	/* Add all remaining CQ elements back into hash */
	num = --(*p_counter);
	for (i = 0; i < num; i++) {
		enet_addr_cont =
		    (struct enet_addr_container *)
		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
		hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	}

	if (comm_dir)
		ugeth_enable(ugeth, comm_dir);

	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */
1993 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1998 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1999 struct ucc_fast_private *uccf;
2000 enum comm_dir comm_dir;
2001 struct list_head *p_lh;
2003 u32 __iomem *addr_h;
2004 u32 __iomem *addr_l;
2008 uccf = ugeth->uccf;
2009 p_82xx_addr_filt =
2010 (struct ucc_geth_82xx_address_filtering_pram __iomem *)
2011 ugeth->p_rx_glbl_pram->addressfiltering;
2013 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2014 addr_h = &(p_82xx_addr_filt->gaddr_h);
2015 addr_l = &(p_82xx_addr_filt->gaddr_l);
2016 p_lh = &ugeth->group_hash_q;
2017 p_counter = &(ugeth->numGroupAddrInHash);
2018 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2019 addr_h = &(p_82xx_addr_filt->iaddr_h);
2020 addr_l = &(p_82xx_addr_filt->iaddr_l);
2021 p_lh = &ugeth->ind_hash_q;
2022 p_counter = &(ugeth->numIndAddrInHash);
2023 } else
2024 return -EINVAL;
2026 comm_dir = 0;
2027 if (uccf->enabled_tx)
2028 comm_dir |= COMM_DIR_TX;
2029 if (uccf->enabled_rx)
2030 comm_dir |= COMM_DIR_RX;
2031 if (comm_dir)
2032 ugeth_disable(ugeth, comm_dir);
2034 /* Clear the hash table. */
2035 out_be32(addr_h, 0x00000000);
2036 out_be32(addr_l, 0x00000000);
2042 num = *p_counter;
2043 /* Delete all remaining CQ elements */
2044 for (i = 0; i < num; i++)
2045 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2047 *p_counter = 0;
2049 if (comm_dir)
2050 ugeth_enable(ugeth, comm_dir);
2052 return 0;
2053 }
2055 #ifdef CONFIG_UGETH_FILTERING
2056 static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
2057 struct enet_addr *p_enet_addr,
2058 u8 paddr_num)
2059 {
2060 int i;
2062 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2064 ("%s: multicast address added to paddr will have no "
2065 "effect - is this what you wanted?",
2068 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2069 /* store address in our database */
2070 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2071 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2072 /* put in hardware */
2073 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2075 #endif /* CONFIG_UGETH_FILTERING */
2077 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
2078 u8 paddr_num)
2079 {
2080 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2081 return hw_clear_addr_in_paddr(ugeth, paddr_num); /* clear in hardware */
2082 }
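2083 /* Free every MURAM and heap resource owned by this UCC instance. */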
2084 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2085 {
2086 u16 i, j;
2087 u8 __iomem *bd;
2089 if (!ugeth)
2090 return;
2092 if (ugeth->uccf)
2093 ucc_fast_free(ugeth->uccf);
2097 if (ugeth->p_thread_data_tx) {
2098 qe_muram_free(ugeth->thread_dat_tx_offset);
2099 ugeth->p_thread_data_tx = NULL;
2100 }
2101 if (ugeth->p_thread_data_rx) {
2102 qe_muram_free(ugeth->thread_dat_rx_offset);
2103 ugeth->p_thread_data_rx = NULL;
2104 }
2105 if (ugeth->p_exf_glbl_param) {
2106 qe_muram_free(ugeth->exf_glbl_param_offset);
2107 ugeth->p_exf_glbl_param = NULL;
2108 }
2109 if (ugeth->p_rx_glbl_pram) {
2110 qe_muram_free(ugeth->rx_glbl_pram_offset);
2111 ugeth->p_rx_glbl_pram = NULL;
2112 }
2113 if (ugeth->p_tx_glbl_pram) {
2114 qe_muram_free(ugeth->tx_glbl_pram_offset);
2115 ugeth->p_tx_glbl_pram = NULL;
2116 }
2117 if (ugeth->p_send_q_mem_reg) {
2118 qe_muram_free(ugeth->send_q_mem_reg_offset);
2119 ugeth->p_send_q_mem_reg = NULL;
2120 }
2121 if (ugeth->p_scheduler) {
2122 qe_muram_free(ugeth->scheduler_offset);
2123 ugeth->p_scheduler = NULL;
2124 }
2125 if (ugeth->p_tx_fw_statistics_pram) {
2126 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2127 ugeth->p_tx_fw_statistics_pram = NULL;
2128 }
2129 if (ugeth->p_rx_fw_statistics_pram) {
2130 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2131 ugeth->p_rx_fw_statistics_pram = NULL;
2132 }
2133 if (ugeth->p_rx_irq_coalescing_tbl) {
2134 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2135 ugeth->p_rx_irq_coalescing_tbl = NULL;
2136 }
2137 if (ugeth->p_rx_bd_qs_tbl) {
2138 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2139 ugeth->p_rx_bd_qs_tbl = NULL;
2140 }
2141 if (ugeth->p_init_enet_param_shadow) {
2142 return_init_enet_entries(ugeth,
2143 &(ugeth->p_init_enet_param_shadow->
2144 rxthread[0]),
2145 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2146 ugeth->ug_info->riscRx, 1);
2147 return_init_enet_entries(ugeth,
2148 &(ugeth->p_init_enet_param_shadow->
2149 txthread[0]),
2150 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2151 ugeth->ug_info->riscTx, 0);
2152 kfree(ugeth->p_init_enet_param_shadow);
2153 ugeth->p_init_enet_param_shadow = NULL;
2154 }
2155 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2156 bd = ugeth->p_tx_bd_ring[i];
2157 if (!bd)
2158 continue;
2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2160 if (ugeth->tx_skbuff[i][j]) {
2161 dma_unmap_single(NULL,
2162 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2163 (in_be32((u32 __iomem *)bd) &
2164 BD_LENGTH_MASK),
2165 DMA_TO_DEVICE);
2166 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2167 ugeth->tx_skbuff[i][j] = NULL;
2168 }
2169 bd += sizeof(struct qe_bd);
2170 }
2171 kfree(ugeth->tx_skbuff[i]);
2173 if (ugeth->p_tx_bd_ring[i]) {
2174 if (ugeth->ug_info->uf_info.bd_mem_part ==
2175 MEM_PART_SYSTEM)
2176 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2177 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2178 MEM_PART_MURAM)
2179 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2180 ugeth->p_tx_bd_ring[i] = NULL;
2181 }
2182 }
2183 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2184 if (ugeth->p_rx_bd_ring[i]) {
2185 /* Return existing data buffers in ring */
2186 bd = ugeth->p_rx_bd_ring[i];
2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2188 if (ugeth->rx_skbuff[i][j]) {
2189 dma_unmap_single(NULL,
2190 in_be32(&((struct qe_bd __iomem *)bd)->buf),
2191 ugeth->ug_info->
2192 uf_info.max_rx_buf_length +
2193 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2194 DMA_FROM_DEVICE);
2195 dev_kfree_skb_any(
2196 ugeth->rx_skbuff[i][j]);
2197 ugeth->rx_skbuff[i][j] = NULL;
2198 }
2199 bd += sizeof(struct qe_bd);
2200 }
2202 kfree(ugeth->rx_skbuff[i]);
2204 if (ugeth->ug_info->uf_info.bd_mem_part ==
2205 MEM_PART_SYSTEM)
2206 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2207 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2208 MEM_PART_MURAM)
2209 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2210 ugeth->p_rx_bd_ring[i] = NULL;
2211 }
2212 }
2213 while (!list_empty(&ugeth->group_hash_q))
2214 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2215 (dequeue(&ugeth->group_hash_q)));
2216 while (!list_empty(&ugeth->ind_hash_q))
2217 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2218 (dequeue(&ugeth->ind_hash_q)));
2219 }
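2220 /* Set promiscuous mode and program the group hash filter from the
2221 kernel's multicast list. */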
2222 static void ucc_geth_set_multi(struct net_device *dev)
2223 {
2224 struct ucc_geth_private *ugeth;
2225 struct dev_mc_list *dmi;
2226 struct ucc_fast __iomem *uf_regs;
2227 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2228 int i;
2230 ugeth = netdev_priv(dev);
2232 uf_regs = ugeth->uccf->uf_regs;
2234 if (dev->flags & IFF_PROMISC) {
2236 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
2238 } else {
2240 out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) & ~UPSMR_PRO);
2242 p_82xx_addr_filt =
2243 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2244 p_rx_glbl_pram->addressfiltering;
2246 if (dev->flags & IFF_ALLMULTI) {
2247 /* Catch all multicast addresses, so set the
2248 * filter to all 1's.
2249 */
2250 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2251 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2252 } else {
2253 /* Clear filter and add the addresses in the list.
2254 */
2255 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2256 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2258 dmi = dev->mc_list;
2260 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2262 /* Only support group multicast for now.
2263 */
2264 if (!(dmi->dmi_addr[0] & 1))
2265 continue;
2267 /* Ask CPM to run CRC and set bit in
2268 * filter mask.
2269 */
2270 hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
2271 }
2272 }
2273 }
2274 }
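2275 /* Quiesce the controller: stop the PHY, mask and clear interrupts, disable Rx/Tx and free resources. */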
2276 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2277 {
2278 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
2279 struct phy_device *phydev = ugeth->phydev;
2280 u32 tempval;
2282 ugeth_vdbg("%s: IN", __FUNCTION__);
2284 /* Disable the controller */
2285 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2287 /* Tell the kernel the link is down */
2288 phy_stop(phydev);
2290 /* Mask all interrupts */
2291 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2293 /* Clear all interrupts */
2294 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2296 /* Disable Rx and Tx */
2297 tempval = in_be32(&ug_regs->maccfg1);
2298 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2299 out_be32(&ug_regs->maccfg1, tempval);
2301 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2303 ucc_geth_memclean(ugeth);
2304 }
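2305 /* Sanity-check the board-specific ug_info before committing hardware resources. */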
2306 static int ucc_struct_init(struct ucc_geth_private *ugeth)
2307 {
2308 struct ucc_geth_info *ug_info;
2309 struct ucc_fast_info *uf_info;
2310 int i;
2312 ug_info = ugeth->ug_info;
2313 uf_info = &ug_info->uf_info;
2315 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2316 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2317 if (netif_msg_probe(ugeth))
2318 ugeth_err("%s: Bad memory partition value.",
2324 for (i = 0; i < ug_info->numQueuesRx; i++) {
2325 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2326 (ug_info->bdRingLenRx[i] %
2327 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2328 if (netif_msg_probe(ugeth))
2330 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2337 for (i = 0; i < ug_info->numQueuesTx; i++) {
2338 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2339 if (netif_msg_probe(ugeth))
2341 ("%s: Tx BD ring length must be no smaller than 2.",
2348 if ((uf_info->max_rx_buf_length == 0) ||
2349 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2350 if (netif_msg_probe(ugeth))
2352 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2358 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2359 if (netif_msg_probe(ugeth))
2360 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2365 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2366 if (netif_msg_probe(ugeth))
2367 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2372 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2373 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2374 if (netif_msg_probe(ugeth))
2376 ("%s: VLAN priority table entry must not be"
2377 " larger than number of Rx queues.",
2384 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2385 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2386 if (netif_msg_probe(ugeth))
2388 ("%s: IP priority table entry must not be"
2389 " larger than number of Rx queues.",
2395 if (ug_info->cam && !ug_info->ecamptr) {
2396 if (netif_msg_probe(ugeth))
2397 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2402 if ((ug_info->numStationAddresses !=
2403 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2404 && ug_info->rxExtendedFiltering) {
2405 if (netif_msg_probe(ugeth))
2406 ugeth_err("%s: Number of station addresses greater than 1 "
2407 "not allowed in extended parsing mode.",
2412 /* Generate uccm_mask for receive */
2413 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2414 for (i = 0; i < ug_info->numQueuesRx; i++)
2415 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2417 for (i = 0; i < ug_info->numQueuesTx; i++)
2418 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2419 /* Initialize the general fast UCC block. */
2420 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2421 if (netif_msg_probe(ugeth))
2422 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2423 ucc_geth_memclean(ugeth);
2424 return -ENOMEM;
2425 }
2427 ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
2429 return 0;
2430 }
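2431 /* One-time hardware bring-up: program MAC registers, allocate BD rings and parameter RAM, then issue the QE INIT_TX_RX command. */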
2432 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2433 {
2434 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2435 struct ucc_geth_init_pram __iomem *p_init_enet_pram;
2436 struct ucc_fast_private *uccf;
2437 struct ucc_geth_info *ug_info;
2438 struct ucc_fast_info *uf_info;
2439 struct ucc_fast __iomem *uf_regs;
2440 struct ucc_geth __iomem *ug_regs;
2441 int ret_val = -EINVAL;
2442 u32 remoder = UCC_GETH_REMODER_INIT;
2443 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2444 u32 ifstat, i, j, size, l2qt, l3qt, length;
2445 u16 temoder = UCC_GETH_TEMODER_INIT;
2446 u16 test;
2447 u8 function_code = 0;
2448 u8 __iomem *bd;
2449 u8 __iomem *endOfRing;
2450 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2452 ugeth_vdbg("%s: IN", __FUNCTION__);
2453 uccf = ugeth->uccf;
2454 ug_info = ugeth->ug_info;
2455 uf_info = &ug_info->uf_info;
2456 uf_regs = uccf->uf_regs;
2457 ug_regs = ugeth->ug_regs;
2459 switch (ug_info->numThreadsRx) {
2460 case UCC_GETH_NUM_OF_THREADS_1:
2461 numThreadsRxNumerical = 1;
2462 break;
2463 case UCC_GETH_NUM_OF_THREADS_2:
2464 numThreadsRxNumerical = 2;
2465 break;
2466 case UCC_GETH_NUM_OF_THREADS_4:
2467 numThreadsRxNumerical = 4;
2468 break;
2469 case UCC_GETH_NUM_OF_THREADS_6:
2470 numThreadsRxNumerical = 6;
2471 break;
2472 case UCC_GETH_NUM_OF_THREADS_8:
2473 numThreadsRxNumerical = 8;
2474 break;
2475 default:
2476 if (netif_msg_ifup(ugeth))
2477 ugeth_err("%s: Bad number of Rx threads value.",
2478 __FUNCTION__);
2479 ucc_geth_memclean(ugeth);
2480 return -EINVAL;
2481 }
2484 switch (ug_info->numThreadsTx) {
2485 case UCC_GETH_NUM_OF_THREADS_1:
2486 numThreadsTxNumerical = 1;
2487 break;
2488 case UCC_GETH_NUM_OF_THREADS_2:
2489 numThreadsTxNumerical = 2;
2490 break;
2491 case UCC_GETH_NUM_OF_THREADS_4:
2492 numThreadsTxNumerical = 4;
2493 break;
2494 case UCC_GETH_NUM_OF_THREADS_6:
2495 numThreadsTxNumerical = 6;
2496 break;
2497 case UCC_GETH_NUM_OF_THREADS_8:
2498 numThreadsTxNumerical = 8;
2499 break;
2500 default:
2501 if (netif_msg_ifup(ugeth))
2502 ugeth_err("%s: Bad number of Tx threads value.",
2503 __FUNCTION__);
2504 ucc_geth_memclean(ugeth);
2505 return -EINVAL;
2506 }
2509 /* Calculate rx_extended_features */
2510 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2511 ug_info->ipAddressAlignment ||
2512 (ug_info->numStationAddresses !=
2513 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2515 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2516 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2517 || (ug_info->vlanOperationNonTagged !=
2518 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2520 init_default_reg_vals(&uf_regs->upsmr,
2521 &ug_regs->maccfg1, &ug_regs->maccfg2);
2524 /* For more details see the hardware spec. */
2525 init_rx_parameters(ug_info->bro,
2526 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2528 /* We're going to ignore other registers for now, */
2529 /* except as needed to get up and running */
2532 /* For more details see the hardware spec. */
2533 init_flow_control_params(ug_info->aufc,
2534 ug_info->receiveFlowControl,
2535 ug_info->transmitFlowControl,
2536 ug_info->pausePeriod,
2537 ug_info->extensionField,
2538 &uf_regs->upsmr,
2539 &ug_regs->uempr, &ug_regs->maccfg1);
2541 maccfg1 = in_be32(&ug_regs->maccfg1);
2542 maccfg1 |= MACCFG1_ENABLE_RX;
2543 maccfg1 |= MACCFG1_ENABLE_TX;
2544 out_be32(&ug_regs->maccfg1, maccfg1);
2547 /* For more details see the hardware spec. */
2548 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2549 ug_info->nonBackToBackIfgPart2,
2550 ug_info->
2551 miminumInterFrameGapEnforcement,
2552 ug_info->backToBackInterFrameGap,
2553 &ug_regs->ipgifg);
2554 if (ret_val != 0) {
2555 if (netif_msg_ifup(ugeth))
2556 ugeth_err("%s: IPGIFG initialization parameter too large.",
2557 __FUNCTION__);
2558 ucc_geth_memclean(ugeth);
2559 return ret_val;
2560 }
2563 /* For more details see the hardware spec. */
2564 ret_val = init_half_duplex_params(ug_info->altBeb,
2565 ug_info->backPressureNoBackoff,
2566 ug_info->noBackoff,
2567 ug_info->excessDefer,
2568 ug_info->altBebTruncation,
2569 ug_info->maxRetransmission,
2570 ug_info->collisionWindow,
2571 &ug_regs->hafdup);
2572 if (ret_val != 0) {
2573 if (netif_msg_ifup(ugeth))
2574 ugeth_err("%s: Half Duplex initialization parameter too large.",
2575 __FUNCTION__);
2576 ucc_geth_memclean(ugeth);
2577 return ret_val;
2578 }
2581 /* For more details see the hardware spec. */
2582 /* Read only - resets upon read */
2583 ifstat = in_be32(&ug_regs->ifstat);
2586 /* For more details see the hardware spec. */
2587 out_be32(&ug_regs->uempr, 0);
2590 /* For more details see the hardware spec. */
2591 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2592 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2593 0, &uf_regs->upsmr, &ug_regs->uescr);
2595 /* Allocate Tx bds */
2596 for (j = 0; j < ug_info->numQueuesTx; j++) {
2597 /* Allocate in multiple of
2598 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2599 according to spec */
2600 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2601 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2602 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2603 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2604 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2605 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2606 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2607 u32 align = 4;
2608 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2609 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2610 ugeth->tx_bd_ring_offset[j] =
2611 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2613 if (ugeth->tx_bd_ring_offset[j] != 0)
2614 ugeth->p_tx_bd_ring[j] =
2615 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2616 align) & ~(align - 1));
2617 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2618 ugeth->tx_bd_ring_offset[j] =
2619 qe_muram_alloc(length,
2620 UCC_GETH_TX_BD_RING_ALIGNMENT);
2621 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2622 ugeth->p_tx_bd_ring[j] =
2623 (u8 __iomem *) qe_muram_addr(ugeth->
2624 tx_bd_ring_offset[j]);
2626 if (!ugeth->p_tx_bd_ring[j]) {
2627 if (netif_msg_ifup(ugeth))
2629 ("%s: Can not allocate memory for Tx bd rings.",
2631 ucc_geth_memclean(ugeth);
2634 /* Zero unused end of bd ring, according to spec */
2635 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2636 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
2637 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2638 }
2640 /* Allocate Rx bds */
2641 for (j = 0; j < ug_info->numQueuesRx; j++) {
2642 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2643 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2644 u32 align = 4;
2645 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2646 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2647 ugeth->rx_bd_ring_offset[j] =
2648 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2649 if (ugeth->rx_bd_ring_offset[j] != 0)
2650 ugeth->p_rx_bd_ring[j] =
2651 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2652 align) & ~(align - 1));
2653 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2654 ugeth->rx_bd_ring_offset[j] =
2655 qe_muram_alloc(length,
2656 UCC_GETH_RX_BD_RING_ALIGNMENT);
2657 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2658 ugeth->p_rx_bd_ring[j] =
2659 (u8 __iomem *) qe_muram_addr(ugeth->
2660 rx_bd_ring_offset[j]);
2662 if (!ugeth->p_rx_bd_ring[j]) {
2663 if (netif_msg_ifup(ugeth))
2665 ("%s: Can not allocate memory for Rx bd rings.",
2667 ucc_geth_memclean(ugeth);
2673 for (j = 0; j < ug_info->numQueuesTx; j++) {
2674 /* Setup the skbuff rings */
2675 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2676 ugeth->ug_info->bdRingLenTx[j],
2677 GFP_KERNEL);
2679 if (ugeth->tx_skbuff[j] == NULL) {
2680 if (netif_msg_ifup(ugeth))
2681 ugeth_err("%s: Could not allocate tx_skbuff",
2683 ucc_geth_memclean(ugeth);
2687 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2688 ugeth->tx_skbuff[j][i] = NULL;
2690 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2691 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2692 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2693 /* clear bd buffer */
2694 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2695 /* set bd status and length */
2696 out_be32((u32 __iomem *)bd, 0);
2697 bd += sizeof(struct qe_bd);
2699 bd -= sizeof(struct qe_bd);
2700 /* set bd status and length */
2701 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
2702 }
2705 for (j = 0; j < ug_info->numQueuesRx; j++) {
2706 /* Setup the skbuff rings */
2707 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2708 ugeth->ug_info->bdRingLenRx[j],
2709 GFP_KERNEL);
2711 if (ugeth->rx_skbuff[j] == NULL) {
2712 if (netif_msg_ifup(ugeth))
2713 ugeth_err("%s: Could not allocate rx_skbuff",
2715 ucc_geth_memclean(ugeth);
2719 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2720 ugeth->rx_skbuff[j][i] = NULL;
2722 ugeth->skb_currx[j] = 0;
2723 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2724 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2725 /* set bd status and length */
2726 out_be32((u32 __iomem *)bd, R_I);
2727 /* clear bd buffer */
2728 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2729 bd += sizeof(struct qe_bd);
2731 bd -= sizeof(struct qe_bd);
2732 /* set bd status and length */
2733 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
2734 }
2739 /* Tx global PRAM */
2740 /* Allocate global tx parameter RAM page */
2741 ugeth->tx_glbl_pram_offset =
2742 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2743 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2744 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2745 if (netif_msg_ifup(ugeth))
2747 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2749 ucc_geth_memclean(ugeth);
2752 ugeth->p_tx_glbl_pram =
2753 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2754 tx_glbl_pram_offset);
2755 /* Zero out p_tx_glbl_pram */
2756 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2758 /* Fill global PRAM */
2761 /* Size varies with number of Tx threads */
2762 ugeth->thread_dat_tx_offset =
2763 qe_muram_alloc(numThreadsTxNumerical *
2764 sizeof(struct ucc_geth_thread_data_tx) +
2765 32 * (numThreadsTxNumerical == 1),
2766 UCC_GETH_THREAD_DATA_ALIGNMENT);
2767 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2768 if (netif_msg_ifup(ugeth))
2770 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2772 ucc_geth_memclean(ugeth);
2776 ugeth->p_thread_data_tx =
2777 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2778 thread_dat_tx_offset);
2779 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2782 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2783 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2784 ug_info->vtagtable[i]);
2787 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2788 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2789 ug_info->iphoffset[i]);
2792 /* Size varies with number of Tx queues */
2793 ugeth->send_q_mem_reg_offset =
2794 qe_muram_alloc(ug_info->numQueuesTx *
2795 sizeof(struct ucc_geth_send_queue_qd),
2796 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2797 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2798 if (netif_msg_ifup(ugeth))
2800 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2802 ucc_geth_memclean(ugeth);
2806 ugeth->p_send_q_mem_reg =
2807 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2808 send_q_mem_reg_offset);
2809 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2811 /* Setup the table */
2812 /* Assume BD rings are already established */
2813 for (i = 0; i < ug_info->numQueuesTx; i++) {
2814 endOfRing =
2815 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2816 1) * sizeof(struct qe_bd);
2817 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2818 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2819 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2820 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2821 last_bd_completed_address,
2822 (u32) virt_to_phys(endOfRing));
2823 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2824 MEM_PART_MURAM) {
2825 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2826 (u32) immrbar_virt_to_phys(ugeth->
2827 p_tx_bd_ring[i]));
2828 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2829 last_bd_completed_address,
2830 (u32) immrbar_virt_to_phys(endOfRing));
2831 }
2832 }
2834 /* schedulerbasepointer */
2836 if (ug_info->numQueuesTx > 1) {
2837 /* scheduler exists only if more than 1 tx queue */
2838 ugeth->scheduler_offset =
2839 qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2840 UCC_GETH_SCHEDULER_ALIGNMENT);
2841 if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2842 if (netif_msg_ifup(ugeth))
2844 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2846 ucc_geth_memclean(ugeth);
2850 ugeth->p_scheduler =
2851 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2852 scheduler_offset);
2853 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2854 ugeth->scheduler_offset);
2855 /* Zero out p_scheduler */
2856 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2858 /* Set values in scheduler */
2859 out_be32(&ugeth->p_scheduler->mblinterval,
2860 ug_info->mblinterval);
2861 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2862 ug_info->nortsrbytetime);
2863 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2864 out_8(&ugeth->p_scheduler->strictpriorityq,
2865 ug_info->strictpriorityq);
2866 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2867 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2868 for (i = 0; i < NUM_TX_QUEUES; i++)
2869 out_8(&ugeth->p_scheduler->weightfactor[i],
2870 ug_info->weightfactor[i]);
2872 /* Set pointers to cpucount registers in scheduler */
2873 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2874 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2875 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2876 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2877 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2878 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2879 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2880 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
2881 }
2883 /* schedulerbasepointer */
2884 /* TxRMON_PTR (statistics) */
2885 if (ug_info->
2886 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
2887 ugeth->tx_fw_statistics_pram_offset =
2888 qe_muram_alloc(sizeof
2889 (struct ucc_geth_tx_firmware_statistics_pram),
2890 UCC_GETH_TX_STATISTICS_ALIGNMENT);
2891 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2892 if (netif_msg_ifup(ugeth))
2894 ("%s: Can not allocate DPRAM memory for"
2895 " p_tx_fw_statistics_pram.",
2897 ucc_geth_memclean(ugeth);
2900 ugeth->p_tx_fw_statistics_pram =
2901 (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
2902 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2903 /* Zero out p_tx_fw_statistics_pram */
2904 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2905 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2906 }
2909 /* Already has speed set */
2911 if (ug_info->numQueuesTx > 1)
2912 temoder |= TEMODER_SCHEDULER_ENABLE;
2913 if (ug_info->ipCheckSumGenerate)
2914 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
2915 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2916 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2918 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2920 /* Function code register value to be used later */
2921 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
2922 /* Required for QE */
2924 /* function code register */
2925 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2927 /* Rx global PRAM */
2928 /* Allocate global rx parameter RAM page */
2929 ugeth->rx_glbl_pram_offset =
2930 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2931 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2932 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2933 if (netif_msg_ifup(ugeth))
2935 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2937 ucc_geth_memclean(ugeth);
2940 ugeth->p_rx_glbl_pram =
2941 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2942 rx_glbl_pram_offset);
2943 /* Zero out p_rx_glbl_pram */
2944 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2946 /* Fill global PRAM */
2949 /* Size varies with number of Rx threads */
2950 ugeth->thread_dat_rx_offset =
2951 qe_muram_alloc(numThreadsRxNumerical *
2952 sizeof(struct ucc_geth_thread_data_rx),
2953 UCC_GETH_THREAD_DATA_ALIGNMENT);
2954 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2955 if (netif_msg_ifup(ugeth))
2957 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2959 ucc_geth_memclean(ugeth);
2963 ugeth->p_thread_data_rx =
2964 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2965 thread_dat_rx_offset);
2966 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2969 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2971 /* rxrmonbaseptr (statistics) */
2972 if (ug_info->
2973 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
2974 ugeth->rx_fw_statistics_pram_offset =
2975 qe_muram_alloc(sizeof
2976 (struct ucc_geth_rx_firmware_statistics_pram),
2977 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2978 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2979 if (netif_msg_ifup(ugeth))
2981 ("%s: Can not allocate DPRAM memory for"
2982 " p_rx_fw_statistics_pram.", __FUNCTION__);
2983 ucc_geth_memclean(ugeth);
2986 ugeth->p_rx_fw_statistics_pram =
2987 (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
2988 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2989 /* Zero out p_rx_fw_statistics_pram */
2990 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2991 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2992 }
2994 /* intCoalescingPtr */
2996 /* Size varies with number of Rx queues */
2997 ugeth->rx_irq_coalescing_tbl_offset =
2998 qe_muram_alloc(ug_info->numQueuesRx *
2999 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
3000 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3001 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
3002 if (netif_msg_ifup(ugeth))
3004 ("%s: Can not allocate DPRAM memory for"
3005 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3006 ucc_geth_memclean(ugeth);
3010 ugeth->p_rx_irq_coalescing_tbl =
3011 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
3012 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3013 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3014 ugeth->rx_irq_coalescing_tbl_offset);
3016 /* Fill interrupt coalescing table */
3017 for (i = 0; i < ug_info->numQueuesRx; i++) {
3018 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3019 interruptcoalescingmaxvalue,
3020 ug_info->interruptcoalescingmaxvalue[i]);
3021 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3022 interruptcoalescingcounter,
3023 ug_info->interruptcoalescingmaxvalue[i]);
3024 }
3027 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3028 &ugeth->p_rx_glbl_pram->mrblr);
3030 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3032 init_min_frame_len(ug_info->minFrameLength,
3033 &ugeth->p_rx_glbl_pram->minflr,
3034 &ugeth->p_rx_glbl_pram->mrblr);
3036 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3038 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3041 l2qt = 0;
3042 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3043 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3044 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3047 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3048 l3qt = 0;
3049 for (i = 0; i < 8; i++)
3050 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3051 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
3052 }
3055 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3058 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3061 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3064 /* Size varies with number of Rx queues */
3065 ugeth->rx_bd_qs_tbl_offset =
3066 qe_muram_alloc(ug_info->numQueuesRx *
3067 (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3068 sizeof(struct ucc_geth_rx_prefetched_bds)),
3069 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3070 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
3071 if (netif_msg_ifup(ugeth))
3073 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3075 ucc_geth_memclean(ugeth);
3079 ugeth->p_rx_bd_qs_tbl =
3080 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
3081 rx_bd_qs_tbl_offset);
3082 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3083 /* Zero out p_rx_bd_qs_tbl */
3084 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
3085 0,
3086 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3087 sizeof(struct ucc_geth_rx_prefetched_bds)));
3089 /* Setup the table */
3090 /* Assume BD rings are already established */
3091 for (i = 0; i < ug_info->numQueuesRx; i++) {
3092 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3093 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3094 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3095 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3096 MEM_PART_MURAM) {
3097 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3098 (u32) immrbar_virt_to_phys(ugeth->
3099 p_rx_bd_ring[i]));
3100 }
3101 /* rest of fields handled by QE */
3102 }
3105 /* Already has speed set */
3107 if (ugeth->rx_extended_features)
3108 remoder |= REMODER_RX_EXTENDED_FEATURES;
3109 if (ug_info->rxExtendedFiltering)
3110 remoder |= REMODER_RX_EXTENDED_FILTERING;
3111 if (ug_info->dynamicMaxFrameLength)
3112 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3113 if (ug_info->dynamicMinFrameLength)
3114 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3115 remoder |=
3116 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3117 remoder |=
3118 ug_info->
3119 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3120 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3121 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3122 if (ug_info->ipCheckSumCheck)
3123 remoder |= REMODER_IP_CHECKSUM_CHECK;
3124 if (ug_info->ipAddressAlignment)
3125 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3126 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3128 /* Note that this function must be called */
3129 /* ONLY AFTER p_tx_fw_statistics_pram */
3130 /* and p_UccGethRxFirmwareStatisticsPram are allocated ! */
3131 init_firmware_statistics_gathering_mode((ug_info->
3132 statisticsMode &
3133 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3134 (ug_info->statisticsMode &
3135 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3136 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3137 ugeth->tx_fw_statistics_pram_offset,
3138 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3139 ugeth->rx_fw_statistics_pram_offset,
3140 &ugeth->p_tx_glbl_pram->temoder,
3141 &ugeth->p_rx_glbl_pram->remoder);
3143 /* function code register */
3144 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
3146 /* initialize extended filtering */
3147 if (ug_info->rxExtendedFiltering) {
3148 if (!ug_info->extendedFilteringChainPointer) {
3149 if (netif_msg_ifup(ugeth))
3150 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3151 __FUNCTION__);
3152 ucc_geth_memclean(ugeth);
3153 return -EINVAL;
3154 }
3156 /* Allocate memory for extended filtering Mode Global
3157 Parameters */
3158 ugeth->exf_glbl_param_offset =
3159 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
3160 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3161 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
3162 if (netif_msg_ifup(ugeth))
3164 ("%s: Can not allocate DPRAM memory for"
3165 " p_exf_glbl_param.", __FUNCTION__);
3166 ucc_geth_memclean(ugeth);
3170 ugeth->p_exf_glbl_param =
3171 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
3172 exf_glbl_param_offset);
3173 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3174 ugeth->exf_glbl_param_offset);
3175 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3176 (u32) ug_info->extendedFilteringChainPointer);
3178 } else { /* initialize 82xx style address filtering */
3180 /* Init individual address recognition registers to disabled */
3182 for (j = 0; j < NUM_OF_PADDRS; j++)
3183 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3185 p_82xx_addr_filt =
3186 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
3187 p_rx_glbl_pram->addressfiltering;
3189 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3190 ENET_ADDR_TYPE_GROUP);
3191 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3192 ENET_ADDR_TYPE_INDIVIDUAL);
3193 }
3195 /*
3196 * Initialize UCC at QE level
3197 */
3199 command = QE_INIT_TX_RX;
3201 /* Allocate shadow InitEnet command parameter structure.
3202 * This is needed because after the InitEnet command is executed,
3203 * the structure in DPRAM is released, because DPRAM is a premium
3204 * resource.
3205 * This shadow structure keeps a copy of what was done so that the
3206 * allocated resources can be released when the channel is freed.
3207 */
3208 if (!(ugeth->p_init_enet_param_shadow =
3209 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3210 if (netif_msg_ifup(ugeth))
3212 ("%s: Can not allocate memory for"
3213 " p_UccInitEnetParamShadows.", __FUNCTION__);
3214 ucc_geth_memclean(ugeth);
3217 /* Zero out *p_init_enet_param_shadow */
3218 memset((char *)ugeth->p_init_enet_param_shadow,
3219 0, sizeof(struct ucc_geth_init_pram));
3221 /* Fill shadow InitEnet command parameter structure */
3223 ugeth->p_init_enet_param_shadow->resinit1 =
3224 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3225 ugeth->p_init_enet_param_shadow->resinit2 =
3226 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3227 ugeth->p_init_enet_param_shadow->resinit3 =
3228 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3229 ugeth->p_init_enet_param_shadow->resinit4 =
3230 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3231 ugeth->p_init_enet_param_shadow->resinit5 =
3232 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3233 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3234 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3235 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3236 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3238 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3239 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3240 if ((ug_info->largestexternallookupkeysize !=
3241 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3242 && (ug_info->largestexternallookupkeysize !=
3243 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3244 && (ug_info->largestexternallookupkeysize !=
3245 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3246 if (netif_msg_ifup(ugeth))
3247 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3249 ucc_geth_memclean(ugeth);
3252 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3253 ug_info->largestexternallookupkeysize;
3254 size = sizeof(struct ucc_geth_thread_rx_pram);
3255 if (ug_info->rxExtendedFiltering) {
3256 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3257 if (ug_info->largestexternallookupkeysize ==
3258 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3259 size +=
3260 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3261 if (ug_info->largestexternallookupkeysize ==
3262 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3263 size +=
3264 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3265 }
3267 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3268 p_init_enet_param_shadow->rxthread[0]),
3269 (u8) (numThreadsRxNumerical + 1)
3270 /* Rx needs one extra for terminator */
3271 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3272 ug_info->riscRx, 1)) != 0) {
3273 if (netif_msg_ifup(ugeth))
3274 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3276 ucc_geth_memclean(ugeth);
3280 ugeth->p_init_enet_param_shadow->txglobal =
3281 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3282 if ((ret_val =
3283 fill_init_enet_entries(ugeth,
3284 &(ugeth->p_init_enet_param_shadow->
3285 txthread[0]), numThreadsTxNumerical,
3286 sizeof(struct ucc_geth_thread_tx_pram),
3287 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3288 ug_info->riscTx, 0)) != 0) {
3289 if (netif_msg_ifup(ugeth))
3290 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3292 ucc_geth_memclean(ugeth);
3296 /* Load Rx bds with buffers */
3297 for (i = 0; i < ug_info->numQueuesRx; i++) {
3298 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3299 if (netif_msg_ifup(ugeth))
3300 ugeth_err("%s: Can not fill Rx bds with buffers.",
3302 ucc_geth_memclean(ugeth);
3307 /* Allocate InitEnet command parameter structure */
3308 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3309 if (IS_ERR_VALUE(init_enet_pram_offset)) {
3310 if (netif_msg_ifup(ugeth))
3312 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3314 ucc_geth_memclean(ugeth);
3318 (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
3320 /* Copy shadow InitEnet command parameter structure into PRAM */
3321 out_8(&p_init_enet_pram->resinit1,
3322 ugeth->p_init_enet_param_shadow->resinit1);
3323 out_8(&p_init_enet_pram->resinit2,
3324 ugeth->p_init_enet_param_shadow->resinit2);
3325 out_8(&p_init_enet_pram->resinit3,
3326 ugeth->p_init_enet_param_shadow->resinit3);
3327 out_8(&p_init_enet_pram->resinit4,
3328 ugeth->p_init_enet_param_shadow->resinit4);
3329 out_be16(&p_init_enet_pram->resinit5,
3330 ugeth->p_init_enet_param_shadow->resinit5);
3331 out_8(&p_init_enet_pram->largestexternallookupkeysize,
3332 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3333 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3334 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3335 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3336 out_be32(&p_init_enet_pram->rxthread[i],
3337 ugeth->p_init_enet_param_shadow->rxthread[i]);
3338 out_be32(&p_init_enet_pram->txglobal,
3339 ugeth->p_init_enet_param_shadow->txglobal);
3340 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3341 out_be32(&p_init_enet_pram->txthread[i],
3342 ugeth->p_init_enet_param_shadow->txthread[i]);
3344 /* Issue QE command */
3345 cecr_subblock =
3346 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3347 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3348 init_enet_pram_offset);
3350 /* Free InitEnet command parameter */
3351 qe_muram_free(init_enet_pram_offset);
3353 return 0;
3354 }
3356 /* ucc_geth_timeout gets called when a packet has not been
3357 * transmitted after a set amount of time.
3358 * For now, assume that clearing out all the structures, and
3359 * starting over will fix the problem. */
3360 static void ucc_geth_timeout(struct net_device *dev)
3361 {
3362 struct ucc_geth_private *ugeth = netdev_priv(dev);
3364 ugeth_vdbg("%s: IN", __FUNCTION__);
3366 dev->stats.tx_errors++;
3368 ugeth_dump_regs(ugeth);
3370 if (dev->flags & IFF_UP) {
3371 ucc_geth_stop(ugeth);
3372 ucc_geth_startup(ugeth);
3373 }
3375 netif_schedule(dev);
3376 }
3378 /* This is called by the kernel when a frame is ready for transmission. */
3379 /* It is pointed to by the dev->hard_start_xmit function pointer */
3380 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3381 {
3382 struct ucc_geth_private *ugeth = netdev_priv(dev);
3383 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3384 struct ucc_fast_private *uccf;
3385 #endif /* CONFIG_UGETH_TX_ON_DEMAND */
3386 u8 __iomem *bd; /* BD pointer */
3387 u32 bd_status;
3388 u8 txQ = 0;
3390 ugeth_vdbg("%s: IN", __FUNCTION__);
3392 spin_lock_irq(&ugeth->lock);
3394 dev->stats.tx_bytes += skb->len;
3396 /* Start from the next BD that should be filled */
3397 bd = ugeth->txBd[txQ];
3398 bd_status = in_be32((u32 __iomem *)bd);
3399 /* Save the skb pointer so we can free it later */
3400 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3402 /* Update the current skb pointer (wrapping if this was the last) */
3403 ugeth->skb_curtx[txQ] =
3404 (ugeth->skb_curtx[txQ] +
3405 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3407 /* set up the buffer descriptor */
3408 out_be32(&((struct qe_bd __iomem *)bd)->buf,
3409 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3411 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3413 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3415 /* set bd status and length */
3416 out_be32((u32 __iomem *)bd, bd_status);
3418 dev->trans_start = jiffies;
3420 /* Move to next BD in the ring */
3421 if (!(bd_status & T_W))
3422 bd += sizeof(struct qe_bd);
3423 else
3424 bd = ugeth->p_tx_bd_ring[txQ];
3426 /* If the next BD still needs to be cleaned up, then the bds
3427 are full. We need to tell the kernel to stop sending us stuff. */
3428 if (bd == ugeth->confBd[txQ]) {
3429 if (!netif_queue_stopped(dev))
3430 netif_stop_queue(dev);
3431 }
3433 ugeth->txBd[txQ] = bd;
3435 if (ugeth->p_scheduler) {
3436 ugeth->cpucount[txQ]++;
3437 /* Indicate to QE that there are more Tx bds ready for
3438 transmission */
3439 /* This is done by writing a running counter of the bd
3440 count to the scheduler PRAM. */
3441 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3442 }
3444 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3445 uccf = ugeth->uccf;
3446 out_be16(uccf->p_utodr, UCC_FAST_TOD);
3447 #endif
3448 spin_unlock_irq(&ugeth->lock);
3450 return 0;
3451 }
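3452 /* Process received frames on one Rx queue, up to rx_work_limit. */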
3453 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3454 {
3455 struct sk_buff *skb;
3456 u8 __iomem *bd;
3457 u16 length, howmany = 0;
3458 u32 bd_status;
3459 u8 *bdBuffer;
3460 struct net_device *dev;
3462 ugeth_vdbg("%s: IN", __FUNCTION__);
3464 dev = ugeth->dev;
3466 /* collect received buffers */
3467 bd = ugeth->rxBd[rxQ];
3469 bd_status = in_be32((u32 __iomem *)bd);
3471 /* while there are received buffers and BD is full (~R_E) */
3472 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3473 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
3474 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3475 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3477 /* determine whether buffer is first, last, first and last
3478 (single buffer frame) or middle (not first and not last) */
3479 if (!skb ||
3480 (!(bd_status & (R_F | R_L))) ||
3481 (bd_status & R_ERRORS_FATAL)) {
3482 if (netif_msg_rx_err(ugeth))
3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3484 __FUNCTION__, __LINE__, (u32) skb);
3485 if (skb)
3486 dev_kfree_skb_any(skb);
3488 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3489 dev->stats.rx_dropped++;
3490 } else {
3491 dev->stats.rx_packets++;
3492 howmany++;
3494 /* Prep the skb for the packet */
3495 skb_put(skb, length);
3497 /* Tell the skb what kind of packet this is */
3498 skb->protocol = eth_type_trans(skb, ugeth->dev);
3500 dev->stats.rx_bytes += length;
3501 /* Send the packet up the stack */
3502 #ifdef CONFIG_UGETH_NAPI
3503 netif_receive_skb(skb);
3504 #else
3505 netif_rx(skb);
3506 #endif /* CONFIG_UGETH_NAPI */
3507 }
3509 ugeth->dev->last_rx = jiffies;
3511 skb = get_new_skb(ugeth, bd);
3512 if (!skb) {
3513 if (netif_msg_rx_err(ugeth))
3514 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3515 dev->stats.rx_dropped++;
3516 break;
3517 }
3519 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3521 /* update to point at the next skb */
3522 ugeth->skb_currx[rxQ] =
3523 (ugeth->skb_currx[rxQ] +
3524 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3526 if (bd_status & R_W)
3527 bd = ugeth->p_rx_bd_ring[rxQ];
3528 else
3529 bd += sizeof(struct qe_bd);
3531 bd_status = in_be32((u32 __iomem *)bd);
3532 }
3534 ugeth->rxBd[rxQ] = bd;
3535 return howmany;
3536 }
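3537 /* Reclaim transmitted buffers from the Tx BD ring and restart the queue if it was stopped. */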
3538 static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3539 {
3540 /* Start from the next BD that should be filled */
3541 struct ucc_geth_private *ugeth = netdev_priv(dev);
3542 u8 __iomem *bd; /* BD pointer */
3543 u32 bd_status;
3545 bd = ugeth->confBd[txQ];
3546 bd_status = in_be32((u32 __iomem *)bd);
3548 /* Normal processing. */
3549 while ((bd_status & T_R) == 0) {
3550 /* BD contains already transmitted buffer. */
3551 /* Handle the transmitted buffer and release */
3552 /* the BD to be used with the current frame */
3554 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3555 break;
3557 dev->stats.tx_packets++;
3559 /* Free the sk buffer associated with this TxBD */
3560 dev_kfree_skb_irq(ugeth->
3561 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3562 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3563 ugeth->skb_dirtytx[txQ] =
3564 (ugeth->skb_dirtytx[txQ] +
3565 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3567 /* We freed a buffer, so now we can restart transmission */
3568 if (netif_queue_stopped(dev))
3569 netif_wake_queue(dev);
3571 /* Advance the confirmation BD pointer */
3572 if (!(bd_status & T_W))
3573 bd += sizeof(struct qe_bd);
3574 else
3575 bd = ugeth->p_tx_bd_ring[txQ];
3576 bd_status = in_be32((u32 __iomem *)bd);
3577 }
3578 ugeth->confBd[txQ] = bd;
3579 return 0;
3580 }
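3581 /* NAPI poll: service all Rx queues within the budget, then re-enable Rx interrupts. */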
3582 #ifdef CONFIG_UGETH_NAPI
3583 static int ucc_geth_poll(struct napi_struct *napi, int budget)
3584 {
3585 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
3586 struct net_device *dev = ugeth->dev;
3587 struct ucc_geth_info *ug_info;
3588 int howmany, i;
3590 ug_info = ugeth->ug_info;
3592 howmany = 0;
3593 for (i = 0; i < ug_info->numQueuesRx; i++)
3594 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3596 if (howmany < budget) {
3597 struct ucc_fast_private *uccf;
3598 u32 uccm;
3600 netif_rx_complete(dev, napi);
3601 uccf = ugeth->uccf;
3602 uccm = in_be32(uccf->p_uccm);
3603 uccm |= UCCE_RX_EVENTS;
3604 out_be32(uccf->p_uccm, uccm);
3605 }
3607 return howmany;
3608 }
3609 #endif /* CONFIG_UGETH_NAPI */
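3610 /* Interrupt handler: read and clear UCCE events, then dispatch Rx, Tx and error handling. */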
3611 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3612 {
3613 struct net_device *dev = info;
3614 struct ucc_geth_private *ugeth = netdev_priv(dev);
3615 struct ucc_fast_private *uccf;
3616 struct ucc_geth_info *ug_info;
3617 register u32 ucce;
3618 register u32 uccm;
3619 #ifndef CONFIG_UGETH_NAPI
3620 register u32 rx_mask;
3621 #endif
3622 register u32 tx_mask;
3623 u8 i;
3625 ugeth_vdbg("%s: IN", __FUNCTION__);
3627 uccf = ugeth->uccf;
3628 ug_info = ugeth->ug_info;
3630 /* read and clear events */
3631 ucce = (u32) in_be32(uccf->p_ucce);
3632 uccm = (u32) in_be32(uccf->p_uccm);
3634 out_be32(uccf->p_ucce, ucce);
3636 /* check for receive events that require processing */
3637 if (ucce & UCCE_RX_EVENTS) {
3638 #ifdef CONFIG_UGETH_NAPI
3639 if (netif_rx_schedule_prep(dev, &ugeth->napi)) {
3640 uccm &= ~UCCE_RX_EVENTS;
3641 out_be32(uccf->p_uccm, uccm);
3642 __netif_rx_schedule(dev, &ugeth->napi);
3643 }
3644 #else
3645 rx_mask = UCCE_RXBF_SINGLE_MASK;
3646 for (i = 0; i < ug_info->numQueuesRx; i++) {
3647 if (ucce & rx_mask)
3648 ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]);
3649 ucce &= ~rx_mask;
3650 rx_mask <<= 1;
3651 }
3652 #endif /* CONFIG_UGETH_NAPI */
3653 }
3655 /* Tx event processing */
3656 if (ucce & UCCE_TX_EVENTS) {
3657 spin_lock(&ugeth->lock);
3658 tx_mask = UCCE_TXBF_SINGLE_MASK;
3659 for (i = 0; i < ug_info->numQueuesTx; i++) {
3660 if (ucce & tx_mask)
3661 ucc_geth_tx(dev, i);
3662 ucce &= ~tx_mask;
3663 tx_mask <<= 1;
3664 }
3665 spin_unlock(&ugeth->lock);
3666 }
3668 /* Errors and other events */
3669 if (ucce & UCCE_OTHER) {
3670 if (ucce & UCCE_BSY) {
3671 dev->stats.rx_errors++;
3672 }
3673 if (ucce & UCCE_TXE) {
3674 dev->stats.tx_errors++;
3675 }
3676 }
3678 return IRQ_HANDLED;
3679 }
3681 #ifdef CONFIG_NET_POLL_CONTROLLER
3682 /*
3683 * Polling 'interrupt' - used by things like netconsole to send skbs
3684 * without having to re-enable interrupts. It's not called while
3685 * the interrupt routine is executing.
3686 */
3687 static void ucc_netpoll(struct net_device *dev)
3688 {
3689 struct ucc_geth_private *ugeth = netdev_priv(dev);
3690 int irq = ugeth->ug_info->uf_info.irq;
3692 disable_irq(irq);
3693 ucc_geth_irq_handler(irq, dev);
3694 enable_irq(irq);
3695 }
3696 #endif /* CONFIG_NET_POLL_CONTROLLER */
3698 /* Called when something needs to use the ethernet device */
3699 /* Returns 0 for success. */
3700 static int ucc_geth_open(struct net_device *dev)
3701 {
3702 struct ucc_geth_private *ugeth = netdev_priv(dev);
3703 int err;
3705 ugeth_vdbg("%s: IN", __FUNCTION__);
3707 /* Test station address */
3708 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3709 if (netif_msg_ifup(ugeth))
3710 ugeth_err("%s: Multicast address used for station address"
3711 " - is this what you wanted?", __FUNCTION__);
3715 err = ucc_struct_init(ugeth);
3716 if (err) {
3717 if (netif_msg_ifup(ugeth))
3718 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3719 return err;
3720 }
3722 #ifdef CONFIG_UGETH_NAPI
3723 napi_enable(&ugeth->napi);
3724 #endif
3725 err = ucc_geth_startup(ugeth);
3726 if (err) {
3727 if (netif_msg_ifup(ugeth))
3728 ugeth_err("%s: Cannot configure net device, aborting.",
3733 err = adjust_enet_interface(ugeth);
3734 if (err) {
3735 if (netif_msg_ifup(ugeth))
3736 ugeth_err("%s: Cannot configure net device, aborting.",
3737 dev->name);
3738 goto out_err;
3739 }
3741 /* Set MACSTNADDR1, MACSTNADDR2 */
3742 /* For more details see the hardware spec. */
3743 init_mac_station_addr_regs(dev->dev_addr[0],
3744 dev->dev_addr[1],
3745 dev->dev_addr[2],
3746 dev->dev_addr[3],
3747 dev->dev_addr[4],
3748 dev->dev_addr[5],
3749 &ugeth->ug_regs->macstnaddr1,
3750 &ugeth->ug_regs->macstnaddr2);
3752 err = init_phy(dev);
3753 if (err) {
3754 if (netif_msg_ifup(ugeth))
3755 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3756 goto out_err;
3757 }
3759 phy_start(ugeth->phydev);
3761 err =
3762 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
3763 "UCC Geth", dev);
3764 if (err) {
3765 if (netif_msg_ifup(ugeth))
3766 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3768 ucc_geth_stop(ugeth);
3772 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3773 if (err) {
3774 if (netif_msg_ifup(ugeth))
3775 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3776 ucc_geth_stop(ugeth);
3777 goto out_err;
3778 }
3780 netif_start_queue(dev);
3782 return err;
3784 out_err:
3785 #ifdef CONFIG_UGETH_NAPI
3786 napi_disable(&ugeth->napi);
3787 #endif
3788 return err;
3789 }
3791 /* Stops the kernel queue, and halts the controller */
3792 static int ucc_geth_close(struct net_device *dev)
3793 {
3794 struct ucc_geth_private *ugeth = netdev_priv(dev);
3796 ugeth_vdbg("%s: IN", __FUNCTION__);
3798 #ifdef CONFIG_UGETH_NAPI
3799 napi_disable(&ugeth->napi);
3800 #endif
3802 ucc_geth_stop(ugeth);
3804 phy_disconnect(ugeth->phydev);
3805 ugeth->phydev = NULL;
3807 netif_stop_queue(dev);
3809 return 0;
3810 }
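3811 /* Translate a device-tree phy-connection-type string to a phy_interface_t value. */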
3812 static phy_interface_t to_phy_interface(const char *phy_connection_type)
3814 if (strcasecmp(phy_connection_type, "mii") == 0)
3815 return PHY_INTERFACE_MODE_MII;
3816 if (strcasecmp(phy_connection_type, "gmii") == 0)
3817 return PHY_INTERFACE_MODE_GMII;
3818 if (strcasecmp(phy_connection_type, "tbi") == 0)
3819 return PHY_INTERFACE_MODE_TBI;
3820 if (strcasecmp(phy_connection_type, "rmii") == 0)
3821 return PHY_INTERFACE_MODE_RMII;
3822 if (strcasecmp(phy_connection_type, "rgmii") == 0)
3823 return PHY_INTERFACE_MODE_RGMII;
3824 if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
3825 return PHY_INTERFACE_MODE_RGMII_ID;
3826 if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
3827 return PHY_INTERFACE_MODE_RGMII_TXID;
3828 if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
3829 return PHY_INTERFACE_MODE_RGMII_RXID;
3830 if (strcasecmp(phy_connection_type, "rtbi") == 0)
3831 return PHY_INTERFACE_MODE_RTBI;
3833 return PHY_INTERFACE_MODE_MII;
3834 }
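3835 /* Probe: parse the device-tree node, size FIFOs for the link speed, and register the net_device. */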
3836 static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3837 {
3838 struct device *device = &ofdev->dev;
3839 struct device_node *np = ofdev->node;
3840 struct device_node *mdio;
3841 struct net_device *dev = NULL;
3842 struct ucc_geth_private *ugeth = NULL;
3843 struct ucc_geth_info *ug_info;
3844 struct resource res;
3845 struct device_node *phy;
3846 int err, ucc_num, max_speed = 0;
3847 const phandle *ph;
3848 const u32 *fixed_link;
3849 const unsigned int *prop;
3850 const char *sprop;
3851 const void *mac_addr;
3852 phy_interface_t phy_interface;
3853 static const int enet_to_speed[] = {
3854 SPEED_10, SPEED_10, SPEED_10,
3855 SPEED_100, SPEED_100, SPEED_100,
3856 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
3857 };
3858 static const phy_interface_t enet_to_phy_interface[] = {
3859 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
3860 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
3861 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3862 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3863 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3864 };
3866 ugeth_vdbg("%s: IN", __FUNCTION__);
3868 prop = of_get_property(np, "cell-index", NULL);
3870 prop = of_get_property(np, "device-id", NULL);
3875 ucc_num = *prop - 1;
3876 if ((ucc_num < 0) || (ucc_num > 7))
3877 return -ENODEV;
3879 ug_info = &ugeth_info[ucc_num];
3880 if (ug_info == NULL) {
3881 if (netif_msg_probe(&debug))
3882 ugeth_err("%s: [%d] Missing additional data!",
3883 __FUNCTION__, ucc_num);
3884 return -ENODEV;
3885 }
3887 ug_info->uf_info.ucc_num = ucc_num;
3889 sprop = of_get_property(np, "rx-clock-name", NULL);
3890 if (sprop) {
3891 ug_info->uf_info.rx_clock = qe_clock_source(sprop);
3892 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
3893 (ug_info->uf_info.rx_clock > QE_CLK24)) {
3895 "ucc_geth: invalid rx-clock-name property\n");
3899 prop = of_get_property(np, "rx-clock", NULL);
3900 if (!prop) {
3901 /* If both rx-clock-name and rx-clock are missing,
3902 we want to tell people to use rx-clock-name. */
3904 "ucc_geth: missing rx-clock-name property\n");
3907 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3909 "ucc_geth: invalid rx-clock propperty\n");
3912 ug_info->uf_info.rx_clock = *prop;
3913 }
3915 sprop = of_get_property(np, "tx-clock-name", NULL);
3916 if (sprop) {
3917 ug_info->uf_info.tx_clock = qe_clock_source(sprop);
3918 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
3919 (ug_info->uf_info.tx_clock > QE_CLK24)) {
3921 "ucc_geth: invalid tx-clock-name property\n");
3925 prop = of_get_property(np, "tx-clock", NULL);
3928 "ucc_geth: mising tx-clock-name property\n");
3931 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3933 "ucc_geth: invalid tx-clock property\n");
3936 ug_info->uf_info.tx_clock = *prop;
3937 }
3939 err = of_address_to_resource(np, 0, &res);
3940 if (err)
3941 return -EINVAL;
3943 ug_info->uf_info.regs = res.start;
3944 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3945 fixed_link = of_get_property(np, "fixed-link", NULL);
3947 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
3948 ug_info->phy_address = fixed_link[0];
3951 ph = of_get_property(np, "phy-handle", NULL);
3952 phy = of_find_node_by_phandle(*ph);
3954 if (phy == NULL)
3955 return -ENODEV;
3957 /* set the PHY address */
3958 prop = of_get_property(phy, "reg", NULL);
3961 ug_info->phy_address = *prop;
3963 /* Set the bus id */
3964 mdio = of_get_parent(phy);
3969 err = of_address_to_resource(mdio, 0, &res);
3975 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
3978 /* get the phy interface type, or default to MII */
3979 prop = of_get_property(np, "phy-connection-type", NULL);
3980 if (!prop) {
3981 /* handle interface property present in old trees */
3982 prop = of_get_property(phy, "interface", NULL);
3983 if (prop != NULL) {
3984 phy_interface = enet_to_phy_interface[*prop];
3985 max_speed = enet_to_speed[*prop];
3986 } else
3987 phy_interface = PHY_INTERFACE_MODE_MII;
3988 } else {
3989 phy_interface = to_phy_interface((const char *)prop);
3990 }
3992 /* get speed, or derive from PHY interface */
3993 if (max_speed == 0)
3994 switch (phy_interface) {
3995 case PHY_INTERFACE_MODE_GMII:
3996 case PHY_INTERFACE_MODE_RGMII:
3997 case PHY_INTERFACE_MODE_RGMII_ID:
3998 case PHY_INTERFACE_MODE_RGMII_RXID:
3999 case PHY_INTERFACE_MODE_RGMII_TXID:
4000 case PHY_INTERFACE_MODE_TBI:
4001 case PHY_INTERFACE_MODE_RTBI:
4002 max_speed = SPEED_1000;
4003 break;
4004 default:
4005 max_speed = SPEED_100;
4006 break;
4007 }
4009 if (max_speed == SPEED_1000) {
4010 /* configure muram FIFOs for gigabit operation */
4011 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
4012 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
4013 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
4014 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
4015 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
4016 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
4017 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
4018 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
4019 }
4021 if (netif_msg_probe(&debug))
4022 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
4023 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
4024 ug_info->uf_info.irq);
4026 /* Create an ethernet device instance */
4027 dev = alloc_etherdev(sizeof(*ugeth));
4029 if (dev == NULL)
4030 return -ENOMEM;
4032 ugeth = netdev_priv(dev);
4033 spin_lock_init(&ugeth->lock);
4035 /* Create CQs for hash tables */
4036 INIT_LIST_HEAD(&ugeth->group_hash_q);
4037 INIT_LIST_HEAD(&ugeth->ind_hash_q);
4039 dev_set_drvdata(device, dev);
4041 /* Set the dev->base_addr to the gfar reg region */
4042 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
4044 SET_NETDEV_DEV(dev, device);
4046 /* Fill in the dev structure */
4047 uec_set_ethtool_ops(dev);
4048 dev->open = ucc_geth_open;
4049 dev->hard_start_xmit = ucc_geth_start_xmit;
4050 dev->tx_timeout = ucc_geth_timeout;
4051 dev->watchdog_timeo = TX_TIMEOUT;
4052 #ifdef CONFIG_UGETH_NAPI
4053 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
4054 #endif /* CONFIG_UGETH_NAPI */
4055 #ifdef CONFIG_NET_POLL_CONTROLLER
4056 dev->poll_controller = ucc_netpoll;
4057 #endif
4058 dev->stop = ucc_geth_close;
4059 // dev->change_mtu = ucc_geth_change_mtu;
4060 dev->mtu = 1500;
4061 dev->set_multicast_list = ucc_geth_set_multi;
4063 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
4064 ugeth->phy_interface = phy_interface;
4065 ugeth->max_speed = max_speed;
4067 err = register_netdev(dev);
4068 if (err) {
4069 if (netif_msg_probe(ugeth))
4070 ugeth_err("%s: Cannot register net device, aborting.",
4071 dev->name);
4072 free_netdev(dev);
4073 return err;
4074 }
4076 mac_addr = of_get_mac_address(np);
4077 if (mac_addr)
4078 memcpy(dev->dev_addr, mac_addr, 6);
4080 ugeth->ug_info = ug_info;
4081 ugeth->dev = dev;
4083 return 0;
4084 }
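4085 /* Remove: unregister the net_device and free all driver state. */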
4086 static int ucc_geth_remove(struct of_device* ofdev)
4087 {
4088 struct device *device = &ofdev->dev;
4089 struct net_device *dev = dev_get_drvdata(device);
4090 struct ucc_geth_private *ugeth = netdev_priv(dev);
4092 unregister_netdev(dev);
4094 ucc_geth_memclean(ugeth);
4095 dev_set_drvdata(device, NULL);
4097 return 0;
4098 }
4100 static struct of_device_id ucc_geth_match[] = {
4103 .compatible = "ucc_geth",
4108 MODULE_DEVICE_TABLE(of, ucc_geth_match);
4110 static struct of_platform_driver ucc_geth_driver = {
4111 .name = "ucc_geth",
4112 .match_table = ucc_geth_match,
4113 .probe = ucc_geth_probe,
4114 .remove = ucc_geth_remove,
4115 };
4117 static int __init ucc_geth_init(void)
4118 {
4119 int i, ret;
4121 ret = uec_mdio_init();
4123 if (ret)
4124 return ret;
4126 if (netif_msg_drv(&debug))
4127 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4128 for (i = 0; i < 8; i++)
4129 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4130 sizeof(ugeth_primary_info));
4132 ret = of_register_platform_driver(&ucc_geth_driver);
4133 if (ret)
4134 uec_mdio_exit();
4136 return ret;
4137 }
4140 static void __exit ucc_geth_exit(void)
4141 {
4142 of_unregister_platform_driver(&ucc_geth_driver);
4143 uec_mdio_exit();
4144 }
4146 module_init(ucc_geth_init);
4147 module_exit(ucc_geth_exit);
4149 MODULE_AUTHOR("Freescale Semiconductor, Inc");
4150 MODULE_DESCRIPTION(DRV_DESC);
4151 MODULE_VERSION(DRV_VERSION);
4152 MODULE_LICENSE("GPL");