/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 unsigned int wait_count = 30;
148 if (!ql_sem_trylock(qdev, sem_mask))
151 } while (--wait_count);
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
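
/* Typical usage, as an illustrative sketch (it mirrors the callers later in
 * this file, which bracket shared-register access with the semaphore):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... touch the shared resource ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * Both ql_sem_trylock() and ql_sem_spinlock() return 0 on success.
 */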
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialize
163 * process, but is also used in kernel thread API such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
			    "register 0x%.08x access error, value = 0x%.08x!\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
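
/* Note: the wait above is a bounded poll - the register is re-read up to
 * UDELAY_COUNT times with a udelay(UDELAY_DELAY) pause between attempts.
 * It bails out early if err_bit is seen, succeeds as soon as bit is set,
 * and otherwise reports the timeout to the caller.
 */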
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
252 * Wait for the bit to clear after signaling hw.
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
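
/* In short, ql_write_cfg() above: DMA-map the control block, grab the ICB
 * hardware semaphore, wait for the CFG bit to go idle, point ICB_L/ICB_H at
 * the mapped block, kick the download by writing CFG, then wait for the bit
 * to clear again before unlocking and unmapping.
 */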
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
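
/* Note: CAM entries are read indirectly through the MAC_ADDR_IDX /
 * MAC_ADDR_DATA register pair, one 32-bit word at a time - two words for a
 * multicast entry plus a third (output/routing) word for CAM MAC entries.
 */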
/* Set up a MAC, multicast or VLAN address for
 * inbound frame matching.
 */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				    (addr[4] << 8) | (addr[5]);
379 ql_wait_reg_rdy(qdev,
380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 ql_write32(qdev, MAC_ADDR_DATA, lower);
388 ql_wait_reg_rdy(qdev,
389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
392 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 ql_write32(qdev, MAC_ADDR_DATA, upper);
397 ql_wait_reg_rdy(qdev,
398 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
401 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402 (index << MAC_ADDR_IDX_SHIFT) | /* index */
			/* This field should also include the queue id
			 * and possibly the function id. Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
418 case MAC_ADDR_TYPE_VLAN:
420 u32 enable_bit = *((u32 *) &addr[0]);
421 /* For VLAN, the addr actually holds a bit that
422 * either enables or disables the vlan id we are
423 * addressing. It's either MAC_ADDR_E on or off.
424 * That's bit-27 we're talking about.
427 ql_wait_reg_rdy(qdev,
428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
431 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432 (index << MAC_ADDR_IDX_SHIFT) | /* index */
434 enable_bit); /* enable/disable */
437 case MAC_ADDR_TYPE_MULTI_FLTR:
439 netif_crit(qdev, ifup, qdev->ndev,
440 "Address type %d not yet supported.\n", type);
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing,
 * especially in a bonding environment.
 */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
454 char zero_mac_addr[ETH_ALEN];
458 addr = &qdev->current_mac_addr[0];
459 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 "Set Mac addr %pM\n", addr);
462 memset(zero_mac_addr, 0, ETH_ALEN);
463 addr = &zero_mac_addr[0];
464 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 "Clearing MAC address\n");
467 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
470 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 netif_err(qdev, ifup, qdev->ndev,
475 "Failed to init mac address.\n");
479 void ql_link_on(struct ql_adapter *qdev)
481 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 netif_carrier_on(qdev->ndev);
483 ql_set_mac_addr(qdev, 1);
486 void ql_link_off(struct ql_adapter *qdev)
488 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
493 /* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
509 *value = ql_read32(qdev, RT_DATA);
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
522 int status = -EINVAL; /* Return error if no mask match. */
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 case 0: /* Clear the E-bit on an entry. */
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
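
/* Note: each routing slot is programmed through the RT_IDX / RT_DATA
 * indirect register pair - RT_IDX selects the slot, destination and the
 * enable (E) bit, while RT_DATA holds the frame-match mask for that slot.
 */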
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
619 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
624 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628 * Otherwise, we may have multiple outstanding workers and don't want to
629 * enable until the last one finishes. In this case, the irq_cnt gets
630 * incremented every time we queue a worker and decremented every time
631 * a worker finishes. Once it hits zero we enable the interrupt.
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
636 unsigned long hw_flags = 0;
637 struct intr_context *ctx = qdev->intr_context + intr;
639 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 /* Always enable if we're MSIX multi interrupts and
641 * it's not the default (zeroeth) interrupt.
643 ql_write32(qdev, INTR_EN,
645 var = ql_read32(qdev, STS);
649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 ql_write32(qdev, INTR_EN,
653 var = ql_read32(qdev, STS);
655 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
662 struct intr_context *ctx;
664 /* HW disables for us if we're MSIX multi interrupts and
665 * it's not the default (zeroeth) interrupt.
667 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
670 ctx = qdev->intr_context + intr;
671 spin_lock(&qdev->hw_lock);
672 if (!atomic_read(&ctx->irq_cnt)) {
673 ql_write32(qdev, INTR_EN,
675 var = ql_read32(qdev, STS);
677 atomic_inc(&ctx->irq_cnt);
678 spin_unlock(&qdev->hw_lock);
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
685 for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
690 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 ql_enable_completion_interrupt(qdev, i);
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
702 __le16 *flash = (__le16 *)&qdev->flash;
704 status = strncmp((char *)&qdev->flash, str, 4);
706 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
710 for (i = 0; i < size; i++)
711 csum += le16_to_cpu(*flash++);
714 netif_err(qdev, ifup, qdev->ndev,
715 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
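
/* Note: a flash image is accepted when its first four bytes match the
 * expected signature string and the 16-bit words of the block sum to zero
 * (mod 2^16); any other checksum is rejected above.
 */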
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
728 /* set up for reg read */
729 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
735 /* This data is stored on flash as an array of
736 * __le32. Since ql_read32() returns cpu endian
737 * we need to swap it back.
739 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
748 __le32 *p = (__le32 *)&qdev->flash;
752 /* Get flash offset for function and adjust
756 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
763 size = sizeof(struct flash_params_8000) / sizeof(u32);
764 for (i = 0; i < size; i++, p++) {
765 status = ql_read_flash_word(qdev, i+offset, p);
767 netif_err(qdev, ifup, qdev->ndev,
768 "Error reading flash.\n");
773 status = ql_validate_flash(qdev,
774 sizeof(struct flash_params_8000) / sizeof(u16),
777 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
782 /* Extract either manufacturer or BOFM modified
785 if (qdev->flash.flash_params_8000.data_type1 == 2)
787 qdev->flash.flash_params_8000.mac_addr1,
788 qdev->ndev->addr_len);
791 qdev->flash.flash_params_8000.mac_addr,
792 qdev->ndev->addr_len);
794 if (!is_valid_ether_addr(mac_addr)) {
795 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
800 memcpy(qdev->ndev->dev_addr,
802 qdev->ndev->addr_len);
805 ql_sem_unlock(qdev, SEM_FLASH_MASK);
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
813 __le32 *p = (__le32 *)&qdev->flash;
815 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817 /* Second function's parameters follow the first
823 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
826 for (i = 0; i < size; i++, p++) {
827 status = ql_read_flash_word(qdev, i+offset, p);
829 netif_err(qdev, ifup, qdev->ndev,
830 "Error reading flash.\n");
836 status = ql_validate_flash(qdev,
837 sizeof(struct flash_params_8012) / sizeof(u16),
840 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
845 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 memcpy(qdev->ndev->dev_addr,
851 qdev->flash.flash_params_8012.mac_addr,
852 qdev->ndev->addr_len);
855 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
866 /* wait for reg to come ready */
867 status = ql_wait_reg_rdy(qdev,
868 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
871 /* write the data to the data reg */
872 ql_write32(qdev, XGMAC_DATA, data);
873 /* trigger the write */
874 ql_write32(qdev, XGMAC_ADDR, reg);
/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
885 /* wait for reg to come ready */
886 status = ql_wait_reg_rdy(qdev,
887 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
890 /* set up for reg read */
891 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 /* wait for reg to come ready */
893 status = ql_wait_reg_rdy(qdev,
894 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
898 *data = ql_read32(qdev, XGMAC_DATA);
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
910 status = ql_read_xgmac_reg(qdev, reg, &lo);
914 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
918 *data = (u64) lo | ((u64) hi << 32);
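
/* Note: the 64-bit statistic is read as two 32-bit halves (low word at reg,
 * high word at reg + 4), so the combined value is not an atomic snapshot.
 */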
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
928 * Get MPI firmware version for driver banner
931 status = ql_mb_about_fw(qdev);
934 status = ql_mb_get_fw_state(qdev);
937 /* Wake up a worker to get/set the TX/RX frame sizes. */
938 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
954 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 /* Another function has the semaphore, so
956 * wait for the port init bit to come ready.
958 netif_info(qdev, link, qdev->ndev,
959 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 netif_crit(qdev, link, qdev->ndev,
963 "Port initialize timed out.\n");
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
969 /* Set the core reset. */
970 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
973 data |= GLOBAL_CFG_RESET;
974 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
978 /* Clear the core reset and turn on jumbo for receiver. */
979 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981 data |= GLOBAL_CFG_TX_STAT_EN;
982 data |= GLOBAL_CFG_RX_STAT_EN;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable transmitter, and clear its reset. */
988 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
991 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992 data |= TX_CFG_EN; /* Enable the transmitter. */
993 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable receiver and clear its reset. */
998 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1001 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002 data |= RX_CFG_EN; /* Enable the receiver. */
1003 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1007 /* Turn on jumbo. */
1009 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1013 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1017 /* Signal to the world that the port is enabled. */
1018 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026 return PAGE_SIZE << qdev->lbq_buf_order;
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 rx_ring->lbq_curr_idx++;
1034 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 rx_ring->lbq_curr_idx = 0;
1036 rx_ring->lbq_free_cnt++;
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 struct rx_ring *rx_ring)
1043 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045 pci_dma_sync_single_for_cpu(qdev->pdev,
1046 dma_unmap_addr(lbq_desc, mapaddr),
1047 rx_ring->lbq_buf_size,
1048 PCI_DMA_FROMDEVICE);
1050 /* If it's the last chunk of our master page then
1053 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 == ql_lbq_block_size(qdev))
1055 pci_unmap_page(qdev->pdev,
1056 lbq_desc->p.pg_chunk.map,
1057 ql_lbq_block_size(qdev),
1058 PCI_DMA_FROMDEVICE);
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 rx_ring->sbq_curr_idx++;
1067 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 rx_ring->sbq_curr_idx = 0;
1069 rx_ring->sbq_free_cnt++;
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1076 rx_ring->cnsmr_idx++;
1077 rx_ring->curr_entry++;
1078 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 rx_ring->cnsmr_idx = 0;
1080 rx_ring->curr_entry = rx_ring->cq_base;
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 struct bq_desc *lbq_desc)
1092 if (!rx_ring->pg_chunk.page) {
1094 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n");
1113 rx_ring->pg_chunk.map = map;
1114 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117 /* Copy the current master pg_chunk info
1118 * to the current descriptor.
1120 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122 /* Adjust the master page chunk for next
1125 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 rx_ring->pg_chunk.page = NULL;
1128 lbq_desc->p.pg_chunk.last_flag = 1;
1130 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 get_page(rx_ring->pg_chunk.page);
1132 lbq_desc->p.pg_chunk.last_flag = 0;
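
/* In short: one higher-order "master" page (ql_lbq_block_size() bytes) is
 * DMA-mapped once and carved into lbq_buf_size chunks. Every chunk except
 * the last takes an extra page reference via get_page(); the chunk that
 * exhausts the page is tagged with last_flag, and the whole-page mapping is
 * torn down when that final chunk is consumed (see ql_get_curr_lchunk()).
 */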
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139 u32 clean_idx = rx_ring->lbq_clean_idx;
1140 u32 start_idx = clean_idx;
1141 struct bq_desc *lbq_desc;
1145 while (rx_ring->lbq_free_cnt > 32) {
1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 "lbq: try cleaning clean_idx = %d.\n",
1150 lbq_desc = &rx_ring->lbq[clean_idx];
1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1153 netif_err(qdev, ifup, qdev->ndev,
1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1159 map = lbq_desc->p.pg_chunk.map +
1160 lbq_desc->p.pg_chunk.offset;
1161 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 dma_unmap_len_set(lbq_desc, maplen,
1163 rx_ring->lbq_buf_size);
1164 *lbq_desc->addr = cpu_to_le64(map);
1166 pci_dma_sync_single_for_device(qdev->pdev, map,
1167 rx_ring->lbq_buf_size,
1168 PCI_DMA_FROMDEVICE);
1170 if (clean_idx == rx_ring->lbq_len)
1174 rx_ring->lbq_clean_idx = clean_idx;
1175 rx_ring->lbq_prod_idx += 16;
1176 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 rx_ring->lbq_prod_idx = 0;
1178 rx_ring->lbq_free_cnt -= 16;
1181 if (start_idx != clean_idx) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: updating prod idx = %d.\n",
1184 rx_ring->lbq_prod_idx);
1185 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 rx_ring->lbq_prod_idx_db_reg);
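
/* Note: large buffers are posted to the hardware 16 descriptors at a time,
 * and the producer index doorbell is rung once after the refill loop if
 * anything was posted; ql_update_sbq() below follows the same pattern for
 * the small buffer queue.
 */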
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193 u32 clean_idx = rx_ring->sbq_clean_idx;
1194 u32 start_idx = clean_idx;
1195 struct bq_desc *sbq_desc;
1199 while (rx_ring->sbq_free_cnt > 16) {
1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 sbq_desc = &rx_ring->sbq[clean_idx];
1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 "sbq: try cleaning clean_idx = %d.\n",
1205 if (sbq_desc->p.skb == NULL) {
1206 netif_printk(qdev, rx_status, KERN_DEBUG,
1208 "sbq: getting new skb for index %d.\n",
1211 netdev_alloc_skb(qdev->ndev,
1213 if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216 rx_ring->sbq_clean_idx = clean_idx;
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1239 if (clean_idx == rx_ring->sbq_len)
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
			/* Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1286 "unmapping OAL area.\n");
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1291 dma_unmap_len(&tx_ring_desc->map[i],
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1314 int len = skb_headlen(skb);
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1325 * Map the skb buffer first.
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1334 return NETDEV_TX_BUSY;
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1344 * This loop fills the remainder of the 8 address descriptors
1345 * in the IOCB. If there are more than 7 fragments, then the
1346 * eighth address desc will point to an external list (OAL).
1347 * When this happens, the remainder of the frags will be stored
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1384 tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 struct rx_ring *rx_ring)
1440 struct nic_stats *stats = &qdev->nic_stats;
1442 stats->rx_err_count++;
1443 rx_ring->rx_errors++;
1445 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447 stats->rx_code_err++;
1449 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450 stats->rx_oversize_err++;
1452 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453 stats->rx_undersize_err++;
1455 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456 stats->rx_preamble_err++;
1458 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459 stats->rx_frame_len_err++;
1461 case IB_MAC_IOCB_RSP_ERR_CRC:
1462 stats->rx_crc_err++;
1468 /* Process an inbound completion from an rx ring. */
1469 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1470 struct rx_ring *rx_ring,
1471 struct ib_mac_iocb_rsp *ib_mac_rsp,
1475 struct sk_buff *skb;
1476 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1477 struct napi_struct *napi = &rx_ring->napi;
1479 /* Frame error, so drop the packet. */
1480 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1481 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1482 put_page(lbq_desc->p.pg_chunk.page);
1485 napi->dev = qdev->ndev;
1487 skb = napi_get_frags(napi);
1489 netif_err(qdev, drv, qdev->ndev,
1490 "Couldn't get an skb, exiting.\n");
1491 rx_ring->rx_dropped++;
1492 put_page(lbq_desc->p.pg_chunk.page);
1495 prefetch(lbq_desc->p.pg_chunk.va);
1496 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1497 lbq_desc->p.pg_chunk.page,
1498 lbq_desc->p.pg_chunk.offset,
1502 skb->data_len += length;
1503 skb->truesize += length;
1504 skb_shinfo(skb)->nr_frags++;
1506 rx_ring->rx_packets++;
1507 rx_ring->rx_bytes += length;
1508 skb->ip_summed = CHECKSUM_UNNECESSARY;
1509 skb_record_rx_queue(skb, rx_ring->cq_id);
1510 if (vlan_id != 0xffff)
1511 __vlan_hwaccel_put_tag(skb, vlan_id);
1512 napi_gro_frags(napi);
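
/* Note: in this GRO path the received page chunk is attached directly to
 * the napi GRO skb as a page fragment - no copy is made - and the page
 * reference is handed to the stack by napi_gro_frags().
 */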
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517 struct rx_ring *rx_ring,
1518 struct ib_mac_iocb_rsp *ib_mac_rsp,
1522 struct net_device *ndev = qdev->ndev;
1523 struct sk_buff *skb = NULL;
1525 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526 struct napi_struct *napi = &rx_ring->napi;
1528 skb = netdev_alloc_skb(ndev, length);
1530 netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
1532 rx_ring->rx_dropped++;
1533 put_page(lbq_desc->p.pg_chunk.page);
1537 addr = lbq_desc->p.pg_chunk.va;
1540 /* Frame error, so drop the packet. */
1541 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1542 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1546 /* The max framesize filter on this chip is set higher than
1547 * MTU since FCoE uses 2k frames.
1549 if (skb->len > ndev->mtu + ETH_HLEN) {
1550 netif_err(qdev, drv, qdev->ndev,
1551 "Segment too small, dropping.\n");
1552 rx_ring->rx_dropped++;
1555 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1556 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1557 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1559 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1560 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1562 skb->len += length-ETH_HLEN;
1563 skb->data_len += length-ETH_HLEN;
1564 skb->truesize += length-ETH_HLEN;
1566 rx_ring->rx_packets++;
1567 rx_ring->rx_bytes += skb->len;
1568 skb->protocol = eth_type_trans(skb, ndev);
1569 skb_checksum_none_assert(skb);
1571 if ((ndev->features & NETIF_F_RXCSUM) &&
1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1574 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1575 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1576 "TCP checksum done!\n");
1577 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1579 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1580 /* Unfragmented ipv4 UDP frame. */
1582 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1583 if (!(iph->frag_off &
1584 htons(IP_MF|IP_OFFSET))) {
1585 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586 netif_printk(qdev, rx_status, KERN_DEBUG,
1588 "UDP checksum done!\n");
1593 skb_record_rx_queue(skb, rx_ring->cq_id);
1594 if (vlan_id != 0xffff)
1595 __vlan_hwaccel_put_tag(skb, vlan_id);
1596 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1597 napi_gro_receive(napi, skb);
1599 netif_receive_skb(skb);
1602 dev_kfree_skb_any(skb);
1603 put_page(lbq_desc->p.pg_chunk.page);
1606 /* Process an inbound completion from an rx ring. */
1607 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1608 struct rx_ring *rx_ring,
1609 struct ib_mac_iocb_rsp *ib_mac_rsp,
1613 struct net_device *ndev = qdev->ndev;
1614 struct sk_buff *skb = NULL;
1615 struct sk_buff *new_skb = NULL;
1616 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1618 skb = sbq_desc->p.skb;
1619 /* Allocate new_skb and copy */
1620 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1621 if (new_skb == NULL) {
1622 netif_err(qdev, probe, qdev->ndev,
1623 "No skb available, drop the packet.\n");
1624 rx_ring->rx_dropped++;
1627 skb_reserve(new_skb, NET_IP_ALIGN);
1628 memcpy(skb_put(new_skb, length), skb->data, length);
1631 /* Frame error, so drop the packet. */
1632 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1633 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1634 dev_kfree_skb_any(skb);
1638 /* loopback self test for ethtool */
1639 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1640 ql_check_lb_frame(qdev, skb);
1641 dev_kfree_skb_any(skb);
1645 /* The max framesize filter on this chip is set higher than
1646 * MTU since FCoE uses 2k frames.
1648 if (skb->len > ndev->mtu + ETH_HLEN) {
1649 dev_kfree_skb_any(skb);
1650 rx_ring->rx_dropped++;
1654 prefetch(skb->data);
1655 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "Promiscuous Packet.\n");
1669 rx_ring->rx_packets++;
1670 rx_ring->rx_bytes += skb->len;
1671 skb->protocol = eth_type_trans(skb, ndev);
1672 skb_checksum_none_assert(skb);
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1677 if ((ndev->features & NETIF_F_RXCSUM) &&
1678 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1680 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 "TCP checksum done!\n");
1683 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr *iph = (struct iphdr *) skb->data;
1688 if (!(iph->frag_off &
1689 htons(IP_MF|IP_OFFSET))) {
1690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 netif_printk(qdev, rx_status, KERN_DEBUG,
1693 "UDP checksum done!\n");
1698 skb_record_rx_queue(skb, rx_ring->cq_id);
1699 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb);
1704 netif_receive_skb(skb);
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1709 void *temp_addr = skb->data;
1711 /* Undo the skb_reserve(skb,32) we did before
1712 * giving to hardware, and realign data on
1713 * a 2-byte boundary.
1715 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 struct rx_ring *rx_ring,
1728 struct ib_mac_iocb_rsp *ib_mac_rsp)
1730 struct bq_desc *lbq_desc;
1731 struct bq_desc *sbq_desc;
1732 struct sk_buff *skb = NULL;
1733 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1737 * Handle the header buffer if present.
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Header of %d bytes in small buffer.\n", hdr_len);
1744 * Headers fit nicely into a small buffer.
1746 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 pci_unmap_single(qdev->pdev,
1748 dma_unmap_addr(sbq_desc, mapaddr),
1749 dma_unmap_len(sbq_desc, maplen),
1750 PCI_DMA_FROMDEVICE);
1751 skb = sbq_desc->p.skb;
1752 ql_realign_skb(skb, hdr_len);
1753 skb_put(skb, hdr_len);
1754 sbq_desc->p.skb = NULL;
1758 * Handle the data buffer(s).
1760 if (unlikely(!length)) { /* Is there data too? */
1761 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 "No Data buffer in this packet.\n");
1766 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 pci_dma_sync_single_for_cpu(qdev->pdev,
1781 (sbq_desc, mapaddr),
1784 PCI_DMA_FROMDEVICE);
1785 memcpy(skb_put(skb, length),
1786 sbq_desc->p.skb->data, length);
1787 pci_dma_sync_single_for_device(qdev->pdev,
1794 PCI_DMA_FROMDEVICE);
1796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "%d bytes in a single small buffer.\n",
1799 sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 skb = sbq_desc->p.skb;
1801 ql_realign_skb(skb, length);
1802 skb_put(skb, length);
1803 pci_unmap_single(qdev->pdev,
1804 dma_unmap_addr(sbq_desc,
1806 dma_unmap_len(sbq_desc,
1808 PCI_DMA_FROMDEVICE);
1809 sbq_desc->p.skb = NULL;
1811 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc->p.pg_chunk.offset, length);
1825 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1829 skb->data_len += length;
1830 skb->truesize += length;
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 skb = netdev_alloc_skb(qdev->ndev, length);
1840 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 "No skb available, drop the packet.\n");
1844 pci_unmap_page(qdev->pdev,
1845 dma_unmap_addr(lbq_desc,
1847 dma_unmap_len(lbq_desc, maplen),
1848 PCI_DMA_FROMDEVICE);
1849 skb_reserve(skb, NET_IP_ALIGN);
1850 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1853 skb_fill_page_desc(skb, 0,
1854 lbq_desc->p.pg_chunk.page,
1855 lbq_desc->p.pg_chunk.offset,
1858 skb->data_len += length;
1859 skb->truesize += length;
1861 __pskb_pull_tail(skb,
1862 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 VLAN_ETH_HLEN : ETH_HLEN);
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 *        buffer will hold 32 of them. The thing is,
		 *        we'll use 3 max for our 9000 byte jumbo
		 *        frames. If the MTU goes up we could
		 *        eventually be in trouble.
		 */
1878 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 pci_unmap_single(qdev->pdev,
1880 dma_unmap_addr(sbq_desc, mapaddr),
1881 dma_unmap_len(sbq_desc, maplen),
1882 PCI_DMA_FROMDEVICE);
1883 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * data pages.
			 */
1893 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 "%d bytes of headers & data in chain of large.\n",
1896 skb = sbq_desc->p.skb;
1897 sbq_desc->p.skb = NULL;
1898 skb_reserve(skb, NET_IP_ALIGN);
1900 while (length > 0) {
1901 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 size = (length < rx_ring->lbq_buf_size) ? length :
1903 rx_ring->lbq_buf_size;
1905 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 "Adding page %d to skb for %d bytes.\n",
1908 skb_fill_page_desc(skb, i,
1909 lbq_desc->p.pg_chunk.page,
1910 lbq_desc->p.pg_chunk.offset,
1913 skb->data_len += size;
1914 skb->truesize += size;
1918 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 VLAN_ETH_HLEN : ETH_HLEN);
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926 struct rx_ring *rx_ring,
1927 struct ib_mac_iocb_rsp *ib_mac_rsp,
1930 struct net_device *ndev = qdev->ndev;
1931 struct sk_buff *skb = NULL;
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1935 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 if (unlikely(!skb)) {
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "No skb available, drop packet.\n");
1939 rx_ring->rx_dropped++;
1943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1946 dev_kfree_skb_any(skb);
1950 /* The max framesize filter on this chip is set higher than
1951 * MTU since FCoE uses 2k frames.
1953 if (skb->len > ndev->mtu + ETH_HLEN) {
1954 dev_kfree_skb_any(skb);
1955 rx_ring->rx_dropped++;
1959 /* loopback self test for ethtool */
1960 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1961 ql_check_lb_frame(qdev, skb);
1962 dev_kfree_skb_any(skb);
1966 prefetch(skb->data);
1967 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1975 rx_ring->rx_multicast++;
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979 "Promiscuous Packet.\n");
1982 skb->protocol = eth_type_trans(skb, ndev);
1983 skb_checksum_none_assert(skb);
1985 /* If rx checksum is on, and there are no
1986 * csum or frame errors.
1988 if ((ndev->features & NETIF_F_RXCSUM) &&
1989 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "TCP checksum done!\n");
1994 skb->ip_summed = CHECKSUM_UNNECESSARY;
1995 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997 /* Unfragmented ipv4 UDP frame. */
1998 struct iphdr *iph = (struct iphdr *) skb->data;
1999 if (!(iph->frag_off &
2000 htons(IP_MF|IP_OFFSET))) {
2001 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n");
2008 rx_ring->rx_packets++;
2009 rx_ring->rx_bytes += skb->len;
2010 skb_record_rx_queue(skb, rx_ring->cq_id);
2011 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2012 __vlan_hwaccel_put_tag(skb, vlan_id);
2013 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2014 napi_gro_receive(&rx_ring->napi, skb);
2016 netif_receive_skb(skb);
2019 /* Process an inbound completion from an rx ring. */
2020 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2021 struct rx_ring *rx_ring,
2022 struct ib_mac_iocb_rsp *ib_mac_rsp)
2024 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2025 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2026 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2027 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2029 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2031 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2032 /* The data and headers are split into
2035 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2037 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2038 /* The data fit in a single small buffer.
2039 * Allocate a new skb, copy the data and
2040 * return the buffer to the free pool.
2042 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2044 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2045 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2046 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2047 /* TCP packet in a page chunk that's been checksummed.
2048 * Tack it on to our GRO skb and let it go.
2050 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2052 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2053 /* Non-TCP packet in a page chunk. Allocate an
2054 * skb, tack it on frags, and send it up.
2056 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2059 /* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
2062 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 return (unsigned long)length;
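
/* To summarize the dispatch above: header-split frames go to the split
 * handler; frames that fit a small buffer are copied into a fresh skb;
 * checksummed TCP frames in a page chunk ride the GRO path; other page-chunk
 * frames get a page-based skb; everything else falls back to the split
 * frame logic.
 */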
2069 /* Process an outbound completion from an rx ring. */
2070 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2071 struct ob_mac_iocb_rsp *mac_rsp)
2073 struct tx_ring *tx_ring;
2074 struct tx_ring_desc *tx_ring_desc;
2076 QL_DUMP_OB_MAC_RSP(mac_rsp);
2077 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2078 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2079 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2080 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2081 tx_ring->tx_packets++;
2082 dev_kfree_skb(tx_ring_desc->skb);
2083 tx_ring_desc->skb = NULL;
2085 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2088 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2089 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2090 netif_warn(qdev, tx_done, qdev->ndev,
2091 "Total descriptor length did not match transfer length.\n");
2093 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2094 netif_warn(qdev, tx_done, qdev->ndev,
2095 "Frame too short to be valid, not sent.\n");
2097 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2098 netif_warn(qdev, tx_done, qdev->ndev,
2099 "Frame too long, but sent anyway.\n");
2101 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2102 netif_warn(qdev, tx_done, qdev->ndev,
2103 "PCI backplane error. Frame not sent.\n");
2106 atomic_inc(&tx_ring->tx_count);
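
/* Note: tx_count counts free tx descriptors. The send path (not shown in
 * this section) decrements it when an IOCB is posted, the completion above
 * replenishes it, and ql_clean_outbound_rx_ring() uses it to decide when a
 * stopped tx subqueue can be woken.
 */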
2109 /* Fire up a handler to reset the MPI processor. */
2110 void ql_queue_fw_error(struct ql_adapter *qdev)
2113 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2116 void ql_queue_asic_error(struct ql_adapter *qdev)
2119 ql_disable_interrupts(qdev);
2120 /* Clear adapter up bit to signal the recovery
2121 * process that it shouldn't kill the reset worker
2124 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2125 /* Set asic recovery bit to indicate reset process that we are
2126 * in fatal error recovery process rather than normal close
2128 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2129 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2132 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2133 struct ib_ae_iocb_rsp *ib_ae_rsp)
2135 switch (ib_ae_rsp->event) {
2136 case MGMT_ERR_EVENT:
2137 netif_err(qdev, rx_err, qdev->ndev,
2138 "Management Processor Fatal Error.\n");
2139 ql_queue_fw_error(qdev);
2142 case CAM_LOOKUP_ERR_EVENT:
2143 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2144 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2145 ql_queue_asic_error(qdev);
2148 case SOFT_ECC_ERROR_EVENT:
2149 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2150 ql_queue_asic_error(qdev);
2153 case PCI_ERR_ANON_BUF_RD:
2154 netdev_err(qdev->ndev, "PCI error occurred when reading "
2155 "anonymous buffers from rx_ring %d.\n",
2157 ql_queue_asic_error(qdev);
2161 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2163 ql_queue_asic_error(qdev);
2168 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2170 struct ql_adapter *qdev = rx_ring->qdev;
2171 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2172 struct ob_mac_iocb_rsp *net_rsp = NULL;
2175 struct tx_ring *tx_ring;
2176 /* While there are entries in the completion queue. */
2177 while (prod != rx_ring->cnsmr_idx) {
2179 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2181 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2183 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2185 switch (net_rsp->opcode) {
2187 case OPCODE_OB_MAC_TSO_IOCB:
2188 case OPCODE_OB_MAC_IOCB:
2189 ql_process_mac_tx_intr(qdev, net_rsp);
2192 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2193 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197 ql_update_cq(rx_ring);
2198 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2202 ql_write_cq_idx(rx_ring);
2203 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2204 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2205 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2207 * The queue got stopped because the tx_ring was full.
2208 * Wake it up, because it's now at least 25% empty.
2210 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2216 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2218 struct ql_adapter *qdev = rx_ring->qdev;
2219 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2220 struct ql_net_rsp_iocb *net_rsp;
2223 /* While there are entries in the completion queue. */
2224 while (prod != rx_ring->cnsmr_idx) {
2226 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2228 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2230 net_rsp = rx_ring->curr_entry;
2232 switch (net_rsp->opcode) {
2233 case OPCODE_IB_MAC_IOCB:
2234 ql_process_mac_rx_intr(qdev, rx_ring,
2235 (struct ib_mac_iocb_rsp *)
2239 case OPCODE_IB_AE_IOCB:
2240 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2244 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2245 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250 ql_update_cq(rx_ring);
2251 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2252 if (count == budget)
2255 ql_update_buffer_queues(qdev, rx_ring);
2256 ql_write_cq_idx(rx_ring);
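/* NAPI poll routine used with MSI-X: first service any TX completion
 * rings mapped to this vector, then the vector's own RSS ring, and
 * re-enable the completion interrupt once work_done < budget.
 */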
2260 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2262 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2263 struct ql_adapter *qdev = rx_ring->qdev;
2264 struct rx_ring *trx_ring;
2265 int i, work_done = 0;
2266 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2268 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2269 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2271 /* Service the TX rings first. They start
2272 * right after the RSS rings. */
2273 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2274 trx_ring = &qdev->rx_ring[i];
2275 /* If this TX completion ring belongs to this vector and
2276 * it's not empty then service it.
2278 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2279 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2280 trx_ring->cnsmr_idx)) {
2281 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2282 "%s: Servicing TX completion ring %d.\n",
2283 __func__, trx_ring->cq_id);
2284 ql_clean_outbound_rx_ring(trx_ring);
2289 * Now service the RSS ring if it's active.
2291 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2292 rx_ring->cnsmr_idx) {
2293 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2294 "%s: Servicing RX completion ring %d.\n",
2295 __func__, rx_ring->cq_id);
2296 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2299 if (work_done < budget) {
2300 napi_complete(napi);
2301 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2308 struct ql_adapter *qdev = netdev_priv(ndev);
2310 if (features & NETIF_F_HW_VLAN_RX) {
2311 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2312 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2314 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2318 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2319 netdev_features_t features)
2322 * Since there is no support for separate rx/tx vlan accel
2323 * enable/disable, make sure the tx flag is always in the same state as rx.
2325 if (features & NETIF_F_HW_VLAN_RX)
2326 features |= NETIF_F_HW_VLAN_TX;
2328 features &= ~NETIF_F_HW_VLAN_TX;
2333 static int qlge_set_features(struct net_device *ndev,
2334 netdev_features_t features)
2336 netdev_features_t changed = ndev->features ^ features;
2338 if (changed & NETIF_F_HW_VLAN_RX)
2339 qlge_vlan_mode(ndev, features);
2344 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2346 u32 enable_bit = MAC_ADDR_E;
2349 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2350 MAC_ADDR_TYPE_VLAN, vid);
2352 netif_err(qdev, ifup, qdev->ndev,
2353 "Failed to init vlan address.\n");
2357 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2359 struct ql_adapter *qdev = netdev_priv(ndev);
2363 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367 err = __qlge_vlan_rx_add_vid(qdev, vid);
2368 set_bit(vid, qdev->active_vlans);
2370 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2375 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2380 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2381 MAC_ADDR_TYPE_VLAN, vid);
2383 netif_err(qdev, ifup, qdev->ndev,
2384 "Failed to clear vlan address.\n");
2388 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2390 struct ql_adapter *qdev = netdev_priv(ndev);
2394 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2398 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2399 clear_bit(vid, qdev->active_vlans);
2401 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2406 static void qlge_restore_vlan(struct ql_adapter *qdev)
2411 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2415 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2416 __qlge_vlan_rx_add_vid(qdev, vid);
2418 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2421 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2422 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2424 struct rx_ring *rx_ring = dev_id;
2425 napi_schedule(&rx_ring->napi);
2429 /* This handles a fatal error, MPI activity, and the default
2430 * rx_ring in an MSI-X multiple vector environment.
2431 * In an MSI/Legacy environment it also processes the rest of
2434 static irqreturn_t qlge_isr(int irq, void *dev_id)
2436 struct rx_ring *rx_ring = dev_id;
2437 struct ql_adapter *qdev = rx_ring->qdev;
2438 struct intr_context *intr_context = &qdev->intr_context[0];
2442 spin_lock(&qdev->hw_lock);
2443 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2444 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2445 "Shared Interrupt, Not ours!\n");
2446 spin_unlock(&qdev->hw_lock);
2449 spin_unlock(&qdev->hw_lock);
2451 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2454 * Check for fatal error.
2457 ql_queue_asic_error(qdev);
2458 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2459 var = ql_read32(qdev, ERR_STS);
2460 netdev_err(qdev->ndev, "Resetting chip. "
2461 "Error Status Register = 0x%x\n", var);
2466 * Check MPI processor activity.
2468 if ((var & STS_PI) &&
2469 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2471 * We've got an async event or mailbox completion.
2472 * Handle it and clear the source of the interrupt.
2474 netif_err(qdev, intr, qdev->ndev,
2475 "Got MPI processor interrupt.\n");
2476 ql_disable_completion_interrupt(qdev, intr_context->intr);
2477 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2478 queue_delayed_work_on(smp_processor_id(),
2479 qdev->workqueue, &qdev->mpi_work, 0);
2484 * Get the bit-mask that shows the active queues for this
2485 * pass. Compare it to the queues that this irq services
2486 * and call napi if there's a match.
2488 var = ql_read32(qdev, ISR1);
2489 if (var & intr_context->irq_mask) {
2490 netif_info(qdev, intr, qdev->ndev,
2491 "Waking handler for rx_ring[0].\n");
2492 ql_disable_completion_interrupt(qdev, intr_context->intr);
2493 napi_schedule(&rx_ring->napi);
2496 ql_enable_completion_interrupt(qdev, intr_context->intr);
2497 return work_done ? IRQ_HANDLED : IRQ_NONE;
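/* When the skb is GSO, convert the outbound IOCB into a TSO IOCB: record
 * the header lengths and MSS and seed the TCP pseudo-header checksum so
 * the hardware can finish the per-segment checksums. qlge_send() checks
 * the return value to decide whether plain checksum offload setup
 * (ql_hw_csum_setup()) is still required.
 */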
2500 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2503 if (skb_is_gso(skb)) {
2505 if (skb_header_cloned(skb)) {
2506 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2511 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2512 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2513 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2514 mac_iocb_ptr->total_hdrs_len =
2515 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2516 mac_iocb_ptr->net_trans_offset =
2517 cpu_to_le16(skb_network_offset(skb) |
2518 skb_transport_offset(skb)
2519 << OB_MAC_TRANSPORT_HDR_SHIFT);
2520 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2521 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2522 if (likely(skb->protocol == htons(ETH_P_IP))) {
2523 struct iphdr *iph = ip_hdr(skb);
2525 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2526 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2530 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2531 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2532 tcp_hdr(skb)->check =
2533 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2534 &ipv6_hdr(skb)->daddr,
2542 static void ql_hw_csum_setup(struct sk_buff *skb,
2543 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2546 struct iphdr *iph = ip_hdr(skb);
2548 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2549 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2550 mac_iocb_ptr->net_trans_offset =
2551 cpu_to_le16(skb_network_offset(skb) |
2552 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2554 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2555 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2556 if (likely(iph->protocol == IPPROTO_TCP)) {
2557 check = &(tcp_hdr(skb)->check);
2558 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2559 mac_iocb_ptr->total_hdrs_len =
2560 cpu_to_le16(skb_transport_offset(skb) +
2561 (tcp_hdr(skb)->doff << 2));
2563 check = &(udp_hdr(skb)->check);
2564 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2565 mac_iocb_ptr->total_hdrs_len =
2566 cpu_to_le16(skb_transport_offset(skb) +
2567 sizeof(struct udphdr));
2569 *check = ~csum_tcpudp_magic(iph->saddr,
2570 iph->daddr, len, iph->protocol, 0);
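/* Main transmit path (ndo_start_xmit): pick the TX ring from the skb's
 * queue mapping, build an OB MAC (or TSO) IOCB, map the skb for DMA,
 * advance the producer index and ring the doorbell. The subqueue is
 * stopped when fewer than two descriptors remain.
 */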
2573 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2575 struct tx_ring_desc *tx_ring_desc;
2576 struct ob_mac_iocb_req *mac_iocb_ptr;
2577 struct ql_adapter *qdev = netdev_priv(ndev);
2579 struct tx_ring *tx_ring;
2580 u32 tx_ring_idx = (u32) skb->queue_mapping;
2582 tx_ring = &qdev->tx_ring[tx_ring_idx];
2584 if (skb_padto(skb, ETH_ZLEN))
2585 return NETDEV_TX_OK;
2587 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2588 netif_info(qdev, tx_queued, qdev->ndev,
2589 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2590 __func__, tx_ring_idx);
2591 netif_stop_subqueue(ndev, tx_ring->wq_id);
2592 tx_ring->tx_errors++;
2593 return NETDEV_TX_BUSY;
2595 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2596 mac_iocb_ptr = tx_ring_desc->queue_entry;
2597 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2599 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2600 mac_iocb_ptr->tid = tx_ring_desc->index;
2601 /* We use the upper 32-bits to store the tx queue for this IO.
2602 * When we get the completion we can use it to establish the context.
2604 mac_iocb_ptr->txq_idx = tx_ring_idx;
2605 tx_ring_desc->skb = skb;
2607 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2609 if (vlan_tx_tag_present(skb)) {
2610 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2611 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2612 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2613 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2615 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2617 dev_kfree_skb_any(skb);
2618 return NETDEV_TX_OK;
2619 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2620 ql_hw_csum_setup(skb,
2621 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2623 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2625 netif_err(qdev, tx_queued, qdev->ndev,
2626 "Could not map the segments.\n");
2627 tx_ring->tx_errors++;
2628 return NETDEV_TX_BUSY;
2630 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2631 tx_ring->prod_idx++;
2632 if (tx_ring->prod_idx == tx_ring->wq_len)
2633 tx_ring->prod_idx = 0;
2636 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2637 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2638 "tx queued, slot %d, len %d\n",
2639 tx_ring->prod_idx, skb->len);
2641 atomic_dec(&tx_ring->tx_count);
2643 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2644 netif_stop_subqueue(ndev, tx_ring->wq_id);
2645 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2647 * The queue got stopped because the tx_ring was full.
2648 * Wake it up, because it's now at least 25% empty.
2650 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2652 return NETDEV_TX_OK;
2656 static void ql_free_shadow_space(struct ql_adapter *qdev)
2658 if (qdev->rx_ring_shadow_reg_area) {
2659 pci_free_consistent(qdev->pdev,
2661 qdev->rx_ring_shadow_reg_area,
2662 qdev->rx_ring_shadow_reg_dma);
2663 qdev->rx_ring_shadow_reg_area = NULL;
2665 if (qdev->tx_ring_shadow_reg_area) {
2666 pci_free_consistent(qdev->pdev,
2668 qdev->tx_ring_shadow_reg_area,
2669 qdev->tx_ring_shadow_reg_dma);
2670 qdev->tx_ring_shadow_reg_area = NULL;
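/* Allocate one DMA-coherent page each for the RX and TX shadow register
 * areas; ql_start_rx_ring() and ql_start_tx_ring() carve the rings'
 * producer/consumer index shadow registers out of these pages.
 */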
2674 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2676 qdev->rx_ring_shadow_reg_area =
2677 pci_alloc_consistent(qdev->pdev,
2678 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2679 if (qdev->rx_ring_shadow_reg_area == NULL) {
2680 netif_err(qdev, ifup, qdev->ndev,
2681 "Allocation of RX shadow space failed.\n");
2684 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2685 qdev->tx_ring_shadow_reg_area =
2686 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2687 &qdev->tx_ring_shadow_reg_dma);
2688 if (qdev->tx_ring_shadow_reg_area == NULL) {
2689 netif_err(qdev, ifup, qdev->ndev,
2690 "Allocation of TX shadow space failed.\n");
2691 goto err_wqp_sh_area;
2693 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2697 pci_free_consistent(qdev->pdev,
2699 qdev->rx_ring_shadow_reg_area,
2700 qdev->rx_ring_shadow_reg_dma);
2704 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2706 struct tx_ring_desc *tx_ring_desc;
2708 struct ob_mac_iocb_req *mac_iocb_ptr;
2710 mac_iocb_ptr = tx_ring->wq_base;
2711 tx_ring_desc = tx_ring->q;
2712 for (i = 0; i < tx_ring->wq_len; i++) {
2713 tx_ring_desc->index = i;
2714 tx_ring_desc->skb = NULL;
2715 tx_ring_desc->queue_entry = mac_iocb_ptr;
2719 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2722 static void ql_free_tx_resources(struct ql_adapter *qdev,
2723 struct tx_ring *tx_ring)
2725 if (tx_ring->wq_base) {
2726 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2727 tx_ring->wq_base, tx_ring->wq_base_dma);
2728 tx_ring->wq_base = NULL;
2734 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2735 struct tx_ring *tx_ring)
2738 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2739 &tx_ring->wq_base_dma);
2741 if ((tx_ring->wq_base == NULL) ||
2742 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2746 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747 if (tx_ring->q == NULL)
2752 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753 tx_ring->wq_base, tx_ring->wq_base_dma);
2754 tx_ring->wq_base = NULL;
2756 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2760 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2762 struct bq_desc *lbq_desc;
2764 uint32_t curr_idx, clean_idx;
2766 curr_idx = rx_ring->lbq_curr_idx;
2767 clean_idx = rx_ring->lbq_clean_idx;
2768 while (curr_idx != clean_idx) {
2769 lbq_desc = &rx_ring->lbq[curr_idx];
2771 if (lbq_desc->p.pg_chunk.last_flag) {
2772 pci_unmap_page(qdev->pdev,
2773 lbq_desc->p.pg_chunk.map,
2774 ql_lbq_block_size(qdev),
2775 PCI_DMA_FROMDEVICE);
2776 lbq_desc->p.pg_chunk.last_flag = 0;
2779 put_page(lbq_desc->p.pg_chunk.page);
2780 lbq_desc->p.pg_chunk.page = NULL;
2782 if (++curr_idx == rx_ring->lbq_len)
2788 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2791 struct bq_desc *sbq_desc;
2793 for (i = 0; i < rx_ring->sbq_len; i++) {
2794 sbq_desc = &rx_ring->sbq[i];
2795 if (sbq_desc == NULL) {
2796 netif_err(qdev, ifup, qdev->ndev,
2797 "sbq_desc %d is NULL.\n", i);
2800 if (sbq_desc->p.skb) {
2801 pci_unmap_single(qdev->pdev,
2802 dma_unmap_addr(sbq_desc, mapaddr),
2803 dma_unmap_len(sbq_desc, maplen),
2804 PCI_DMA_FROMDEVICE);
2805 dev_kfree_skb(sbq_desc->p.skb);
2806 sbq_desc->p.skb = NULL;
2811 /* Free all large and small rx buffers associated
2812 * with the completion queues for this device.
2814 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2817 struct rx_ring *rx_ring;
2819 for (i = 0; i < qdev->rx_ring_count; i++) {
2820 rx_ring = &qdev->rx_ring[i];
2822 ql_free_lbq_buffers(qdev, rx_ring);
2824 ql_free_sbq_buffers(qdev, rx_ring);
2828 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2830 struct rx_ring *rx_ring;
2833 for (i = 0; i < qdev->rx_ring_count; i++) {
2834 rx_ring = &qdev->rx_ring[i];
2835 if (rx_ring->type != TX_Q)
2836 ql_update_buffer_queues(qdev, rx_ring);
2840 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2841 struct rx_ring *rx_ring)
2844 struct bq_desc *lbq_desc;
2845 __le64 *bq = rx_ring->lbq_base;
2847 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2848 for (i = 0; i < rx_ring->lbq_len; i++) {
2849 lbq_desc = &rx_ring->lbq[i];
2850 memset(lbq_desc, 0, sizeof(*lbq_desc));
2851 lbq_desc->index = i;
2852 lbq_desc->addr = bq;
2857 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2858 struct rx_ring *rx_ring)
2861 struct bq_desc *sbq_desc;
2862 __le64 *bq = rx_ring->sbq_base;
2864 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2865 for (i = 0; i < rx_ring->sbq_len; i++) {
2866 sbq_desc = &rx_ring->sbq[i];
2867 memset(sbq_desc, 0, sizeof(*sbq_desc));
2868 sbq_desc->index = i;
2869 sbq_desc->addr = bq;
2874 static void ql_free_rx_resources(struct ql_adapter *qdev,
2875 struct rx_ring *rx_ring)
2877 /* Free the small buffer queue. */
2878 if (rx_ring->sbq_base) {
2879 pci_free_consistent(qdev->pdev,
2881 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2882 rx_ring->sbq_base = NULL;
2885 /* Free the small buffer queue control blocks. */
2886 kfree(rx_ring->sbq);
2887 rx_ring->sbq = NULL;
2889 /* Free the large buffer queue. */
2890 if (rx_ring->lbq_base) {
2891 pci_free_consistent(qdev->pdev,
2893 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2894 rx_ring->lbq_base = NULL;
2897 /* Free the large buffer queue control blocks. */
2898 kfree(rx_ring->lbq);
2899 rx_ring->lbq = NULL;
2901 /* Free the rx queue. */
2902 if (rx_ring->cq_base) {
2903 pci_free_consistent(qdev->pdev,
2905 rx_ring->cq_base, rx_ring->cq_base_dma);
2906 rx_ring->cq_base = NULL;
2910 /* Allocate queues and buffers for this completion queue based
2911 * on the values in the parameter structure. */
2912 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2913 struct rx_ring *rx_ring)
2917 * Allocate the completion queue for this rx_ring.
2920 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2921 &rx_ring->cq_base_dma);
2923 if (rx_ring->cq_base == NULL) {
2924 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2928 if (rx_ring->sbq_len) {
2930 * Allocate small buffer queue.
2933 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2934 &rx_ring->sbq_base_dma);
2936 if (rx_ring->sbq_base == NULL) {
2937 netif_err(qdev, ifup, qdev->ndev,
2938 "Small buffer queue allocation failed.\n");
2943 * Allocate small buffer queue control blocks.
2945 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2946 sizeof(struct bq_desc),
2948 if (rx_ring->sbq == NULL)
2951 ql_init_sbq_ring(qdev, rx_ring);
2954 if (rx_ring->lbq_len) {
2956 * Allocate large buffer queue.
2959 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2960 &rx_ring->lbq_base_dma);
2962 if (rx_ring->lbq_base == NULL) {
2963 netif_err(qdev, ifup, qdev->ndev,
2964 "Large buffer queue allocation failed.\n");
2968 * Allocate large buffer queue control blocks.
2970 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2971 sizeof(struct bq_desc),
2973 if (rx_ring->lbq == NULL)
2976 ql_init_lbq_ring(qdev, rx_ring);
2982 ql_free_rx_resources(qdev, rx_ring);
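/* Walk every TX ring and free any skbs still attached to descriptors,
 * unmapping their DMA buffers first; called from ql_adapter_down() so
 * no packets are leaked across an interface down or reset.
 */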
2986 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2988 struct tx_ring *tx_ring;
2989 struct tx_ring_desc *tx_ring_desc;
2993 * Loop through all queues and free
2996 for (j = 0; j < qdev->tx_ring_count; j++) {
2997 tx_ring = &qdev->tx_ring[j];
2998 for (i = 0; i < tx_ring->wq_len; i++) {
2999 tx_ring_desc = &tx_ring->q[i];
3000 if (tx_ring_desc && tx_ring_desc->skb) {
3001 netif_err(qdev, ifdown, qdev->ndev,
3002 "Freeing lost SKB %p, from queue %d, index %d.\n",
3003 tx_ring_desc->skb, j,
3004 tx_ring_desc->index);
3005 ql_unmap_send(qdev, tx_ring_desc,
3006 tx_ring_desc->map_cnt);
3007 dev_kfree_skb(tx_ring_desc->skb);
3008 tx_ring_desc->skb = NULL;
3014 static void ql_free_mem_resources(struct ql_adapter *qdev)
3018 for (i = 0; i < qdev->tx_ring_count; i++)
3019 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3020 for (i = 0; i < qdev->rx_ring_count; i++)
3021 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3022 ql_free_shadow_space(qdev);
3025 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3029 /* Allocate space for our shadow registers and such. */
3030 if (ql_alloc_shadow_space(qdev))
3033 for (i = 0; i < qdev->rx_ring_count; i++) {
3034 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3035 netif_err(qdev, ifup, qdev->ndev,
3036 "RX resource allocation failed.\n");
3040 /* Allocate tx queue resources */
3041 for (i = 0; i < qdev->tx_ring_count; i++) {
3042 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3043 netif_err(qdev, ifup, qdev->ndev,
3044 "TX resource allocation failed.\n");
3051 ql_free_mem_resources(qdev);
3055 /* Set up the rx ring control block and pass it to the chip.
3056 * The control block is defined as
3057 * "Completion Queue Initialization Control Block", or cqicb.
3059 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3061 struct cqicb *cqicb = &rx_ring->cqicb;
3062 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3063 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3064 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3065 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3066 void __iomem *doorbell_area =
3067 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3071 __le64 *base_indirect_ptr;
3074 /* Set up the shadow registers for this ring. */
3075 rx_ring->prod_idx_sh_reg = shadow_reg;
3076 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3077 *rx_ring->prod_idx_sh_reg = 0;
3078 shadow_reg += sizeof(u64);
3079 shadow_reg_dma += sizeof(u64);
3080 rx_ring->lbq_base_indirect = shadow_reg;
3081 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3082 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3083 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3084 rx_ring->sbq_base_indirect = shadow_reg;
3085 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3087 /* PCI doorbell mem area + 0x00 for consumer index register */
3088 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3089 rx_ring->cnsmr_idx = 0;
3090 rx_ring->curr_entry = rx_ring->cq_base;
3092 /* PCI doorbell mem area + 0x04 for valid register */
3093 rx_ring->valid_db_reg = doorbell_area + 0x04;
3095 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3096 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3098 /* PCI doorbell mem area + 0x1c */
3099 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3101 memset((void *)cqicb, 0, sizeof(struct cqicb));
3102 cqicb->msix_vect = rx_ring->irq;
3104 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3105 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3107 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3109 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3112 * Set up the control block load flags.
3114 cqicb->flags = FLAGS_LC | /* Load queue base address */
3115 FLAGS_LV | /* Load MSI-X vector */
3116 FLAGS_LI; /* Load irq delay values */
3117 if (rx_ring->lbq_len) {
3118 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3119 tmp = (u64)rx_ring->lbq_base_dma;
3120 base_indirect_ptr = rx_ring->lbq_base_indirect;
3123 *base_indirect_ptr = cpu_to_le64(tmp);
3124 tmp += DB_PAGE_SIZE;
3125 base_indirect_ptr++;
3127 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3129 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3130 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3131 (u16) rx_ring->lbq_buf_size;
3132 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3133 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3134 (u16) rx_ring->lbq_len;
3135 cqicb->lbq_len = cpu_to_le16(bq_len);
3136 rx_ring->lbq_prod_idx = 0;
3137 rx_ring->lbq_curr_idx = 0;
3138 rx_ring->lbq_clean_idx = 0;
3139 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3141 if (rx_ring->sbq_len) {
3142 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3143 tmp = (u64)rx_ring->sbq_base_dma;
3144 base_indirect_ptr = rx_ring->sbq_base_indirect;
3147 *base_indirect_ptr = cpu_to_le64(tmp);
3148 tmp += DB_PAGE_SIZE;
3149 base_indirect_ptr++;
3151 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3153 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3154 cqicb->sbq_buf_size =
3155 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3156 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3157 (u16) rx_ring->sbq_len;
3158 cqicb->sbq_len = cpu_to_le16(bq_len);
3159 rx_ring->sbq_prod_idx = 0;
3160 rx_ring->sbq_curr_idx = 0;
3161 rx_ring->sbq_clean_idx = 0;
3162 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3164 switch (rx_ring->type) {
3166 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3167 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3170 /* Inbound completion handling rx_rings run in
3171 * separate NAPI contexts.
3173 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3175 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3176 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3179 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3180 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3182 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3183 CFG_LCQ, rx_ring->cq_id);
3185 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
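/* Set up the work queue control block (wqicb) for a TX ring, assign its
 * doorbell and shadow registers, and download the control block to the
 * chip with ql_write_cfg().
 */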
3191 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3193 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3194 void __iomem *doorbell_area =
3195 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3196 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3197 (tx_ring->wq_id * sizeof(u64));
3198 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3199 (tx_ring->wq_id * sizeof(u64));
3203 * Assign doorbell registers for this tx_ring.
3205 /* TX PCI doorbell mem area for tx producer index */
3206 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3207 tx_ring->prod_idx = 0;
3208 /* TX PCI doorbell mem area + 0x04 */
3209 tx_ring->valid_db_reg = doorbell_area + 0x04;
3212 * Assign shadow registers for this tx_ring.
3214 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3215 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3217 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3218 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3219 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3220 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3222 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3224 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3226 ql_init_tx_ring(qdev, tx_ring);
3228 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3229 (u16) tx_ring->wq_id);
3231 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3237 static void ql_disable_msix(struct ql_adapter *qdev)
3239 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3240 pci_disable_msix(qdev->pdev);
3241 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3242 kfree(qdev->msi_x_entry);
3243 qdev->msi_x_entry = NULL;
3244 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3245 pci_disable_msi(qdev->pdev);
3246 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3250 /* We start by trying to get the number of vectors
3251 * stored in qdev->intr_count. If we don't get that
3252 * many then we reduce the count and try again.
3254 static void ql_enable_msix(struct ql_adapter *qdev)
3258 /* Get the MSIX vectors. */
3259 if (qlge_irq_type == MSIX_IRQ) {
3260 /* Try to alloc space for the msix struct,
3261 * if it fails then go to MSI/legacy.
3263 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3264 sizeof(struct msix_entry),
3266 if (!qdev->msi_x_entry) {
3267 qlge_irq_type = MSI_IRQ;
3271 for (i = 0; i < qdev->intr_count; i++)
3272 qdev->msi_x_entry[i].entry = i;
3274 /* Loop to get our vectors. We start with
3275 * what we want and settle for what we get.
3278 err = pci_enable_msix(qdev->pdev,
3279 qdev->msi_x_entry, qdev->intr_count);
3281 qdev->intr_count = err;
3285 kfree(qdev->msi_x_entry);
3286 qdev->msi_x_entry = NULL;
3287 netif_warn(qdev, ifup, qdev->ndev,
3288 "MSI-X Enable failed, trying MSI.\n");
3289 qdev->intr_count = 1;
3290 qlge_irq_type = MSI_IRQ;
3291 } else if (err == 0) {
3292 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3293 netif_info(qdev, ifup, qdev->ndev,
3294 "MSI-X Enabled, got %d vectors.\n",
3300 qdev->intr_count = 1;
3301 if (qlge_irq_type == MSI_IRQ) {
3302 if (!pci_enable_msi(qdev->pdev)) {
3303 set_bit(QL_MSI_ENABLED, &qdev->flags);
3304 netif_info(qdev, ifup, qdev->ndev,
3305 "Running with MSI interrupts.\n");
3309 qlge_irq_type = LEG_IRQ;
3310 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3311 "Running with legacy interrupts.\n");
3314 /* Each vector services 1 RSS ring and 1 or more
3315 * TX completion rings. This function loops through
3316 * the TX completion rings and assigns the vector that
3317 * will service it. An example would be if there are
3318 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3319 * This would mean that vector 0 would service RSS ring 0
3320 * and TX completion rings 0,1,2 and 3. Vector 1 would
3321 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3323 static void ql_set_tx_vect(struct ql_adapter *qdev)
3326 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3328 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3329 /* Assign irq vectors to TX rx_rings.*/
3330 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3331 i < qdev->rx_ring_count; i++) {
3332 if (j == tx_rings_per_vector) {
3336 qdev->rx_ring[i].irq = vect;
3340 /* For single vector all rings have an irq
3343 for (i = 0; i < qdev->rx_ring_count; i++)
3344 qdev->rx_ring[i].irq = 0;
3348 /* Set the interrupt mask for this vector. Each vector
3349 * will service 1 RSS ring and 1 or more TX completion
3350 * rings. This function sets up a bit mask per vector
3351 * that indicates which rings it services.
3353 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3355 int j, vect = ctx->intr;
3356 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3358 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359 /* Add the RSS ring serviced by this vector
3362 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3363 /* Add the TX ring(s) serviced by this vector
3365 for (j = 0; j < tx_rings_per_vector; j++) {
3367 (1 << qdev->rx_ring[qdev->rss_ring_count +
3368 (vect * tx_rings_per_vector) + j].cq_id);
3371 /* For single vector we just shift each queue's
3374 for (j = 0; j < qdev->rx_ring_count; j++)
3375 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3380 * Here we build the intr_context structures based on
3381 * our rx_ring count and intr vector count.
3382 * The intr_context structure is used to hook each vector
3383 * to possibly different handlers.
3385 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3388 struct intr_context *intr_context = &qdev->intr_context[0];
3390 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3391 /* Each rx_ring has its
3392 * own intr_context since we have separate
3393 * vectors for each queue.
3395 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3396 qdev->rx_ring[i].irq = i;
3397 intr_context->intr = i;
3398 intr_context->qdev = qdev;
3399 /* Set up this vector's bit-mask that indicates
3400 * which queues it services.
3402 ql_set_irq_mask(qdev, intr_context);
3404 * We set up each vector's enable/disable/read bits so
3405 * there are no bit/mask calculations in the critical path.
3407 intr_context->intr_en_mask =
3408 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3409 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3411 intr_context->intr_dis_mask =
3412 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3413 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3415 intr_context->intr_read_mask =
3416 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3420 /* The first vector/queue handles
3421 * broadcast/multicast, fatal errors,
3422 * and firmware events. This is in addition
3423 * to normal inbound NAPI processing.
3425 intr_context->handler = qlge_isr;
3426 sprintf(intr_context->name, "%s-rx-%d",
3427 qdev->ndev->name, i);
3430 * Inbound queues handle unicast frames only.
3432 intr_context->handler = qlge_msix_rx_isr;
3433 sprintf(intr_context->name, "%s-rx-%d",
3434 qdev->ndev->name, i);
3439 * All rx_rings use the same intr_context since
3440 * there is only one vector.
3442 intr_context->intr = 0;
3443 intr_context->qdev = qdev;
3445 * We set up each vector's enable/disable/read bits so
3446 * there are no bit/mask calculations in the critical path.
3448 intr_context->intr_en_mask =
3449 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3450 intr_context->intr_dis_mask =
3451 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3452 INTR_EN_TYPE_DISABLE;
3453 intr_context->intr_read_mask =
3454 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3456 * Single interrupt means one handler for all rings.
3458 intr_context->handler = qlge_isr;
3459 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3460 /* Set up this vector's bit-mask that indicates
3461 * which queues it services. In this case there is
3462 * a single vector so it will service all RSS and
3463 * TX completion rings.
3465 ql_set_irq_mask(qdev, intr_context);
3467 /* Tell the TX completion rings which MSI-X vector
3468 * they will be using.
3470 ql_set_tx_vect(qdev);
3473 static void ql_free_irq(struct ql_adapter *qdev)
3476 struct intr_context *intr_context = &qdev->intr_context[0];
3478 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3479 if (intr_context->hooked) {
3480 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3481 free_irq(qdev->msi_x_entry[i].vector,
3484 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3488 ql_disable_msix(qdev);
3491 static int ql_request_irq(struct ql_adapter *qdev)
3495 struct pci_dev *pdev = qdev->pdev;
3496 struct intr_context *intr_context = &qdev->intr_context[0];
3498 ql_resolve_queues_to_irqs(qdev);
3500 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3501 atomic_set(&intr_context->irq_cnt, 0);
3502 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3503 status = request_irq(qdev->msi_x_entry[i].vector,
3504 intr_context->handler,
3509 netif_err(qdev, ifup, qdev->ndev,
3510 "Failed request for MSIX interrupt %d.\n",
3515 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3516 "trying msi or legacy interrupts.\n");
3517 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3518 "%s: irq = %d.\n", __func__, pdev->irq);
3519 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3520 "%s: context->name = %s.\n", __func__,
3521 intr_context->name);
3522 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3523 "%s: dev_id = 0x%p.\n", __func__,
3526 request_irq(pdev->irq, qlge_isr,
3527 test_bit(QL_MSI_ENABLED,
3529 flags) ? 0 : IRQF_SHARED,
3530 intr_context->name, &qdev->rx_ring[0]);
3534 netif_err(qdev, ifup, qdev->ndev,
3535 "Hooked intr %d, queue type %s, with name %s.\n",
3537 qdev->rx_ring[0].type == DEFAULT_Q ?
3539 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3540 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3541 intr_context->name);
3543 intr_context->hooked = 1;
3547 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
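/* Build and download the RSS initialization control block (ricb): the
 * hash keys plus a 1024-entry indirection table that spreads flows
 * across the RSS completion queues.
 */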
3552 static int ql_start_rss(struct ql_adapter *qdev)
3554 static const u8 init_hash_seed[] = {
3555 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3556 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3557 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3558 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3559 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3561 struct ricb *ricb = &qdev->ricb;
3564 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3566 memset((void *)ricb, 0, sizeof(*ricb));
3568 ricb->base_cq = RSS_L4K;
3570 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3571 ricb->mask = cpu_to_le16((u16)(0x3ff));
3574 * Fill out the Indirection Table.
3576 for (i = 0; i < 1024; i++)
3577 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3579 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3580 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3582 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3584 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3590 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3594 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597 /* Clear all the entries in the routing table. */
3598 for (i = 0; i < 16; i++) {
3599 status = ql_set_routing_reg(qdev, i, 0, 0);
3601 netif_err(qdev, ifup, qdev->ndev,
3602 "Failed to init routing register for CAM packets.\n");
3606 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3610 /* Initialize the frame-to-queue routing. */
3611 static int ql_route_initialize(struct ql_adapter *qdev)
3615 /* Clear all the entries in the routing table. */
3616 status = ql_clear_routing_entries(qdev);
3620 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3624 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3625 RT_IDX_IP_CSUM_ERR, 1);
3627 netif_err(qdev, ifup, qdev->ndev,
3628 "Failed to init routing register "
3629 "for IP CSUM error packets.\n");
3632 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3633 RT_IDX_TU_CSUM_ERR, 1);
3635 netif_err(qdev, ifup, qdev->ndev,
3636 "Failed to init routing register "
3637 "for TCP/UDP CSUM error packets.\n");
3640 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3642 netif_err(qdev, ifup, qdev->ndev,
3643 "Failed to init routing register for broadcast packets.\n");
3646 /* If we have more than one inbound queue, then turn on RSS in the
3649 if (qdev->rss_ring_count > 1) {
3650 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3651 RT_IDX_RSS_MATCH, 1);
3653 netif_err(qdev, ifup, qdev->ndev,
3654 "Failed to init routing register for MATCH RSS packets.\n");
3659 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register for CAM packets.\n");
3665 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3669 int ql_cam_route_initialize(struct ql_adapter *qdev)
3673 /* Check if the link is up and use that to
3674 * determine whether we are setting or clearing
3675 * the MAC address in the CAM.
3677 set = ql_read32(qdev, STS);
3678 set &= qdev->port_link_up;
3679 status = ql_set_mac_addr(qdev, set);
3681 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3685 status = ql_route_initialize(qdev);
3687 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3692 static int ql_adapter_initialize(struct ql_adapter *qdev)
3699 * Set up the System register to halt on errors.
3701 value = SYS_EFE | SYS_FAE;
3703 ql_write32(qdev, SYS, mask | value);
3705 /* Set the default queue and VLAN behavior. */
3706 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3707 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3708 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3710 /* Enable the MPI interrupt. */
3711 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3713 /* Enable the function, set pagesize, enable error checking. */
3714 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3715 FSC_EC | FSC_VM_PAGE_4K;
3716 value |= SPLT_SETTING;
3718 /* Set/clear header splitting. */
3719 mask = FSC_VM_PAGESIZE_MASK |
3720 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3721 ql_write32(qdev, FSC, mask | value);
3723 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3725 /* Set RX packet routing to use the port/pci function on which the
3726 * packet arrived, in addition to the usual frame routing.
3727 * This is helpful on bonding where both interfaces can have
3728 * the same MAC address.
3730 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3731 /* Reroute all packets to our Interface.
3732 * They may have been routed to MPI firmware
3735 value = ql_read32(qdev, MGMT_RCV_CFG);
3736 value &= ~MGMT_RCV_CFG_RM;
3739 /* Sticky reg needs clearing due to WOL. */
3740 ql_write32(qdev, MGMT_RCV_CFG, mask);
3741 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3743 /* Default WOL is enabled on Mezz cards */
3744 if (qdev->pdev->subsystem_device == 0x0068 ||
3745 qdev->pdev->subsystem_device == 0x0180)
3746 qdev->wol = WAKE_MAGIC;
3748 /* Start up the rx queues. */
3749 for (i = 0; i < qdev->rx_ring_count; i++) {
3750 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3752 netif_err(qdev, ifup, qdev->ndev,
3753 "Failed to start rx ring[%d].\n", i);
3758 /* If there is more than one inbound completion queue
3759 * then download a RICB to configure RSS.
3761 if (qdev->rss_ring_count > 1) {
3762 status = ql_start_rss(qdev);
3764 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3769 /* Start up the tx queues. */
3770 for (i = 0; i < qdev->tx_ring_count; i++) {
3771 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to start tx ring[%d].\n", i);
3779 /* Initialize the port and set the max framesize. */
3780 status = qdev->nic_ops->port_initialize(qdev);
3782 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3784 /* Set up the MAC address and frame routing filter. */
3785 status = ql_cam_route_initialize(qdev);
3787 netif_err(qdev, ifup, qdev->ndev,
3788 "Failed to init CAM/Routing tables.\n");
3792 /* Start NAPI for the RSS queues. */
3793 for (i = 0; i < qdev->rss_ring_count; i++)
3794 napi_enable(&qdev->rx_ring[i].napi);
3799 /* Issue soft reset to chip. */
3800 static int ql_adapter_reset(struct ql_adapter *qdev)
3804 unsigned long end_jiffies;
3806 /* Clear all the entries in the routing table. */
3807 status = ql_clear_routing_entries(qdev);
3809 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3813 end_jiffies = jiffies +
3814 max((unsigned long)1, usecs_to_jiffies(30));
3816 /* If the recovery bit is set, skip the mailbox command and
3817 * clear the bit; otherwise we are in the normal reset process.
3819 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3820 /* Stop management traffic. */
3821 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3823 /* Wait for the NIC and MGMNT FIFOs to empty. */
3824 ql_wait_fifo_empty(qdev);
3826 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3828 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3831 value = ql_read32(qdev, RST_FO);
3832 if ((value & RST_FO_FR) == 0)
3835 } while (time_before(jiffies, end_jiffies));
3837 if (value & RST_FO_FR) {
3838 netif_err(qdev, ifdown, qdev->ndev,
3839 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3840 status = -ETIMEDOUT;
3843 /* Resume management traffic. */
3844 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3848 static void ql_display_dev_info(struct net_device *ndev)
3850 struct ql_adapter *qdev = netdev_priv(ndev);
3852 netif_info(qdev, probe, qdev->ndev,
3853 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3854 "XG Roll = %d, XG Rev = %d.\n",
3857 qdev->chip_rev_id & 0x0000000f,
3858 qdev->chip_rev_id >> 4 & 0x0000000f,
3859 qdev->chip_rev_id >> 8 & 0x0000000f,
3860 qdev->chip_rev_id >> 12 & 0x0000000f);
3861 netif_info(qdev, probe, qdev->ndev,
3862 "MAC address %pM\n", ndev->dev_addr);
3865 static int ql_wol(struct ql_adapter *qdev)
3868 u32 wol = MB_WOL_DISABLE;
3870 /* The CAM is still intact after a reset, but if we
3871 * are doing WOL, then we may need to program the
3872 * routing regs. We would also need to issue the mailbox
3873 * commands to instruct the MPI what to do per the ethtool
3877 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3878 WAKE_MCAST | WAKE_BCAST)) {
3879 netif_err(qdev, ifdown, qdev->ndev,
3880 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3885 if (qdev->wol & WAKE_MAGIC) {
3886 status = ql_mb_wol_set_magic(qdev, 1);
3888 netif_err(qdev, ifdown, qdev->ndev,
3889 "Failed to set magic packet on %s.\n",
3893 netif_info(qdev, drv, qdev->ndev,
3894 "Enabled magic packet successfully on %s.\n",
3897 wol |= MB_WOL_MAGIC_PKT;
3901 wol |= MB_WOL_MODE_ON;
3902 status = ql_mb_wol_mode(qdev, wol);
3903 netif_err(qdev, drv, qdev->ndev,
3904 "WOL %s (wol code 0x%x) on %s\n",
3905 (status == 0) ? "Successfully set" : "Failed",
3906 wol, qdev->ndev->name);
3912 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3915 /* Don't kill the reset worker thread if we
3916 * are in the process of recovery.
3918 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3919 cancel_delayed_work_sync(&qdev->asic_reset_work);
3920 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3921 cancel_delayed_work_sync(&qdev->mpi_work);
3922 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3923 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3924 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3927 static int ql_adapter_down(struct ql_adapter *qdev)
3933 ql_cancel_all_work_sync(qdev);
3935 for (i = 0; i < qdev->rss_ring_count; i++)
3936 napi_disable(&qdev->rx_ring[i].napi);
3938 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3940 ql_disable_interrupts(qdev);
3942 ql_tx_ring_clean(qdev);
3944 /* Call netif_napi_del() from common point.
3946 for (i = 0; i < qdev->rss_ring_count; i++)
3947 netif_napi_del(&qdev->rx_ring[i].napi);
3949 status = ql_adapter_reset(qdev);
3951 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3953 ql_free_rx_buffers(qdev);
3958 static int ql_adapter_up(struct ql_adapter *qdev)
3962 err = ql_adapter_initialize(qdev);
3964 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3967 set_bit(QL_ADAPTER_UP, &qdev->flags);
3968 ql_alloc_rx_buffers(qdev);
3969 /* If the port is initialized and the
3970 * link is up then turn on the carrier.
3972 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3973 (ql_read32(qdev, STS) & qdev->port_link_up))
3975 /* Restore rx mode. */
3976 clear_bit(QL_ALLMULTI, &qdev->flags);
3977 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3978 qlge_set_multicast_list(qdev->ndev);
3980 /* Restore vlan setting. */
3981 qlge_restore_vlan(qdev);
3983 ql_enable_interrupts(qdev);
3984 ql_enable_all_completion_interrupts(qdev);
3985 netif_tx_start_all_queues(qdev->ndev);
3989 ql_adapter_reset(qdev);
3993 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3995 ql_free_mem_resources(qdev);
3999 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4003 if (ql_alloc_mem_resources(qdev)) {
4004 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4007 status = ql_request_irq(qdev);
4011 static int qlge_close(struct net_device *ndev)
4013 struct ql_adapter *qdev = netdev_priv(ndev);
4015 /* If we hit the pci_channel_io_perm_failure
4016 * condition, then we already
4017 * brought the adapter down.
4019 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4020 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4021 clear_bit(QL_EEH_FATAL, &qdev->flags);
4026 * Wait for device to recover from a reset.
4027 * (Rarely happens, but possible.)
4029 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4031 ql_adapter_down(qdev);
4032 ql_release_adapter_resources(qdev);
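/* Size and type the rings: request one MSI-X vector per online CPU (up
 * to MAX_CPUS), then derive the RSS ring count from the vectors actually
 * granted; the TX completion queues are placed immediately after the
 * RSS queues in the rx_ring array.
 */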
4036 static int ql_configure_rings(struct ql_adapter *qdev)
4039 struct rx_ring *rx_ring;
4040 struct tx_ring *tx_ring;
4041 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4042 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4043 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4045 qdev->lbq_buf_order = get_order(lbq_buf_len);
4047 /* In a perfect world we have one RSS ring for each CPU
4048 * and each has its own vector. To do that we ask for
4049 * cpu_cnt vectors. ql_enable_msix() will adjust the
4050 * vector count to what we actually get. We then
4051 * allocate an RSS ring for each.
4052 * Essentially, we are doing min(cpu_count, msix_vector_count).
4054 qdev->intr_count = cpu_cnt;
4055 ql_enable_msix(qdev);
4056 /* Adjust the RSS ring count to the actual vector count. */
4057 qdev->rss_ring_count = qdev->intr_count;
4058 qdev->tx_ring_count = cpu_cnt;
4059 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4061 for (i = 0; i < qdev->tx_ring_count; i++) {
4062 tx_ring = &qdev->tx_ring[i];
4063 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4064 tx_ring->qdev = qdev;
4066 tx_ring->wq_len = qdev->tx_ring_size;
4068 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4071 * The completion queue IDs for the tx rings start
4072 * immediately after the rss rings.
4074 tx_ring->cq_id = qdev->rss_ring_count + i;
4077 for (i = 0; i < qdev->rx_ring_count; i++) {
4078 rx_ring = &qdev->rx_ring[i];
4079 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4080 rx_ring->qdev = qdev;
4082 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4083 if (i < qdev->rss_ring_count) {
4085 * Inbound (RSS) queues.
4087 rx_ring->cq_len = qdev->rx_ring_size;
4089 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4090 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4092 rx_ring->lbq_len * sizeof(__le64);
4093 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4094 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4096 rx_ring->sbq_len * sizeof(__le64);
4097 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4098 rx_ring->type = RX_Q;
4101 * Outbound queue handles outbound completions only.
4103 /* outbound cq is same size as tx_ring it services. */
4104 rx_ring->cq_len = qdev->tx_ring_size;
4106 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4107 rx_ring->lbq_len = 0;
4108 rx_ring->lbq_size = 0;
4109 rx_ring->lbq_buf_size = 0;
4110 rx_ring->sbq_len = 0;
4111 rx_ring->sbq_size = 0;
4112 rx_ring->sbq_buf_size = 0;
4113 rx_ring->type = TX_Q;
4119 static int qlge_open(struct net_device *ndev)
4122 struct ql_adapter *qdev = netdev_priv(ndev);
4124 err = ql_adapter_reset(qdev);
4128 err = ql_configure_rings(qdev);
4132 err = ql_get_adapter_resources(qdev);
4136 err = ql_adapter_up(qdev);
4143 ql_release_adapter_resources(qdev);
4147 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4149 struct rx_ring *rx_ring;
4153 /* Wait for an outstanding reset to complete. */
4154 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4156 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4157 netif_err(qdev, ifup, qdev->ndev,
4158 "Waiting for adapter UP...\n");
4163 netif_err(qdev, ifup, qdev->ndev,
4164 "Timed out waiting for adapter UP\n");
4169 status = ql_adapter_down(qdev);
4173 /* Get the new rx buffer size. */
4174 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4175 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4176 qdev->lbq_buf_order = get_order(lbq_buf_len);
4178 for (i = 0; i < qdev->rss_ring_count; i++) {
4179 rx_ring = &qdev->rx_ring[i];
4180 /* Set the new size. */
4181 rx_ring->lbq_buf_size = lbq_buf_len;
4184 status = ql_adapter_up(qdev);
4190 netif_alert(qdev, ifup, qdev->ndev,
4191 "Driver up/down cycle failed, closing device.\n");
4192 set_bit(QL_ADAPTER_UP, &qdev->flags);
4193 dev_close(qdev->ndev);
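/* MTU changes between 1500 and 9000 require resizing the large receive
 * buffers, which ql_change_rx_buffers() does by cycling the adapter
 * down and back up when the interface is running.
 */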
4197 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4199 struct ql_adapter *qdev = netdev_priv(ndev);
4202 if (ndev->mtu == 1500 && new_mtu == 9000) {
4203 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4204 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4205 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4209 queue_delayed_work(qdev->workqueue,
4210 &qdev->mpi_port_cfg_work, 3*HZ);
4212 ndev->mtu = new_mtu;
4214 if (!netif_running(qdev->ndev)) {
4218 status = ql_change_rx_buffers(qdev);
4220 netif_err(qdev, ifup, qdev->ndev,
4221 "Changing MTU failed.\n");
4227 static struct net_device_stats *qlge_get_stats(struct net_device
4230 struct ql_adapter *qdev = netdev_priv(ndev);
4231 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4232 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4233 unsigned long pkts, mcast, dropped, errors, bytes;
4237 pkts = mcast = dropped = errors = bytes = 0;
4238 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4239 pkts += rx_ring->rx_packets;
4240 bytes += rx_ring->rx_bytes;
4241 dropped += rx_ring->rx_dropped;
4242 errors += rx_ring->rx_errors;
4243 mcast += rx_ring->rx_multicast;
4245 ndev->stats.rx_packets = pkts;
4246 ndev->stats.rx_bytes = bytes;
4247 ndev->stats.rx_dropped = dropped;
4248 ndev->stats.rx_errors = errors;
4249 ndev->stats.multicast = mcast;
4252 pkts = errors = bytes = 0;
4253 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4254 pkts += tx_ring->tx_packets;
4255 bytes += tx_ring->tx_bytes;
4256 errors += tx_ring->tx_errors;
4258 ndev->stats.tx_packets = pkts;
4259 ndev->stats.tx_bytes = bytes;
4260 ndev->stats.tx_errors = errors;
4261 return &ndev->stats;
4264 static void qlge_set_multicast_list(struct net_device *ndev)
4266 struct ql_adapter *qdev = netdev_priv(ndev);
4267 struct netdev_hw_addr *ha;
4270 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4274 * Set or clear promiscuous mode if a
4275 * transition is taking place.
4277 if (ndev->flags & IFF_PROMISC) {
4278 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4279 if (ql_set_routing_reg
4280 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4281 netif_err(qdev, hw, qdev->ndev,
4282 "Failed to set promiscuous mode.\n");
4284 set_bit(QL_PROMISCUOUS, &qdev->flags);
4288 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4289 if (ql_set_routing_reg
4290 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4291 netif_err(qdev, hw, qdev->ndev,
4292 "Failed to clear promiscuous mode.\n");
4294 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4300 * Set or clear all multicast mode if a
4301 * transition is taking place.
4303 if ((ndev->flags & IFF_ALLMULTI) ||
4304 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4305 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4306 if (ql_set_routing_reg
4307 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4308 netif_err(qdev, hw, qdev->ndev,
4309 "Failed to set all-multi mode.\n");
4311 set_bit(QL_ALLMULTI, &qdev->flags);
4315 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4316 if (ql_set_routing_reg
4317 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4318 netif_err(qdev, hw, qdev->ndev,
4319 "Failed to clear all-multi mode.\n");
4321 clear_bit(QL_ALLMULTI, &qdev->flags);
4326 if (!netdev_mc_empty(ndev)) {
4327 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4331 netdev_for_each_mc_addr(ha, ndev) {
4332 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4333 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4334 netif_err(qdev, hw, qdev->ndev,
4335 "Failed to loadmulticast address.\n");
4336 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4341 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4342 if (ql_set_routing_reg
4343 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4344 netif_err(qdev, hw, qdev->ndev,
4345 "Failed to set multicast match mode.\n");
4347 set_bit(QL_ALLMULTI, &qdev->flags);
4351 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4354 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356 struct ql_adapter *qdev = netdev_priv(ndev);
4357 struct sockaddr *addr = p;
4360 if (!is_valid_ether_addr(addr->sa_data))
4361 return -EADDRNOTAVAIL;
4362 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4363 /* Update local copy of current mac address. */
4364 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4366 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4369 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4370 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4372 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4373 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4377 static void qlge_tx_timeout(struct net_device *ndev)
4379 struct ql_adapter *qdev = netdev_priv(ndev);
4380 ql_queue_asic_error(qdev);
4383 static void ql_asic_reset_work(struct work_struct *work)
4385 struct ql_adapter *qdev =
4386 container_of(work, struct ql_adapter, asic_reset_work.work);
4389 status = ql_adapter_down(qdev);
4393 status = ql_adapter_up(qdev);
4397 /* Restore rx mode. */
4398 clear_bit(QL_ALLMULTI, &qdev->flags);
4399 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4400 qlge_set_multicast_list(qdev->ndev);
4405 netif_alert(qdev, ifup, qdev->ndev,
4406 "Driver up/down cycle failed, closing device\n");
4408 set_bit(QL_ADAPTER_UP, &qdev->flags);
4409 dev_close(qdev->ndev);
4413 static const struct nic_operations qla8012_nic_ops = {
4414 .get_flash = ql_get_8012_flash_params,
4415 .port_initialize = ql_8012_port_initialize,
4418 static const struct nic_operations qla8000_nic_ops = {
4419 .get_flash = ql_get_8000_flash_params,
4420 .port_initialize = ql_8000_port_initialize,
4423 /* Find the pcie function number for the other NIC
4424 * on this chip. Since both NIC functions share a
4425 * common firmware we have the lowest enabled function
4426 * do any common work. Examples would be resetting
4427 * after a fatal firmware error, or doing a firmware
4430 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4434 u32 nic_func1, nic_func2;
4436 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4441 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4442 MPI_TEST_NIC_FUNC_MASK);
4443 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4444 MPI_TEST_NIC_FUNC_MASK);
4446 if (qdev->func == nic_func1)
4447 qdev->alt_func = nic_func2;
4448 else if (qdev->func == nic_func2)
4449 qdev->alt_func = nic_func1;

static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
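
/* Note (added commentary, not from the original source): the periodic
 * STS read above forces a PCI transaction even when the interface is
 * otherwise idle, so a device that has gone silent is expected to trip
 * the EEH error-detection path and qlge_io_error_detected() gets
 * invoked.  The timer is (re)armed every five seconds here and from
 * qlge_probe(), qlge_io_resume() and qlge_resume().
 */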

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	ndev->vlan_features = ndev->hw_features;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disable the timer. */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
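
/* Recovery flow (added summary, not from the original source): the PCI
 * core calls qlge_io_error_detected() first to quiesce the driver and
 * learn whether a slot reset is required, then qlge_io_slot_reset() to
 * re-enable the device and reset the chip, and finally qlge_io_resume()
 * to bring the interface back up.  ql_eeh_close() above frees only
 * software state because the device may not respond to MMIO while the
 * channel is frozen.
 */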
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
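
/* Design note (added commentary, not from the original source): shutdown
 * reuses the suspend path so the adapter is quiesced the same way at
 * system power-off as at suspend-to-RAM, including the ql_wol() call
 * that arms wake-on-LAN when it has been configured.
 */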

static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);