/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
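/* Note: assuming the probe path runs this value through netif_msg_init(), as
 * the 0..16 range in the description above suggests, a hypothetical
 * "modprobe qlge debug=3" enables the three lowest NETIF_MSG_* categories
 * (DRV, PROBE and LINK), debug=0 silences all messages, and the default of
 * -1 falls back to default_msg above.
 */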
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
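/* Typical usage of the semaphore helpers above, mirroring the flash access
 * paths later in this file: acquire the hardware semaphore, touch the shared
 * resource, then release it.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */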
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used by kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
	/*
	 * Wait for the bit to clear after signaling hw.
	 */
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 ql_wait_reg_rdy(qdev,
380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 ql_write32(qdev, MAC_ADDR_DATA, lower);
388 ql_wait_reg_rdy(qdev,
389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
392 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 ql_write32(qdev, MAC_ADDR_DATA, upper);
397 ql_wait_reg_rdy(qdev,
398 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
401 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402 (index << MAC_ADDR_IDX_SHIFT) | /* index */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
408 cam_output = (CAM_OUT_ROUTE_NIC |
410 func << CAM_OUT_FUNC_SHIFT) |
411 (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
418 case MAC_ADDR_TYPE_VLAN:
420 u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
427 ql_wait_reg_rdy(qdev,
428 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
431 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432 (index << MAC_ADDR_IDX_SHIFT) | /* index */
434 enable_bit); /* enable/disable */
437 case MAC_ADDR_TYPE_MULTI_FLTR:
439 netif_crit(qdev, ifup, qdev->ndev,
440 "Address type %d not yet supported.\n", type);
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
454 char zero_mac_addr[ETH_ALEN];
458 addr = &qdev->current_mac_addr[0];
459 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 "Set Mac addr %pM\n", addr);
462 memset(zero_mac_addr, 0, ETH_ALEN);
463 addr = &zero_mac_addr[0];
464 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 "Clearing MAC address\n");
467 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
470 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 netif_err(qdev, ifup, qdev->ndev,
475 "Failed to init mac address.\n");
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
509 *value = ql_read32(qdev, RT_DATA);
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
522 int status = -EINVAL; /* Return error if no mask match. */
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
572 value = RT_IDX_DST_DFLT_Q | /* dest */
573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
579 value = RT_IDX_DST_DFLT_Q | /* dest */
580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 case 0: /* Clear the E-bit on an entry. */
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
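/* The two helpers above gate the global INTR_EN_EI (enable interrupts) bit;
 * per-vector enable/disable is handled separately by
 * ql_enable_completion_interrupt()/ql_disable_completion_interrupt() below.
 */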
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
636 unsigned long hw_flags = 0;
637 struct intr_context *ctx = qdev->intr_context + intr;
639 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
643 ql_write32(qdev, INTR_EN,
645 var = ql_read32(qdev, STS);
649 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 ql_write32(qdev, INTR_EN,
653 var = ql_read32(qdev, STS);
655 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
662 struct intr_context *ctx;
	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
667 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
670 ctx = qdev->intr_context + intr;
671 spin_lock(&qdev->hw_lock);
672 if (!atomic_read(&ctx->irq_cnt)) {
673 ql_write32(qdev, INTR_EN,
675 var = ql_read32(qdev, STS);
677 atomic_inc(&ctx->irq_cnt);
678 spin_unlock(&qdev->hw_lock);
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
685 for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
690 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 ql_enable_completion_interrupt(qdev, i);
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
702 __le16 *flash = (__le16 *)&qdev->flash;
704 status = strncmp((char *)&qdev->flash, str, 4);
706 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
710 for (i = 0; i < size; i++)
711 csum += le16_to_cpu(*flash++);
714 netif_err(qdev, ifup, qdev->ndev,
715 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
728 /* set up for reg read */
729 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
739 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
748 __le32 *p = (__le32 *)&qdev->flash;
752 /* Get flash offset for function and adjust
756 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
763 size = sizeof(struct flash_params_8000) / sizeof(u32);
764 for (i = 0; i < size; i++, p++) {
765 status = ql_read_flash_word(qdev, i+offset, p);
767 netif_err(qdev, ifup, qdev->ndev,
768 "Error reading flash.\n");
773 status = ql_validate_flash(qdev,
774 sizeof(struct flash_params_8000) / sizeof(u16),
777 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
782 /* Extract either manufacturer or BOFM modified
785 if (qdev->flash.flash_params_8000.data_type1 == 2)
787 qdev->flash.flash_params_8000.mac_addr1,
788 qdev->ndev->addr_len);
791 qdev->flash.flash_params_8000.mac_addr,
792 qdev->ndev->addr_len);
794 if (!is_valid_ether_addr(mac_addr)) {
795 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
800 memcpy(qdev->ndev->dev_addr,
802 qdev->ndev->addr_len);
805 ql_sem_unlock(qdev, SEM_FLASH_MASK);
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
813 __le32 *p = (__le32 *)&qdev->flash;
815 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817 /* Second function's parameters follow the first
823 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
826 for (i = 0; i < size; i++, p++) {
827 status = ql_read_flash_word(qdev, i+offset, p);
829 netif_err(qdev, ifup, qdev->ndev,
830 "Error reading flash.\n");
836 status = ql_validate_flash(qdev,
837 sizeof(struct flash_params_8012) / sizeof(u16),
840 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
845 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 memcpy(qdev->ndev->dev_addr,
851 qdev->flash.flash_params_8012.mac_addr,
852 qdev->ndev->addr_len);
855 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
866 /* wait for reg to come ready */
867 status = ql_wait_reg_rdy(qdev,
868 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
871 /* write the data to the data reg */
872 ql_write32(qdev, XGMAC_DATA, data);
873 /* trigger the write */
874 ql_write32(qdev, XGMAC_ADDR, reg);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
885 /* wait for reg to come ready */
886 status = ql_wait_reg_rdy(qdev,
887 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
890 /* set up for reg read */
891 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 /* wait for reg to come ready */
893 status = ql_wait_reg_rdy(qdev,
894 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
898 *data = ql_read32(qdev, XGMAC_DATA);
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
910 status = ql_read_xgmac_reg(qdev, reg, &lo);
914 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
918 *data = (u64) lo | ((u64) hi << 32);
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
928 * Get MPI firmware version for driver banner
931 status = ql_mb_about_fw(qdev);
934 status = ql_mb_get_fw_state(qdev);
937 /* Wake up a worker to get/set the TX/RX frame sizes. */
938 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
954 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
958 netif_info(qdev, link, qdev->ndev,
959 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 netif_crit(qdev, link, qdev->ndev,
963 "Port initialize timed out.\n");
968 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969 /* Set the core reset. */
970 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
973 data |= GLOBAL_CFG_RESET;
974 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
978 /* Clear the core reset and turn on jumbo for receiver. */
979 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981 data |= GLOBAL_CFG_TX_STAT_EN;
982 data |= GLOBAL_CFG_RX_STAT_EN;
983 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable transmitter, and clear its reset. */
988 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
991 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992 data |= TX_CFG_EN; /* Enable the transmitter. */
993 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable receiver and clear its reset. */
998 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1001 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002 data |= RX_CFG_EN; /* Enable the receiver. */
1003 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1007 /* Turn on jumbo. */
1009 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1013 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1017 /* Signal to the world that the port is enabled. */
1018 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 ql_sem_unlock(qdev, qdev->xg_sem_mask);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
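/* For example, on a typical 4 KB PAGE_SIZE system with lbq_buf_order = 1 this
 * yields an 8 KB master block, which ql_get_next_chunk() below carves into
 * lbq_buf_size sized chunks for the individual large-buffer descriptors.
 */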
1029 /* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 struct rx_ring *rx_ring)
1043 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045 pci_dma_sync_single_for_cpu(qdev->pdev,
1046 dma_unmap_addr(lbq_desc, mapaddr),
1047 rx_ring->lbq_buf_size,
1048 PCI_DMA_FROMDEVICE);
	/* If it's the last chunk of our master page then
	 * we're done.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
			== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}
1062 /* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
1073 /* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
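/* ql_update_cq() advances the software consumer index (wrapping back to
 * cq_base at cq_len), and ql_write_cq_idx() publishes that index to the
 * chip through the completion queue's consumer index doorbell register.
 */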
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 struct bq_desc *lbq_desc)
1092 if (!rx_ring->pg_chunk.page) {
1094 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n");
1113 rx_ring->pg_chunk.map = map;
1114 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
1120 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122 /* Adjust the master page chunk for next
1125 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 rx_ring->pg_chunk.page = NULL;
1128 lbq_desc->p.pg_chunk.last_flag = 1;
1130 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 get_page(rx_ring->pg_chunk.page);
1132 lbq_desc->p.pg_chunk.last_flag = 0;
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139 u32 clean_idx = rx_ring->lbq_clean_idx;
1140 u32 start_idx = clean_idx;
1141 struct bq_desc *lbq_desc;
1145 while (rx_ring->lbq_free_cnt > 32) {
1146 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 "lbq: try cleaning clean_idx = %d.\n",
1150 lbq_desc = &rx_ring->lbq[clean_idx];
1151 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 rx_ring->lbq_clean_idx = clean_idx;
1153 netif_err(qdev, ifup, qdev->ndev,
1154 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1159 map = lbq_desc->p.pg_chunk.map +
1160 lbq_desc->p.pg_chunk.offset;
1161 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 dma_unmap_len_set(lbq_desc, maplen,
1163 rx_ring->lbq_buf_size);
1164 *lbq_desc->addr = cpu_to_le64(map);
1166 pci_dma_sync_single_for_device(qdev->pdev, map,
1167 rx_ring->lbq_buf_size,
1168 PCI_DMA_FROMDEVICE);
1170 if (clean_idx == rx_ring->lbq_len)
1174 rx_ring->lbq_clean_idx = clean_idx;
1175 rx_ring->lbq_prod_idx += 16;
1176 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 rx_ring->lbq_prod_idx = 0;
1178 rx_ring->lbq_free_cnt -= 16;
1181 if (start_idx != clean_idx) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: updating prod idx = %d.\n",
1184 rx_ring->lbq_prod_idx);
1185 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 rx_ring->lbq_prod_idx_db_reg);
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193 u32 clean_idx = rx_ring->sbq_clean_idx;
1194 u32 start_idx = clean_idx;
1195 struct bq_desc *sbq_desc;
1199 while (rx_ring->sbq_free_cnt > 16) {
1200 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 sbq_desc = &rx_ring->sbq[clean_idx];
1202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 "sbq: try cleaning clean_idx = %d.\n",
1205 if (sbq_desc->p.skb == NULL) {
1206 netif_printk(qdev, rx_status, KERN_DEBUG,
1208 "sbq: getting new skb for index %d.\n",
1211 netdev_alloc_skb(qdev->ndev,
1213 if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216 rx_ring->sbq_clean_idx = clean_idx;
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1239 if (clean_idx == rx_ring->sbq_len)
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1286 "unmapping OAL area.\n");
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1291 dma_unmap_len(&tx_ring_desc->map[i],
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1314 int len = skb_headlen(skb);
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1325 * Map the skb buffer first.
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1334 return NETDEV_TX_BUSY;
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1384 tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1439 struct nic_stats *stats = &qdev->nic_stats;
1441 stats->rx_err_count++;
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445 stats->rx_code_err++;
1447 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448 stats->rx_oversize_err++;
1450 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451 stats->rx_undersize_err++;
1453 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454 stats->rx_preamble_err++;
1456 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457 stats->rx_frame_len_err++;
1459 case IB_MAC_IOCB_RSP_ERR_CRC:
1460 stats->rx_crc_err++;
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468 struct rx_ring *rx_ring,
1469 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 struct sk_buff *skb;
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475 struct napi_struct *napi = &rx_ring->napi;
1477 napi->dev = qdev->ndev;
1479 skb = napi_get_frags(napi);
1481 netif_err(qdev, drv, qdev->ndev,
1482 "Couldn't get an skb, exiting.\n");
1483 rx_ring->rx_dropped++;
1484 put_page(lbq_desc->p.pg_chunk.page);
1487 prefetch(lbq_desc->p.pg_chunk.va);
1488 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1489 lbq_desc->p.pg_chunk.page,
1490 lbq_desc->p.pg_chunk.offset,
1494 skb->data_len += length;
1495 skb->truesize += length;
1496 skb_shinfo(skb)->nr_frags++;
1498 rx_ring->rx_packets++;
1499 rx_ring->rx_bytes += length;
1500 skb->ip_summed = CHECKSUM_UNNECESSARY;
1501 skb_record_rx_queue(skb, rx_ring->cq_id);
1502 if (vlan_id != 0xffff)
1503 __vlan_hwaccel_put_tag(skb, vlan_id);
1504 napi_gro_frags(napi);
1507 /* Process an inbound completion from an rx ring. */
1508 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509 struct rx_ring *rx_ring,
1510 struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 struct net_device *ndev = qdev->ndev;
1515 struct sk_buff *skb = NULL;
1517 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518 struct napi_struct *napi = &rx_ring->napi;
1520 skb = netdev_alloc_skb(ndev, length);
1522 netif_err(qdev, drv, qdev->ndev,
1523 "Couldn't get an skb, need to unwind!.\n");
1524 rx_ring->rx_dropped++;
1525 put_page(lbq_desc->p.pg_chunk.page);
1529 addr = lbq_desc->p.pg_chunk.va;
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1535 if (skb->len > ndev->mtu + ETH_HLEN) {
1536 netif_err(qdev, drv, qdev->ndev,
1537 "Segment too small, dropping.\n");
1538 rx_ring->rx_dropped++;
1541 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1542 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1543 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1545 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1546 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1548 skb->len += length-ETH_HLEN;
1549 skb->data_len += length-ETH_HLEN;
1550 skb->truesize += length-ETH_HLEN;
1552 rx_ring->rx_packets++;
1553 rx_ring->rx_bytes += skb->len;
1554 skb->protocol = eth_type_trans(skb, ndev);
1555 skb_checksum_none_assert(skb);
1557 if ((ndev->features & NETIF_F_RXCSUM) &&
1558 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1560 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1561 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562 "TCP checksum done!\n");
1563 skb->ip_summed = CHECKSUM_UNNECESSARY;
1564 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1565 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1566 /* Unfragmented ipv4 UDP frame. */
1568 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1569 if (!(iph->frag_off &
1570 htons(IP_MF|IP_OFFSET))) {
1571 skb->ip_summed = CHECKSUM_UNNECESSARY;
1572 netif_printk(qdev, rx_status, KERN_DEBUG,
1574 "UDP checksum done!\n");
1579 skb_record_rx_queue(skb, rx_ring->cq_id);
1580 if (vlan_id != 0xffff)
1581 __vlan_hwaccel_put_tag(skb, vlan_id);
1582 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1583 napi_gro_receive(napi, skb);
1585 netif_receive_skb(skb);
1588 dev_kfree_skb_any(skb);
1589 put_page(lbq_desc->p.pg_chunk.page);
1592 /* Process an inbound completion from an rx ring. */
1593 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1594 struct rx_ring *rx_ring,
1595 struct ib_mac_iocb_rsp *ib_mac_rsp,
1599 struct net_device *ndev = qdev->ndev;
1600 struct sk_buff *skb = NULL;
1601 struct sk_buff *new_skb = NULL;
1602 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1604 skb = sbq_desc->p.skb;
1605 /* Allocate new_skb and copy */
1606 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607 if (new_skb == NULL) {
1608 netif_err(qdev, probe, qdev->ndev,
1609 "No skb available, drop the packet.\n");
1610 rx_ring->rx_dropped++;
1613 skb_reserve(new_skb, NET_IP_ALIGN);
1614 memcpy(skb_put(new_skb, length), skb->data, length);
1617 /* loopback self test for ethtool */
1618 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1619 ql_check_lb_frame(qdev, skb);
1620 dev_kfree_skb_any(skb);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1627 if (skb->len > ndev->mtu + ETH_HLEN) {
1628 dev_kfree_skb_any(skb);
1629 rx_ring->rx_dropped++;
1633 prefetch(skb->data);
1634 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1637 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1638 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1639 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1644 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1645 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646 "Promiscuous Packet.\n");
1648 rx_ring->rx_packets++;
1649 rx_ring->rx_bytes += skb->len;
1650 skb->protocol = eth_type_trans(skb, ndev);
1651 skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
1656 if ((ndev->features & NETIF_F_RXCSUM) &&
1657 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1659 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661 "TCP checksum done!\n");
1662 skb->ip_summed = CHECKSUM_UNNECESSARY;
1663 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665 /* Unfragmented ipv4 UDP frame. */
1666 struct iphdr *iph = (struct iphdr *) skb->data;
1667 if (!(iph->frag_off &
1668 htons(IP_MF|IP_OFFSET))) {
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670 netif_printk(qdev, rx_status, KERN_DEBUG,
1672 "UDP checksum done!\n");
1677 skb_record_rx_queue(skb, rx_ring->cq_id);
1678 if (vlan_id != 0xffff)
1679 __vlan_hwaccel_put_tag(skb, vlan_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1681 napi_gro_receive(&rx_ring->napi, skb);
1683 netif_receive_skb(skb);
1686 static void ql_realign_skb(struct sk_buff *skb, int len)
1688 void *temp_addr = skb->data;
	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
1694 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1695 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1696 skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1705 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1706 struct rx_ring *rx_ring,
1707 struct ib_mac_iocb_rsp *ib_mac_rsp)
1709 struct bq_desc *lbq_desc;
1710 struct bq_desc *sbq_desc;
1711 struct sk_buff *skb = NULL;
1712 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1713 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	/*
	 * Handle the header buffer if present.
	 */
1718 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1719 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1720 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1721 "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
1725 sbq_desc = ql_get_curr_sbuf(rx_ring);
1726 pci_unmap_single(qdev->pdev,
1727 dma_unmap_addr(sbq_desc, mapaddr),
1728 dma_unmap_len(sbq_desc, maplen),
1729 PCI_DMA_FROMDEVICE);
1730 skb = sbq_desc->p.skb;
1731 ql_realign_skb(skb, hdr_len);
1732 skb_put(skb, hdr_len);
1733 sbq_desc->p.skb = NULL;
	/*
	 * Handle the data buffer(s).
	 */
1739 if (unlikely(!length)) { /* Is there data too? */
1740 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741 "No Data buffer in this packet.\n");
1745 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1746 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748 "Headers in small, data of %d bytes in small, combine them.\n",
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
1757 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758 pci_dma_sync_single_for_cpu(qdev->pdev,
1760 (sbq_desc, mapaddr),
1763 PCI_DMA_FROMDEVICE);
1764 memcpy(skb_put(skb, length),
1765 sbq_desc->p.skb->data, length);
1766 pci_dma_sync_single_for_device(qdev->pdev,
1773 PCI_DMA_FROMDEVICE);
1775 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776 "%d bytes in a single small buffer.\n",
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 skb = sbq_desc->p.skb;
1780 ql_realign_skb(skb, length);
1781 skb_put(skb, length);
1782 pci_unmap_single(qdev->pdev,
1783 dma_unmap_addr(sbq_desc,
1785 dma_unmap_len(sbq_desc,
1787 PCI_DMA_FROMDEVICE);
1788 sbq_desc->p.skb = NULL;
1790 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Header in small, %d bytes in large. Chain large to small!\n",
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
1800 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 "Chaining page at offset = %d, for %d bytes to skb.\n",
1803 lbq_desc->p.pg_chunk.offset, length);
1804 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805 lbq_desc->p.pg_chunk.offset,
1808 skb->data_len += length;
1809 skb->truesize += length;
		/*
		 * The headers and data are in a single large buffer. We
		 * copy it to a new skb and let it go. This can happen with
		 * jumbo mtu on a non-TCP/UDP frame.
		 */
1816 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817 skb = netdev_alloc_skb(qdev->ndev, length);
1819 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1820 "No skb available, drop the packet.\n");
1823 pci_unmap_page(qdev->pdev,
1824 dma_unmap_addr(lbq_desc,
1826 dma_unmap_len(lbq_desc, maplen),
1827 PCI_DMA_FROMDEVICE);
1828 skb_reserve(skb, NET_IP_ALIGN);
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1832 skb_fill_page_desc(skb, 0,
1833 lbq_desc->p.pg_chunk.page,
1834 lbq_desc->p.pg_chunk.offset,
1837 skb->data_len += length;
1838 skb->truesize += length;
1840 __pskb_pull_tail(skb,
1841 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1842 VLAN_ETH_HLEN : ETH_HLEN);
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
1857 sbq_desc = ql_get_curr_sbuf(rx_ring);
1858 pci_unmap_single(qdev->pdev,
1859 dma_unmap_addr(sbq_desc, mapaddr),
1860 dma_unmap_len(sbq_desc, maplen),
1861 PCI_DMA_FROMDEVICE);
1862 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
1872 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873 "%d bytes of headers & data in chain of large.\n",
1875 skb = sbq_desc->p.skb;
1876 sbq_desc->p.skb = NULL;
1877 skb_reserve(skb, NET_IP_ALIGN);
1879 while (length > 0) {
1880 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1881 size = (length < rx_ring->lbq_buf_size) ? length :
1882 rx_ring->lbq_buf_size;
1884 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885 "Adding page %d to skb for %d bytes.\n",
1887 skb_fill_page_desc(skb, i,
1888 lbq_desc->p.pg_chunk.page,
1889 lbq_desc->p.pg_chunk.offset,
1892 skb->data_len += size;
1893 skb->truesize += size;
1897 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1898 VLAN_ETH_HLEN : ETH_HLEN);
1903 /* Process an inbound completion from an rx ring. */
1904 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1905 struct rx_ring *rx_ring,
1906 struct ib_mac_iocb_rsp *ib_mac_rsp,
1909 struct net_device *ndev = qdev->ndev;
1910 struct sk_buff *skb = NULL;
1912 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1914 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1915 if (unlikely(!skb)) {
1916 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917 "No skb available, drop packet.\n");
1918 rx_ring->rx_dropped++;
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
1925 if (skb->len > ndev->mtu + ETH_HLEN) {
1926 dev_kfree_skb_any(skb);
1927 rx_ring->rx_dropped++;
1931 /* loopback self test for ethtool */
1932 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933 ql_check_lb_frame(qdev, skb);
1934 dev_kfree_skb_any(skb);
1938 prefetch(skb->data);
1939 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1941 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1942 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1943 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1944 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1945 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1946 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1947 rx_ring->rx_multicast++;
1949 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1950 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951 "Promiscuous Packet.\n");
1954 skb->protocol = eth_type_trans(skb, ndev);
1955 skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
1960 if ((ndev->features & NETIF_F_RXCSUM) &&
1961 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1963 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1965 "TCP checksum done!\n");
1966 skb->ip_summed = CHECKSUM_UNNECESSARY;
1967 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969 /* Unfragmented ipv4 UDP frame. */
1970 struct iphdr *iph = (struct iphdr *) skb->data;
1971 if (!(iph->frag_off &
1972 htons(IP_MF|IP_OFFSET))) {
1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1975 "TCP checksum done!\n");
1980 rx_ring->rx_packets++;
1981 rx_ring->rx_bytes += skb->len;
1982 skb_record_rx_queue(skb, rx_ring->cq_id);
1983 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1984 __vlan_hwaccel_put_tag(skb, vlan_id);
1985 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1986 napi_gro_receive(&rx_ring->napi, skb);
1988 netif_receive_skb(skb);
1991 /* Process an inbound completion from an rx ring. */
1992 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1993 struct rx_ring *rx_ring,
1994 struct ib_mac_iocb_rsp *ib_mac_rsp)
1996 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1997 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1998 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1999 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
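	/* A vlan_id of 0xffff serves as a "no VLAN tag present" sentinel;
	 * most of the rx handlers above compare against it before calling
	 * __vlan_hwaccel_put_tag().
	 */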
2001 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2003 /* Frame error, so drop the packet. */
2004 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006 return (unsigned long)length;
2009 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
2013 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2015 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
2020 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2022 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2023 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2024 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
2028 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2030 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
2034 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
2040 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2044 return (unsigned long)length;
2047 /* Process an outbound completion from an rx ring. */
2048 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2049 struct ob_mac_iocb_rsp *mac_rsp)
2051 struct tx_ring *tx_ring;
2052 struct tx_ring_desc *tx_ring_desc;
2054 QL_DUMP_OB_MAC_RSP(mac_rsp);
2055 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2056 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2057 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2058 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2059 tx_ring->tx_packets++;
2060 dev_kfree_skb(tx_ring_desc->skb);
2061 tx_ring_desc->skb = NULL;
2063 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2066 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2067 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2068 netif_warn(qdev, tx_done, qdev->ndev,
2069 "Total descriptor length did not match transfer length.\n");
2071 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2072 netif_warn(qdev, tx_done, qdev->ndev,
2073 "Frame too short to be valid, not sent.\n");
2075 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2076 netif_warn(qdev, tx_done, qdev->ndev,
2077 "Frame too long, but sent anyway.\n");
2079 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2080 netif_warn(qdev, tx_done, qdev->ndev,
2081 "PCI backplane error. Frame not sent.\n");
2084 atomic_inc(&tx_ring->tx_count);
2087 /* Fire up a handler to reset the MPI processor. */
2088 void ql_queue_fw_error(struct ql_adapter *qdev)
2091 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2094 void ql_queue_asic_error(struct ql_adapter *qdev)
2097 ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
2102 clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate to the reset process
	 * that we are in fatal error recovery rather than a normal close.
	 */
2106 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2107 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2110 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2111 struct ib_ae_iocb_rsp *ib_ae_rsp)
2113 switch (ib_ae_rsp->event) {
2114 case MGMT_ERR_EVENT:
2115 netif_err(qdev, rx_err, qdev->ndev,
2116 "Management Processor Fatal Error.\n");
2117 ql_queue_fw_error(qdev);
2120 case CAM_LOOKUP_ERR_EVENT:
2121 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2122 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2123 ql_queue_asic_error(qdev);
2126 case SOFT_ECC_ERROR_EVENT:
2127 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2128 ql_queue_asic_error(qdev);
2131 case PCI_ERR_ANON_BUF_RD:
2132 netdev_err(qdev->ndev, "PCI error occurred when reading "
2133 "anonymous buffers from rx_ring %d.\n",
2135 ql_queue_asic_error(qdev);
2139 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2141 ql_queue_asic_error(qdev);
2146 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2148 struct ql_adapter *qdev = rx_ring->qdev;
2149 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2150 struct ob_mac_iocb_rsp *net_rsp = NULL;
2153 struct tx_ring *tx_ring;
2154 /* While there are entries in the completion queue. */
2155 while (prod != rx_ring->cnsmr_idx) {
2157 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2158 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2159 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2161 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2163 switch (net_rsp->opcode) {
2165 case OPCODE_OB_MAC_TSO_IOCB:
2166 case OPCODE_OB_MAC_IOCB:
2167 ql_process_mac_tx_intr(qdev, net_rsp);
2170 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2171 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2175 ql_update_cq(rx_ring);
2176 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2180 ql_write_cq_idx(rx_ring);
2181 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2182 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2183 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
2188 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2194 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2196 struct ql_adapter *qdev = rx_ring->qdev;
2197 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2198 struct ql_net_rsp_iocb *net_rsp;
2201 /* While there are entries in the completion queue. */
2202 while (prod != rx_ring->cnsmr_idx) {
2204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2206 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2208 net_rsp = rx_ring->curr_entry;
2210 switch (net_rsp->opcode) {
2211 case OPCODE_IB_MAC_IOCB:
2212 ql_process_mac_rx_intr(qdev, rx_ring,
2213 (struct ib_mac_iocb_rsp *)
2217 case OPCODE_IB_AE_IOCB:
2218 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2222 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2223 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228 ql_update_cq(rx_ring);
2229 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 if (count == budget)
2233 ql_update_buffer_queues(qdev, rx_ring);
2234 ql_write_cq_idx(rx_ring);
2238 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2240 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2241 struct ql_adapter *qdev = rx_ring->qdev;
2242 struct rx_ring *trx_ring;
2243 int i, work_done = 0;
2244 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2246 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2247 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2249 /* Service the TX rings first. They start
2250 * right after the RSS rings. */
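/* Ring layout: rx_ring[0..rss_ring_count-1] are the inbound (RSS)
 * completion rings; rx_ring[rss_ring_count..rx_ring_count-1] are the
 * outbound (TX completion) rings, one per tx_ring.
 */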
2251 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2252 trx_ring = &qdev->rx_ring[i];
2253 /* If this TX completion ring belongs to this vector and
2254 * it's not empty then service it.
2256 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2257 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2258 trx_ring->cnsmr_idx)) {
2259 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2260 "%s: Servicing TX completion ring %d.\n",
2261 __func__, trx_ring->cq_id);
2262 ql_clean_outbound_rx_ring(trx_ring);
2267 * Now service the RSS ring if it's active.
2269 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2270 rx_ring->cnsmr_idx) {
2271 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2272 "%s: Servicing RX completion ring %d.\n",
2273 __func__, rx_ring->cq_id);
2274 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2277 if (work_done < budget) {
2278 napi_complete(napi);
2279 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2284 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2286 struct ql_adapter *qdev = netdev_priv(ndev);
2288 if (features & NETIF_F_HW_VLAN_RX) {
2289 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2290 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2292 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2296 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2297 netdev_features_t features)
2300 * Since there is no support for separate rx/tx vlan accel
2301 * enable/disable, make sure the tx flag is always in the same state as rx.
2303 if (features & NETIF_F_HW_VLAN_RX)
2304 features |= NETIF_F_HW_VLAN_TX;
2306 features &= ~NETIF_F_HW_VLAN_TX;
2311 static int qlge_set_features(struct net_device *ndev,
2312 netdev_features_t features)
2314 netdev_features_t changed = ndev->features ^ features;
2316 if (changed & NETIF_F_HW_VLAN_RX)
2317 qlge_vlan_mode(ndev, features);
2322 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2324 u32 enable_bit = MAC_ADDR_E;
2327 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to init vlan address.\n");
2335 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2337 struct ql_adapter *qdev = netdev_priv(ndev);
2341 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2345 err = __qlge_vlan_rx_add_vid(qdev, vid);
2346 set_bit(vid, qdev->active_vlans);
2348 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2353 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2358 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2359 MAC_ADDR_TYPE_VLAN, vid);
2361 netif_err(qdev, ifup, qdev->ndev,
2362 "Failed to clear vlan address.\n");
2366 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2368 struct ql_adapter *qdev = netdev_priv(ndev);
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2377 clear_bit(vid, qdev->active_vlans);
2379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2384 static void qlge_restore_vlan(struct ql_adapter *qdev)
2389 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2393 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2394 __qlge_vlan_rx_add_vid(qdev, vid);
2396 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2399 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2400 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2402 struct rx_ring *rx_ring = dev_id;
2403 napi_schedule(&rx_ring->napi);
2407 /* This handles a fatal error, MPI activity, and the default
2408 * rx_ring in an MSI-X multiple vector environment.
2409 * In an MSI/Legacy environment it also processes the rest of the rx_rings.
2412 static irqreturn_t qlge_isr(int irq, void *dev_id)
2414 struct rx_ring *rx_ring = dev_id;
2415 struct ql_adapter *qdev = rx_ring->qdev;
2416 struct intr_context *intr_context = &qdev->intr_context[0];
2420 spin_lock(&qdev->hw_lock);
2421 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2422 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2423 "Shared Interrupt, Not ours!\n");
2424 spin_unlock(&qdev->hw_lock);
2427 spin_unlock(&qdev->hw_lock);
2429 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2432 * Check for fatal error.
2435 ql_queue_asic_error(qdev);
2436 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2437 var = ql_read32(qdev, ERR_STS);
2438 netdev_err(qdev->ndev, "Resetting chip. "
2439 "Error Status Register = 0x%x\n", var);
2444 * Check MPI processor activity.
2446 if ((var & STS_PI) &&
2447 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2449 * We've got an async event or mailbox completion.
2450 * Handle it and clear the source of the interrupt.
2452 netif_err(qdev, intr, qdev->ndev,
2453 "Got MPI processor interrupt.\n");
2454 ql_disable_completion_interrupt(qdev, intr_context->intr);
2455 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2456 queue_delayed_work_on(smp_processor_id(),
2457 qdev->workqueue, &qdev->mpi_work, 0);
2462 * Get the bit-mask that shows the active queues for this
2463 * pass. Compare it to the queues that this irq services
2464 * and call napi if there's a match.
2466 var = ql_read32(qdev, ISR1);
2467 if (var & intr_context->irq_mask) {
2468 netif_info(qdev, intr, qdev->ndev,
2469 "Waking handler for rx_ring[0].\n");
2470 ql_disable_completion_interrupt(qdev, intr_context->intr);
2471 napi_schedule(&rx_ring->napi);
2474 ql_enable_completion_interrupt(qdev, intr_context->intr);
2475 return work_done ? IRQ_HANDLED : IRQ_NONE;
2478 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2481 if (skb_is_gso(skb)) {
2483 if (skb_header_cloned(skb)) {
2484 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2489 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2490 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2491 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2492 mac_iocb_ptr->total_hdrs_len =
2493 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2494 mac_iocb_ptr->net_trans_offset =
2495 cpu_to_le16(skb_network_offset(skb) |
2496 skb_transport_offset(skb)
2497 << OB_MAC_TRANSPORT_HDR_SHIFT);
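/* The network and transport header offsets are packed into one 16-bit
 * field; e.g. an untagged Ethernet/IPv4/TCP frame would encode
 * 14 | (34 << OB_MAC_TRANSPORT_HDR_SHIFT).
 */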
2498 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2499 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2500 if (likely(skb->protocol == htons(ETH_P_IP))) {
2501 struct iphdr *iph = ip_hdr(skb);
2503 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2504 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2508 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2510 tcp_hdr(skb)->check =
2511 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2512 &ipv6_hdr(skb)->daddr,
2520 static void ql_hw_csum_setup(struct sk_buff *skb,
2521 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2524 struct iphdr *iph = ip_hdr(skb);
2526 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2527 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2528 mac_iocb_ptr->net_trans_offset =
2529 cpu_to_le16(skb_network_offset(skb) |
2530 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2532 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2534 if (likely(iph->protocol == IPPROTO_TCP)) {
2535 check = &(tcp_hdr(skb)->check);
2536 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2537 mac_iocb_ptr->total_hdrs_len =
2538 cpu_to_le16(skb_transport_offset(skb) +
2539 (tcp_hdr(skb)->doff << 2));
2541 check = &(udp_hdr(skb)->check);
2542 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2543 mac_iocb_ptr->total_hdrs_len =
2544 cpu_to_le16(skb_transport_offset(skb) +
2545 sizeof(struct udphdr));
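/* Seed the L4 checksum field with the complement of the pseudo-header
 * sum so the hardware can finish the checksum over the payload.
 */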
2547 *check = ~csum_tcpudp_magic(iph->saddr,
2548 iph->daddr, len, iph->protocol, 0);
2551 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2553 struct tx_ring_desc *tx_ring_desc;
2554 struct ob_mac_iocb_req *mac_iocb_ptr;
2555 struct ql_adapter *qdev = netdev_priv(ndev);
2557 struct tx_ring *tx_ring;
2558 u32 tx_ring_idx = (u32) skb->queue_mapping;
2560 tx_ring = &qdev->tx_ring[tx_ring_idx];
2562 if (skb_padto(skb, ETH_ZLEN))
2563 return NETDEV_TX_OK;
2565 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2566 netif_info(qdev, tx_queued, qdev->ndev,
2567 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2568 __func__, tx_ring_idx);
2569 netif_stop_subqueue(ndev, tx_ring->wq_id);
2570 tx_ring->tx_errors++;
2571 return NETDEV_TX_BUSY;
2573 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2574 mac_iocb_ptr = tx_ring_desc->queue_entry;
2575 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2577 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2578 mac_iocb_ptr->tid = tx_ring_desc->index;
2579 /* We use the upper 32-bits to store the tx queue for this IO.
2580 * When we get the completion we can use it to establish the context.
2582 mac_iocb_ptr->txq_idx = tx_ring_idx;
2583 tx_ring_desc->skb = skb;
2585 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2587 if (vlan_tx_tag_present(skb)) {
2588 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2590 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2591 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2593 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2595 dev_kfree_skb_any(skb);
2596 return NETDEV_TX_OK;
2597 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2598 ql_hw_csum_setup(skb,
2599 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2601 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2603 netif_err(qdev, tx_queued, qdev->ndev,
2604 "Could not map the segments.\n");
2605 tx_ring->tx_errors++;
2606 return NETDEV_TX_BUSY;
2608 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2609 tx_ring->prod_idx++;
2610 if (tx_ring->prod_idx == tx_ring->wq_len)
2611 tx_ring->prod_idx = 0;
2614 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2615 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2616 "tx queued, slot %d, len %d\n",
2617 tx_ring->prod_idx, skb->len);
2619 atomic_dec(&tx_ring->tx_count);
2621 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622 netif_stop_subqueue(ndev, tx_ring->wq_id);
2623 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2625 * The queue got stopped because the tx_ring was full.
2626 * Wake it up, because it's now at least 25% empty.
2628 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2630 return NETDEV_TX_OK;
2634 static void ql_free_shadow_space(struct ql_adapter *qdev)
2636 if (qdev->rx_ring_shadow_reg_area) {
2637 pci_free_consistent(qdev->pdev,
2639 qdev->rx_ring_shadow_reg_area,
2640 qdev->rx_ring_shadow_reg_dma);
2641 qdev->rx_ring_shadow_reg_area = NULL;
2643 if (qdev->tx_ring_shadow_reg_area) {
2644 pci_free_consistent(qdev->pdev,
2646 qdev->tx_ring_shadow_reg_area,
2647 qdev->tx_ring_shadow_reg_dma);
2648 qdev->tx_ring_shadow_reg_area = NULL;
2652 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2654 qdev->rx_ring_shadow_reg_area =
2655 pci_alloc_consistent(qdev->pdev,
2656 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2657 if (qdev->rx_ring_shadow_reg_area == NULL) {
2658 netif_err(qdev, ifup, qdev->ndev,
2659 "Allocation of RX shadow space failed.\n");
2662 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2663 qdev->tx_ring_shadow_reg_area =
2664 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2665 &qdev->tx_ring_shadow_reg_dma);
2666 if (qdev->tx_ring_shadow_reg_area == NULL) {
2667 netif_err(qdev, ifup, qdev->ndev,
2668 "Allocation of TX shadow space failed.\n");
2669 goto err_wqp_sh_area;
2671 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2675 pci_free_consistent(qdev->pdev,
2677 qdev->rx_ring_shadow_reg_area,
2678 qdev->rx_ring_shadow_reg_dma);
2682 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2684 struct tx_ring_desc *tx_ring_desc;
2686 struct ob_mac_iocb_req *mac_iocb_ptr;
2688 mac_iocb_ptr = tx_ring->wq_base;
2689 tx_ring_desc = tx_ring->q;
2690 for (i = 0; i < tx_ring->wq_len; i++) {
2691 tx_ring_desc->index = i;
2692 tx_ring_desc->skb = NULL;
2693 tx_ring_desc->queue_entry = mac_iocb_ptr;
2697 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2700 static void ql_free_tx_resources(struct ql_adapter *qdev,
2701 struct tx_ring *tx_ring)
2703 if (tx_ring->wq_base) {
2704 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2705 tx_ring->wq_base, tx_ring->wq_base_dma);
2706 tx_ring->wq_base = NULL;
2712 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2713 struct tx_ring *tx_ring)
2716 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2717 &tx_ring->wq_base_dma);
2719 if ((tx_ring->wq_base == NULL) ||
2720 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2724 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2725 if (tx_ring->q == NULL)
2730 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2731 tx_ring->wq_base, tx_ring->wq_base_dma);
2732 tx_ring->wq_base = NULL;
2734 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2738 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2740 struct bq_desc *lbq_desc;
2742 uint32_t curr_idx, clean_idx;
2744 curr_idx = rx_ring->lbq_curr_idx;
2745 clean_idx = rx_ring->lbq_clean_idx;
2746 while (curr_idx != clean_idx) {
2747 lbq_desc = &rx_ring->lbq[curr_idx];
2749 if (lbq_desc->p.pg_chunk.last_flag) {
2750 pci_unmap_page(qdev->pdev,
2751 lbq_desc->p.pg_chunk.map,
2752 ql_lbq_block_size(qdev),
2753 PCI_DMA_FROMDEVICE);
2754 lbq_desc->p.pg_chunk.last_flag = 0;
2757 put_page(lbq_desc->p.pg_chunk.page);
2758 lbq_desc->p.pg_chunk.page = NULL;
2760 if (++curr_idx == rx_ring->lbq_len)
2766 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2769 struct bq_desc *sbq_desc;
2771 for (i = 0; i < rx_ring->sbq_len; i++) {
2772 sbq_desc = &rx_ring->sbq[i];
2773 if (sbq_desc == NULL) {
2774 netif_err(qdev, ifup, qdev->ndev,
2775 "sbq_desc %d is NULL.\n", i);
2778 if (sbq_desc->p.skb) {
2779 pci_unmap_single(qdev->pdev,
2780 dma_unmap_addr(sbq_desc, mapaddr),
2781 dma_unmap_len(sbq_desc, maplen),
2782 PCI_DMA_FROMDEVICE);
2783 dev_kfree_skb(sbq_desc->p.skb);
2784 sbq_desc->p.skb = NULL;
2789 /* Free all large and small rx buffers associated
2790 * with the completion queues for this device.
2792 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2795 struct rx_ring *rx_ring;
2797 for (i = 0; i < qdev->rx_ring_count; i++) {
2798 rx_ring = &qdev->rx_ring[i];
2800 ql_free_lbq_buffers(qdev, rx_ring);
2802 ql_free_sbq_buffers(qdev, rx_ring);
2806 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2808 struct rx_ring *rx_ring;
2811 for (i = 0; i < qdev->rx_ring_count; i++) {
2812 rx_ring = &qdev->rx_ring[i];
2813 if (rx_ring->type != TX_Q)
2814 ql_update_buffer_queues(qdev, rx_ring);
2818 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2819 struct rx_ring *rx_ring)
2822 struct bq_desc *lbq_desc;
2823 __le64 *bq = rx_ring->lbq_base;
2825 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2826 for (i = 0; i < rx_ring->lbq_len; i++) {
2827 lbq_desc = &rx_ring->lbq[i];
2828 memset(lbq_desc, 0, sizeof(*lbq_desc));
2829 lbq_desc->index = i;
2830 lbq_desc->addr = bq;
2835 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2836 struct rx_ring *rx_ring)
2839 struct bq_desc *sbq_desc;
2840 __le64 *bq = rx_ring->sbq_base;
2842 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2843 for (i = 0; i < rx_ring->sbq_len; i++) {
2844 sbq_desc = &rx_ring->sbq[i];
2845 memset(sbq_desc, 0, sizeof(*sbq_desc));
2846 sbq_desc->index = i;
2847 sbq_desc->addr = bq;
2852 static void ql_free_rx_resources(struct ql_adapter *qdev,
2853 struct rx_ring *rx_ring)
2855 /* Free the small buffer queue. */
2856 if (rx_ring->sbq_base) {
2857 pci_free_consistent(qdev->pdev,
2859 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2860 rx_ring->sbq_base = NULL;
2863 /* Free the small buffer queue control blocks. */
2864 kfree(rx_ring->sbq);
2865 rx_ring->sbq = NULL;
2867 /* Free the large buffer queue. */
2868 if (rx_ring->lbq_base) {
2869 pci_free_consistent(qdev->pdev,
2871 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2872 rx_ring->lbq_base = NULL;
2875 /* Free the large buffer queue control blocks. */
2876 kfree(rx_ring->lbq);
2877 rx_ring->lbq = NULL;
2879 /* Free the rx queue. */
2880 if (rx_ring->cq_base) {
2881 pci_free_consistent(qdev->pdev,
2883 rx_ring->cq_base, rx_ring->cq_base_dma);
2884 rx_ring->cq_base = NULL;
2888 /* Allocate queues and buffers for this completion queue based
2889 * on the values in the parameter structure. */
2890 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2891 struct rx_ring *rx_ring)
2895 * Allocate the completion queue for this rx_ring.
2898 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2899 &rx_ring->cq_base_dma);
2901 if (rx_ring->cq_base == NULL) {
2902 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2906 if (rx_ring->sbq_len) {
2908 * Allocate small buffer queue.
2911 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2912 &rx_ring->sbq_base_dma);
2914 if (rx_ring->sbq_base == NULL) {
2915 netif_err(qdev, ifup, qdev->ndev,
2916 "Small buffer queue allocation failed.\n");
2921 * Allocate small buffer queue control blocks.
2923 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2924 sizeof(struct bq_desc),
2926 if (rx_ring->sbq == NULL)
2929 ql_init_sbq_ring(qdev, rx_ring);
2932 if (rx_ring->lbq_len) {
2934 * Allocate large buffer queue.
2937 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2938 &rx_ring->lbq_base_dma);
2940 if (rx_ring->lbq_base == NULL) {
2941 netif_err(qdev, ifup, qdev->ndev,
2942 "Large buffer queue allocation failed.\n");
2946 * Allocate large buffer queue control blocks.
2948 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2949 sizeof(struct bq_desc),
2951 if (rx_ring->lbq == NULL)
2954 ql_init_lbq_ring(qdev, rx_ring);
2960 ql_free_rx_resources(qdev, rx_ring);
2964 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2966 struct tx_ring *tx_ring;
2967 struct tx_ring_desc *tx_ring_desc;
2971 * Loop through all queues and free the buffers.
2974 for (j = 0; j < qdev->tx_ring_count; j++) {
2975 tx_ring = &qdev->tx_ring[j];
2976 for (i = 0; i < tx_ring->wq_len; i++) {
2977 tx_ring_desc = &tx_ring->q[i];
2978 if (tx_ring_desc && tx_ring_desc->skb) {
2979 netif_err(qdev, ifdown, qdev->ndev,
2980 "Freeing lost SKB %p, from queue %d, index %d.\n",
2981 tx_ring_desc->skb, j,
2982 tx_ring_desc->index);
2983 ql_unmap_send(qdev, tx_ring_desc,
2984 tx_ring_desc->map_cnt);
2985 dev_kfree_skb(tx_ring_desc->skb);
2986 tx_ring_desc->skb = NULL;
2992 static void ql_free_mem_resources(struct ql_adapter *qdev)
2996 for (i = 0; i < qdev->tx_ring_count; i++)
2997 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2998 for (i = 0; i < qdev->rx_ring_count; i++)
2999 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3000 ql_free_shadow_space(qdev);
3003 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3007 /* Allocate space for our shadow registers and such. */
3008 if (ql_alloc_shadow_space(qdev))
3011 for (i = 0; i < qdev->rx_ring_count; i++) {
3012 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3013 netif_err(qdev, ifup, qdev->ndev,
3014 "RX resource allocation failed.\n");
3018 /* Allocate tx queue resources */
3019 for (i = 0; i < qdev->tx_ring_count; i++) {
3020 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3021 netif_err(qdev, ifup, qdev->ndev,
3022 "TX resource allocation failed.\n");
3029 ql_free_mem_resources(qdev);
3033 /* Set up the rx ring control block and pass it to the chip.
3034 * The control block is defined as
3035 * "Completion Queue Initialization Control Block", or cqicb.
3037 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3039 struct cqicb *cqicb = &rx_ring->cqicb;
3040 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3041 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3042 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3043 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3044 void __iomem *doorbell_area =
3045 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3049 __le64 *base_indirect_ptr;
3052 /* Set up the shadow registers for this ring. */
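/* Shadow layout per completion queue: 8 bytes for the producer index,
 * followed by the lbq and sbq base-address indirection lists that the
 * CQICB will point at below.
 */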
3053 rx_ring->prod_idx_sh_reg = shadow_reg;
3054 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3055 *rx_ring->prod_idx_sh_reg = 0;
3056 shadow_reg += sizeof(u64);
3057 shadow_reg_dma += sizeof(u64);
3058 rx_ring->lbq_base_indirect = shadow_reg;
3059 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3060 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3061 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062 rx_ring->sbq_base_indirect = shadow_reg;
3063 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3065 /* PCI doorbell mem area + 0x00 for consumer index register */
3066 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3067 rx_ring->cnsmr_idx = 0;
3068 rx_ring->curr_entry = rx_ring->cq_base;
3070 /* PCI doorbell mem area + 0x04 for valid register */
3071 rx_ring->valid_db_reg = doorbell_area + 0x04;
3073 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3074 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3076 /* PCI doorbell mem area + 0x1c */
3077 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3079 memset((void *)cqicb, 0, sizeof(struct cqicb));
3080 cqicb->msix_vect = rx_ring->irq;
3082 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3083 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3085 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3087 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3090 * Set up the control block load flags.
3092 cqicb->flags = FLAGS_LC | /* Load queue base address */
3093 FLAGS_LV | /* Load MSI-X vector */
3094 FLAGS_LI; /* Load irq delay values */
3095 if (rx_ring->lbq_len) {
3096 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3097 tmp = (u64)rx_ring->lbq_base_dma;
3098 base_indirect_ptr = rx_ring->lbq_base_indirect;
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3107 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3108 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3109 (u16) rx_ring->lbq_buf_size;
3110 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3111 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3112 (u16) rx_ring->lbq_len;
3113 cqicb->lbq_len = cpu_to_le16(bq_len);
3114 rx_ring->lbq_prod_idx = 0;
3115 rx_ring->lbq_curr_idx = 0;
3116 rx_ring->lbq_clean_idx = 0;
3117 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3119 if (rx_ring->sbq_len) {
3120 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3121 tmp = (u64)rx_ring->sbq_base_dma;
3122 base_indirect_ptr = rx_ring->sbq_base_indirect;
3125 *base_indirect_ptr = cpu_to_le64(tmp);
3126 tmp += DB_PAGE_SIZE;
3127 base_indirect_ptr++;
3129 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3131 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3132 cqicb->sbq_buf_size =
3133 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3134 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3135 (u16) rx_ring->sbq_len;
3136 cqicb->sbq_len = cpu_to_le16(bq_len);
3137 rx_ring->sbq_prod_idx = 0;
3138 rx_ring->sbq_curr_idx = 0;
3139 rx_ring->sbq_clean_idx = 0;
3140 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3142 switch (rx_ring->type) {
3144 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3145 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3148 /* Inbound completion handling rx_rings run in
3149 * separate NAPI contexts.
3151 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3153 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3154 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3157 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3158 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3160 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3161 CFG_LCQ, rx_ring->cq_id);
3163 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3169 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3171 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3172 void __iomem *doorbell_area =
3173 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3174 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3175 (tx_ring->wq_id * sizeof(u64));
3176 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3177 (tx_ring->wq_id * sizeof(u64));
3181 * Assign doorbell registers for this tx_ring.
3183 /* TX PCI doorbell mem area for tx producer index */
3184 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3185 tx_ring->prod_idx = 0;
3186 /* TX PCI doorbell mem area + 0x04 */
3187 tx_ring->valid_db_reg = doorbell_area + 0x04;
3190 * Assign shadow registers for this tx_ring.
3192 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3193 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3195 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3196 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3197 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3198 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3200 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3202 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3204 ql_init_tx_ring(qdev, tx_ring);
3206 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3207 (u16) tx_ring->wq_id);
3209 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3215 static void ql_disable_msix(struct ql_adapter *qdev)
3217 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3218 pci_disable_msix(qdev->pdev);
3219 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3220 kfree(qdev->msi_x_entry);
3221 qdev->msi_x_entry = NULL;
3222 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3223 pci_disable_msi(qdev->pdev);
3224 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3228 /* We start by trying to get the number of vectors
3229 * stored in qdev->intr_count. If we don't get that
3230 * many then we reduce the count and try again.
3232 static void ql_enable_msix(struct ql_adapter *qdev)
3236 /* Get the MSIX vectors. */
3237 if (qlge_irq_type == MSIX_IRQ) {
3238 /* Try to alloc space for the msix struct,
3239 * if it fails then go to MSI/legacy.
3241 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3242 sizeof(struct msix_entry),
3244 if (!qdev->msi_x_entry) {
3245 qlge_irq_type = MSI_IRQ;
3249 for (i = 0; i < qdev->intr_count; i++)
3250 qdev->msi_x_entry[i].entry = i;
3252 /* Loop to get our vectors. We start with
3253 * what we want and settle for what we get.
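* A positive return from pci_enable_msix() is the vector count the
* system can actually provide; we adopt it and retry with that count.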
3256 err = pci_enable_msix(qdev->pdev,
3257 qdev->msi_x_entry, qdev->intr_count);
3259 qdev->intr_count = err;
3263 kfree(qdev->msi_x_entry);
3264 qdev->msi_x_entry = NULL;
3265 netif_warn(qdev, ifup, qdev->ndev,
3266 "MSI-X Enable failed, trying MSI.\n");
3267 qdev->intr_count = 1;
3268 qlge_irq_type = MSI_IRQ;
3269 } else if (err == 0) {
3270 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3271 netif_info(qdev, ifup, qdev->ndev,
3272 "MSI-X Enabled, got %d vectors.\n",
3278 qdev->intr_count = 1;
3279 if (qlge_irq_type == MSI_IRQ) {
3280 if (!pci_enable_msi(qdev->pdev)) {
3281 set_bit(QL_MSI_ENABLED, &qdev->flags);
3282 netif_info(qdev, ifup, qdev->ndev,
3283 "Running with MSI interrupts.\n");
3287 qlge_irq_type = LEG_IRQ;
3288 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3289 "Running with legacy interrupts.\n");
3292 /* Each vector services 1 RSS ring and 1 or more
3293 * TX completion rings. This function loops through
3294 * the TX completion rings and assigns the vector that
3295 * will service it. An example would be if there are
3296 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3297 * This would mean that vector 0 would service RSS ring 0
3298 * and TX completion rings 0,1,2 and 3. Vector 1 would
3299 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3301 static void ql_set_tx_vect(struct ql_adapter *qdev)
3304 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3306 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3307 /* Assign irq vectors to TX rx_rings.*/
3308 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3309 i < qdev->rx_ring_count; i++) {
3310 if (j == tx_rings_per_vector) {
3314 qdev->rx_ring[i].irq = vect;
3318 /* For single vector all rings have an irq index of 0. */
3321 for (i = 0; i < qdev->rx_ring_count; i++)
3322 qdev->rx_ring[i].irq = 0;
3326 /* Set the interrupt mask for this vector. Each vector
3327 * will service 1 RSS ring and 1 or more TX completion
3328 * rings. This function sets up a bit mask per vector
3329 * that indicates which rings it services.
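* For example, assuming 2 MSI-X vectors and 8 TX completion rings
* (tx_rings_per_vector = 4), vector 1 gets the bit for RSS ring
* cq_id 1 plus the bits for TX completion cq_ids 6 through 9.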
3331 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3333 int j, vect = ctx->intr;
3334 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3336 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3337 /* Add the RSS ring serviced by this vector
3340 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3341 /* Add the TX ring(s) serviced by this vector
3343 for (j = 0; j < tx_rings_per_vector; j++) {
3345 (1 << qdev->rx_ring[qdev->rss_ring_count +
3346 (vect * tx_rings_per_vector) + j].cq_id);
3349 /* For single vector, every queue's cq_id bit is set in the one shared mask. */
3352 for (j = 0; j < qdev->rx_ring_count; j++)
3353 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3358 * Here we build the intr_context structures based on
3359 * our rx_ring count and intr vector count.
3360 * The intr_context structure is used to hook each vector
3361 * to possibly different handlers.
3363 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3366 struct intr_context *intr_context = &qdev->intr_context[0];
3368 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3369 /* Each rx_ring has its
3370 * own intr_context since we have separate
3371 * vectors for each queue.
3373 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3374 qdev->rx_ring[i].irq = i;
3375 intr_context->intr = i;
3376 intr_context->qdev = qdev;
3377 /* Set up this vector's bit-mask that indicates
3378 * which queues it services.
3380 ql_set_irq_mask(qdev, intr_context);
3382 * We set up each vector's enable/disable/read bits so
3383 * there are no bit/mask calculations in the critical path.
3385 intr_context->intr_en_mask =
3386 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3387 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3389 intr_context->intr_dis_mask =
3390 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3391 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3393 intr_context->intr_read_mask =
3394 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3395 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3398 /* The first vector/queue handles
3399 * broadcast/multicast, fatal errors,
3400 * and firmware events. This is in addition
3401 * to normal inbound NAPI processing.
3403 intr_context->handler = qlge_isr;
3404 sprintf(intr_context->name, "%s-rx-%d",
3405 qdev->ndev->name, i);
3408 * Inbound queues handle unicast frames only.
3410 intr_context->handler = qlge_msix_rx_isr;
3411 sprintf(intr_context->name, "%s-rx-%d",
3412 qdev->ndev->name, i);
3417 * All rx_rings use the same intr_context since
3418 * there is only one vector.
3420 intr_context->intr = 0;
3421 intr_context->qdev = qdev;
3423 * We set up each vector's enable/disable/read bits so
3424 * there are no bit/mask calculations in the critical path.
3426 intr_context->intr_en_mask =
3427 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3428 intr_context->intr_dis_mask =
3429 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3430 INTR_EN_TYPE_DISABLE;
3431 intr_context->intr_read_mask =
3432 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3434 * Single interrupt means one handler for all rings.
3436 intr_context->handler = qlge_isr;
3437 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3438 /* Set up this vector's bit-mask that indicates
3439 * which queues it services. In this case there is
3440 * a single vector so it will service all RSS and
3441 * TX completion rings.
3443 ql_set_irq_mask(qdev, intr_context);
3445 /* Tell the TX completion rings which MSIx vector
3446 * they will be using.
3448 ql_set_tx_vect(qdev);
3451 static void ql_free_irq(struct ql_adapter *qdev)
3454 struct intr_context *intr_context = &qdev->intr_context[0];
3456 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3457 if (intr_context->hooked) {
3458 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3459 free_irq(qdev->msi_x_entry[i].vector,
3462 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3466 ql_disable_msix(qdev);
3469 static int ql_request_irq(struct ql_adapter *qdev)
3473 struct pci_dev *pdev = qdev->pdev;
3474 struct intr_context *intr_context = &qdev->intr_context[0];
3476 ql_resolve_queues_to_irqs(qdev);
3478 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3479 atomic_set(&intr_context->irq_cnt, 0);
3480 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3481 status = request_irq(qdev->msi_x_entry[i].vector,
3482 intr_context->handler,
3487 netif_err(qdev, ifup, qdev->ndev,
3488 "Failed request for MSIX interrupt %d.\n",
3493 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3494 "trying msi or legacy interrupts.\n");
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "%s: irq = %d.\n", __func__, pdev->irq);
3497 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498 "%s: context->name = %s.\n", __func__,
3499 intr_context->name);
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "%s: dev_id = 0x%p.\n", __func__,
3504 request_irq(pdev->irq, qlge_isr,
3505 test_bit(QL_MSI_ENABLED,
3507 flags) ? 0 : IRQF_SHARED,
3508 intr_context->name, &qdev->rx_ring[0]);
3512 netif_err(qdev, ifup, qdev->ndev,
3513 "Hooked intr %d, queue type %s, with name %s.\n",
3515 qdev->rx_ring[0].type == DEFAULT_Q ?
3517 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3518 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3519 intr_context->name);
3521 intr_context->hooked = 1;
3525 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3530 static int ql_start_rss(struct ql_adapter *qdev)
3532 static const u8 init_hash_seed[] = {
3533 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3534 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3535 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3536 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3537 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3539 struct ricb *ricb = &qdev->ricb;
3542 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3544 memset((void *)ricb, 0, sizeof(*ricb));
3546 ricb->base_cq = RSS_L4K;
3548 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3549 ricb->mask = cpu_to_le16((u16)(0x3ff));
3552 * Fill out the Indirection Table.
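* Each of the 1024 entries maps a hash bucket to an RSS ring, e.g.
* with 4 RSS rings the table repeats 0,1,2,3,... Note the masking
* below only spreads traffic evenly when rss_ring_count is a power
* of two.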
3554 for (i = 0; i < 1024; i++)
3555 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3557 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3558 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3560 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3562 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3568 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3572 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3575 /* Clear all the entries in the routing table. */
3576 for (i = 0; i < 16; i++) {
3577 status = ql_set_routing_reg(qdev, i, 0, 0);
3579 netif_err(qdev, ifup, qdev->ndev,
3580 "Failed to init routing register for CAM packets.\n");
3584 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3588 /* Initialize the frame-to-queue routing. */
3589 static int ql_route_initialize(struct ql_adapter *qdev)
3593 /* Clear all the entries in the routing table. */
3594 status = ql_clear_routing_entries(qdev);
3598 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3602 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3603 RT_IDX_IP_CSUM_ERR, 1);
3605 netif_err(qdev, ifup, qdev->ndev,
3606 "Failed to init routing register "
3607 "for IP CSUM error packets.\n");
3610 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3611 RT_IDX_TU_CSUM_ERR, 1);
3613 netif_err(qdev, ifup, qdev->ndev,
3614 "Failed to init routing register "
3615 "for TCP/UDP CSUM error packets.\n");
3618 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3620 netif_err(qdev, ifup, qdev->ndev,
3621 "Failed to init routing register for broadcast packets.\n");
3624 /* If we have more than one inbound queue, then turn on RSS in the routing block.
3627 if (qdev->rss_ring_count > 1) {
3628 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3629 RT_IDX_RSS_MATCH, 1);
3631 netif_err(qdev, ifup, qdev->ndev,
3632 "Failed to init routing register for MATCH RSS packets.\n");
3637 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3640 netif_err(qdev, ifup, qdev->ndev,
3641 "Failed to init routing register for CAM packets.\n");
3643 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3647 int ql_cam_route_initialize(struct ql_adapter *qdev)
3651 /* Check if the link is up and use that to
3652 * determine whether we are setting or clearing
3653 * the MAC address in the CAM.
3655 set = ql_read32(qdev, STS);
3656 set &= qdev->port_link_up;
3657 status = ql_set_mac_addr(qdev, set);
3659 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3663 status = ql_route_initialize(qdev);
3665 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3670 static int ql_adapter_initialize(struct ql_adapter *qdev)
3677 * Set up the System register to halt on errors.
3679 value = SYS_EFE | SYS_FAE;
3681 ql_write32(qdev, SYS, mask | value);
3683 /* Set the default queue, and VLAN behavior. */
3684 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3685 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3686 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3688 /* Set the MPI interrupt to enabled. */
3689 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3691 /* Enable the function, set pagesize, enable error checking. */
3692 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3693 FSC_EC | FSC_VM_PAGE_4K;
3694 value |= SPLT_SETTING;
3696 /* Set/clear header splitting. */
3697 mask = FSC_VM_PAGESIZE_MASK |
3698 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3699 ql_write32(qdev, FSC, mask | value);
3701 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3703 /* Set RX packet routing to use port/pci function on which the
3704 * packet arrived on in addition to usual frame routing.
3705 * This is helpful on bonding where both interfaces can have
3706 * the same MAC address.
3708 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3709 /* Reroute all packets to our Interface.
3710 * They may have been routed to MPI firmware due to WOL.
3713 value = ql_read32(qdev, MGMT_RCV_CFG);
3714 value &= ~MGMT_RCV_CFG_RM;
3717 /* Sticky reg needs clearing due to WOL. */
3718 ql_write32(qdev, MGMT_RCV_CFG, mask);
3719 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3721 /* Default WOL is enabled on Mezz cards */
3722 if (qdev->pdev->subsystem_device == 0x0068 ||
3723 qdev->pdev->subsystem_device == 0x0180)
3724 qdev->wol = WAKE_MAGIC;
3726 /* Start up the rx queues. */
3727 for (i = 0; i < qdev->rx_ring_count; i++) {
3728 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3730 netif_err(qdev, ifup, qdev->ndev,
3731 "Failed to start rx ring[%d].\n", i);
3736 /* If there is more than one inbound completion queue
3737 * then download a RICB to configure RSS.
3739 if (qdev->rss_ring_count > 1) {
3740 status = ql_start_rss(qdev);
3742 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3747 /* Start up the tx queues. */
3748 for (i = 0; i < qdev->tx_ring_count; i++) {
3749 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3751 netif_err(qdev, ifup, qdev->ndev,
3752 "Failed to start tx ring[%d].\n", i);
3757 /* Initialize the port and set the max framesize. */
3758 status = qdev->nic_ops->port_initialize(qdev);
3760 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3762 /* Set up the MAC address and frame routing filter. */
3763 status = ql_cam_route_initialize(qdev);
3765 netif_err(qdev, ifup, qdev->ndev,
3766 "Failed to init CAM/Routing tables.\n");
3770 /* Start NAPI for the RSS queues. */
3771 for (i = 0; i < qdev->rss_ring_count; i++)
3772 napi_enable(&qdev->rx_ring[i].napi);
3777 /* Issue soft reset to chip. */
3778 static int ql_adapter_reset(struct ql_adapter *qdev)
3782 unsigned long end_jiffies;
3784 /* Clear all the entries in the routing table. */
3785 status = ql_clear_routing_entries(qdev);
3787 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3791 end_jiffies = jiffies +
3792 max((unsigned long)1, usecs_to_jiffies(30));
3794 /* If the recovery bit is set, skip the mailbox command and
3795 * clear the bit; otherwise we are in the normal reset process.
3797 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3798 /* Stop management traffic. */
3799 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3801 /* Wait for the NIC and MGMNT FIFOs to empty. */
3802 ql_wait_fifo_empty(qdev);
3804 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3806 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
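/* Poll for the function reset bit (RST_FO_FR) to self-clear; the
 * end_jiffies window above is ~30 usecs, rounded up to at least
 * one jiffy.
 */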
3809 value = ql_read32(qdev, RST_FO);
3810 if ((value & RST_FO_FR) == 0)
3813 } while (time_before(jiffies, end_jiffies));
3815 if (value & RST_FO_FR) {
3816 netif_err(qdev, ifdown, qdev->ndev,
3817 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3818 status = -ETIMEDOUT;
3821 /* Resume management traffic. */
3822 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3826 static void ql_display_dev_info(struct net_device *ndev)
3828 struct ql_adapter *qdev = netdev_priv(ndev);
3830 netif_info(qdev, probe, qdev->ndev,
3831 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3832 "XG Roll = %d, XG Rev = %d.\n",
3835 qdev->chip_rev_id & 0x0000000f,
3836 qdev->chip_rev_id >> 4 & 0x0000000f,
3837 qdev->chip_rev_id >> 8 & 0x0000000f,
3838 qdev->chip_rev_id >> 12 & 0x0000000f);
3839 netif_info(qdev, probe, qdev->ndev,
3840 "MAC address %pM\n", ndev->dev_addr);
3843 static int ql_wol(struct ql_adapter *qdev)
3846 u32 wol = MB_WOL_DISABLE;
3848 /* The CAM is still intact after a reset, but if we
3849 * are doing WOL, then we may need to program the
3850 * routing regs. We would also need to issue the mailbox
3851 * commands to instruct the MPI what to do per the ethtool settings.
3855 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3856 WAKE_MCAST | WAKE_BCAST)) {
3857 netif_err(qdev, ifdown, qdev->ndev,
3858 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3863 if (qdev->wol & WAKE_MAGIC) {
3864 status = ql_mb_wol_set_magic(qdev, 1);
3866 netif_err(qdev, ifdown, qdev->ndev,
3867 "Failed to set magic packet on %s.\n",
3871 netif_info(qdev, drv, qdev->ndev,
3872 "Enabled magic packet successfully on %s.\n",
3875 wol |= MB_WOL_MAGIC_PKT;
3879 wol |= MB_WOL_MODE_ON;
3880 status = ql_mb_wol_mode(qdev, wol);
3881 netif_err(qdev, drv, qdev->ndev,
3882 "WOL %s (wol code 0x%x) on %s\n",
3883 (status == 0) ? "Successfully set" : "Failed",
3884 wol, qdev->ndev->name);
3890 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3893 /* Don't kill the reset worker thread if we
3894 * are in the process of recovery.
3896 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3897 cancel_delayed_work_sync(&qdev->asic_reset_work);
3898 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3899 cancel_delayed_work_sync(&qdev->mpi_work);
3900 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3901 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3902 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3905 static int ql_adapter_down(struct ql_adapter *qdev)
3911 ql_cancel_all_work_sync(qdev);
3913 for (i = 0; i < qdev->rss_ring_count; i++)
3914 napi_disable(&qdev->rx_ring[i].napi);
3916 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3918 ql_disable_interrupts(qdev);
3920 ql_tx_ring_clean(qdev);
3922 /* Call netif_napi_del() from a common point.
3924 for (i = 0; i < qdev->rss_ring_count; i++)
3925 netif_napi_del(&qdev->rx_ring[i].napi);
3927 status = ql_adapter_reset(qdev);
3929 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3931 ql_free_rx_buffers(qdev);
3936 static int ql_adapter_up(struct ql_adapter *qdev)
3940 err = ql_adapter_initialize(qdev);
3942 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3945 set_bit(QL_ADAPTER_UP, &qdev->flags);
3946 ql_alloc_rx_buffers(qdev);
3947 /* If the port is initialized and the
3948 * link is up then turn on the carrier.
3950 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3951 (ql_read32(qdev, STS) & qdev->port_link_up))
3953 /* Restore rx mode. */
3954 clear_bit(QL_ALLMULTI, &qdev->flags);
3955 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3956 qlge_set_multicast_list(qdev->ndev);
3958 /* Restore vlan setting. */
3959 qlge_restore_vlan(qdev);
3961 ql_enable_interrupts(qdev);
3962 ql_enable_all_completion_interrupts(qdev);
3963 netif_tx_start_all_queues(qdev->ndev);
3967 ql_adapter_reset(qdev);
3971 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3973 ql_free_mem_resources(qdev);
3977 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3981 if (ql_alloc_mem_resources(qdev)) {
3982 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3985 status = ql_request_irq(qdev);
3989 static int qlge_close(struct net_device *ndev)
3991 struct ql_adapter *qdev = netdev_priv(ndev);
3993 /* If we hit the pci_channel_io_perm_failure
3994 * condition, then we already
3995 * brought the adapter down.
3997 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3998 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3999 clear_bit(QL_EEH_FATAL, &qdev->flags);
4004 * Wait for device to recover from a reset.
4005 * (Rarely happens, but possible.)
4007 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4009 ql_adapter_down(qdev);
4010 ql_release_adapter_resources(qdev);
4014 static int ql_configure_rings(struct ql_adapter *qdev)
4017 struct rx_ring *rx_ring;
4018 struct tx_ring *tx_ring;
4019 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4020 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4021 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4023 qdev->lbq_buf_order = get_order(lbq_buf_len);
4025 /* In a perfect world we have one RSS ring for each CPU
4026 * and each has its own vector. To do that we ask for
4027 * cpu_cnt vectors. ql_enable_msix() will adjust the
4028 * vector count to what we actually get. We then
4029 * allocate an RSS ring for each.
4030 * Essentially, we are doing min(cpu_count, msix_vector_count).
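* For example, a hypothetical 8-CPU system that is granted only 4
* MSI-X vectors ends up with rss_ring_count = 4, tx_ring_count = 8
* and rx_ring_count = 12.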
4032 qdev->intr_count = cpu_cnt;
4033 ql_enable_msix(qdev);
4034 /* Adjust the RSS ring count to the actual vector count. */
4035 qdev->rss_ring_count = qdev->intr_count;
4036 qdev->tx_ring_count = cpu_cnt;
4037 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4039 for (i = 0; i < qdev->tx_ring_count; i++) {
4040 tx_ring = &qdev->tx_ring[i];
4041 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4042 tx_ring->qdev = qdev;
4044 tx_ring->wq_len = qdev->tx_ring_size;
4046 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4049 * The completion queue IDs for the tx rings start
4050 * immediately after the rss rings.
4052 tx_ring->cq_id = qdev->rss_ring_count + i;
4055 for (i = 0; i < qdev->rx_ring_count; i++) {
4056 rx_ring = &qdev->rx_ring[i];
4057 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4058 rx_ring->qdev = qdev;
4060 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4061 if (i < qdev->rss_ring_count) {
4063 * Inbound (RSS) queues.
4065 rx_ring->cq_len = qdev->rx_ring_size;
4067 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4068 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4070 rx_ring->lbq_len * sizeof(__le64);
4071 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4072 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4074 rx_ring->sbq_len * sizeof(__le64);
4075 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4076 rx_ring->type = RX_Q;
4079 * Outbound queue handles outbound completions only.
4081 /* outbound cq is same size as tx_ring it services. */
4082 rx_ring->cq_len = qdev->tx_ring_size;
4084 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4085 rx_ring->lbq_len = 0;
4086 rx_ring->lbq_size = 0;
4087 rx_ring->lbq_buf_size = 0;
4088 rx_ring->sbq_len = 0;
4089 rx_ring->sbq_size = 0;
4090 rx_ring->sbq_buf_size = 0;
4091 rx_ring->type = TX_Q;
4097 static int qlge_open(struct net_device *ndev)
4100 struct ql_adapter *qdev = netdev_priv(ndev);
4102 err = ql_adapter_reset(qdev);
4106 err = ql_configure_rings(qdev);
4110 err = ql_get_adapter_resources(qdev);
4114 err = ql_adapter_up(qdev);
4121 ql_release_adapter_resources(qdev);
4125 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4127 struct rx_ring *rx_ring;
4131 /* Wait for an outstanding reset to complete. */
4132 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4134 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4135 netif_err(qdev, ifup, qdev->ndev,
4136 "Waiting for adapter UP...\n");
4141 netif_err(qdev, ifup, qdev->ndev,
4142 "Timed out waiting for adapter UP\n");
4147 status = ql_adapter_down(qdev);
4151 /* Get the new rx buffer size. */
4152 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4153 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4154 qdev->lbq_buf_order = get_order(lbq_buf_len);
4156 for (i = 0; i < qdev->rss_ring_count; i++) {
4157 rx_ring = &qdev->rx_ring[i];
4158 /* Set the new size. */
4159 rx_ring->lbq_buf_size = lbq_buf_len;
4162 status = ql_adapter_up(qdev);
4168 netif_alert(qdev, ifup, qdev->ndev,
4169 "Driver up/down cycle failed, closing device.\n");
4170 set_bit(QL_ADAPTER_UP, &qdev->flags);
4171 dev_close(qdev->ndev);
4175 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4177 struct ql_adapter *qdev = netdev_priv(ndev);
4180 if (ndev->mtu == 1500 && new_mtu == 9000) {
4181 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4182 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4183 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4187 queue_delayed_work(qdev->workqueue,
4188 &qdev->mpi_port_cfg_work, 3*HZ);
4190 ndev->mtu = new_mtu;
4192 if (!netif_running(qdev->ndev)) {
4196 status = ql_change_rx_buffers(qdev);
4198 netif_err(qdev, ifup, qdev->ndev,
4199 "Changing MTU failed.\n");
4205 static struct net_device_stats *qlge_get_stats(struct net_device
4208 struct ql_adapter *qdev = netdev_priv(ndev);
4209 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4210 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4211 unsigned long pkts, mcast, dropped, errors, bytes;
4215 pkts = mcast = dropped = errors = bytes = 0;
4216 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4217 pkts += rx_ring->rx_packets;
4218 bytes += rx_ring->rx_bytes;
4219 dropped += rx_ring->rx_dropped;
4220 errors += rx_ring->rx_errors;
4221 mcast += rx_ring->rx_multicast;
4223 ndev->stats.rx_packets = pkts;
4224 ndev->stats.rx_bytes = bytes;
4225 ndev->stats.rx_dropped = dropped;
4226 ndev->stats.rx_errors = errors;
4227 ndev->stats.multicast = mcast;
4230 pkts = errors = bytes = 0;
4231 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4232 pkts += tx_ring->tx_packets;
4233 bytes += tx_ring->tx_bytes;
4234 errors += tx_ring->tx_errors;
4236 ndev->stats.tx_packets = pkts;
4237 ndev->stats.tx_bytes = bytes;
4238 ndev->stats.tx_errors = errors;
4239 return &ndev->stats;
4242 static void qlge_set_multicast_list(struct net_device *ndev)
4244 struct ql_adapter *qdev = netdev_priv(ndev);
4245 struct netdev_hw_addr *ha;
4248 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4252 * Set or clear promiscuous mode if a
4253 * transition is taking place.
4255 if (ndev->flags & IFF_PROMISC) {
4256 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4257 if (ql_set_routing_reg
4258 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4259 netif_err(qdev, hw, qdev->ndev,
4260 "Failed to set promiscuous mode.\n");
4262 set_bit(QL_PROMISCUOUS, &qdev->flags);
4266 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4267 if (ql_set_routing_reg
4268 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4269 netif_err(qdev, hw, qdev->ndev,
4270 "Failed to clear promiscuous mode.\n");
4272 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4278 * Set or clear all multicast mode if a
4279 * transition is taking place.
4281 if ((ndev->flags & IFF_ALLMULTI) ||
4282 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4283 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4284 if (ql_set_routing_reg
4285 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4286 netif_err(qdev, hw, qdev->ndev,
4287 "Failed to set all-multi mode.\n");
4289 set_bit(QL_ALLMULTI, &qdev->flags);
4293 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4294 if (ql_set_routing_reg
4295 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4296 netif_err(qdev, hw, qdev->ndev,
4297 "Failed to clear all-multi mode.\n");
4299 clear_bit(QL_ALLMULTI, &qdev->flags);
4304 if (!netdev_mc_empty(ndev)) {
4305 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4309 netdev_for_each_mc_addr(ha, ndev) {
4310 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4311 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4312 netif_err(qdev, hw, qdev->ndev,
4313 "Failed to loadmulticast address.\n");
4314 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4319 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4320 if (ql_set_routing_reg
4321 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4322 netif_err(qdev, hw, qdev->ndev,
4323 "Failed to set multicast match mode.\n");
4325 set_bit(QL_ALLMULTI, &qdev->flags);
4329 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4332 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4334 struct ql_adapter *qdev = netdev_priv(ndev);
4335 struct sockaddr *addr = p;
4338 if (!is_valid_ether_addr(addr->sa_data))
4339 return -EADDRNOTAVAIL;
4340 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4341 /* Update local copy of current mac address. */
4342 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4344 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4347 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4348 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4350 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4351 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4355 static void qlge_tx_timeout(struct net_device *ndev)
4357 struct ql_adapter *qdev = netdev_priv(ndev);
4358 ql_queue_asic_error(qdev);
4361 static void ql_asic_reset_work(struct work_struct *work)
4363 struct ql_adapter *qdev =
4364 container_of(work, struct ql_adapter, asic_reset_work.work);
4367 status = ql_adapter_down(qdev);
4371 status = ql_adapter_up(qdev);
4375 /* Restore rx mode. */
4376 clear_bit(QL_ALLMULTI, &qdev->flags);
4377 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4378 qlge_set_multicast_list(qdev->ndev);
4383 netif_alert(qdev, ifup, qdev->ndev,
4384 "Driver up/down cycle failed, closing device\n");
4386 set_bit(QL_ADAPTER_UP, &qdev->flags);
4387 dev_close(qdev->ndev);
4391 static const struct nic_operations qla8012_nic_ops = {
4392 .get_flash = ql_get_8012_flash_params,
4393 .port_initialize = ql_8012_port_initialize,
4396 static const struct nic_operations qla8000_nic_ops = {
4397 .get_flash = ql_get_8000_flash_params,
4398 .port_initialize = ql_8000_port_initialize,
4401 /* Find the pcie function number for the other NIC
4402 * on this chip. Since both NIC functions share a
4403 * common firmware we have the lowest enabled function
4404 * do any common work. Examples would be resetting
4405 * after a fatal firmware error, or doing a firmware coredump.
4408 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4412 u32 nic_func1, nic_func2;
4414 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4419 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4420 MPI_TEST_NIC_FUNC_MASK);
4421 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4422 MPI_TEST_NIC_FUNC_MASK);
4424 if (qdev->func == nic_func1)
4425 qdev->alt_func = nic_func2;
4426 else if (qdev->func == nic_func2)
4427 qdev->alt_func = nic_func1;
4434 static int ql_get_board_info(struct ql_adapter *qdev)
4438 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4442 status = ql_get_alt_pcie_func(qdev);
4446 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4448 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4449 qdev->port_link_up = STS_PL1;
4450 qdev->port_init = STS_PI1;
4451 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4452 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4454 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4455 qdev->port_link_up = STS_PL0;
4456 qdev->port_init = STS_PI0;
4457 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4458 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4460 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4461 qdev->device_id = qdev->pdev->device;
4462 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4463 qdev->nic_ops = &qla8012_nic_ops;
4464 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4465 qdev->nic_ops = &qla8000_nic_ops;
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

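/* The deferrable timer above is only a watchdog for the PCI bus: every
 * five seconds it reads STS, and once the device has dropped off the bus
 * (pci_channel_offline()) it logs the last status and stops re-arming
 * itself so the EEH error handlers below can take over.
 */
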
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	ndev->vlan_features = ndev->hw_features;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

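/* ql_lb_send() and ql_clean_lb_rx_ring() are thin wrappers around the
 * normal transmit and inbound-completion paths; they exist so the
 * driver's loopback self-test (note the lb_count handling in
 * qlge_probe()) can reuse the regular data path.
 */
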
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

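/* ql_eeh_close() deliberately avoids all register access: after an EEH
 * event the adapter may be unreachable, so only host-side state (the
 * timer, pending work, NAPI contexts, rings and buffers) is torn down.
 */
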
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the qlge_resume() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

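/* The PCI AER core invokes these handlers in order: error_detected() to
 * quiesce the device, slot_reset() after the slot/link reset to re-enable
 * and reset the adapter, and resume() last to reopen the interface and
 * re-arm the EEH watchdog timer.
 */
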
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}
	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);