/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
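/*
 * Note on the IVAR layout above: each VTIVAR register holds the 8-bit
 * allocation entries for two queues, with Rx entries at bit offsets 0/16
 * and Tx entries at offsets 8/24.  For example, mapping Tx queue 3 gives
 * index = 16 * (3 & 1) + 8 * 1 = 24, i.e. the top byte of VTIVAR(1).
 */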
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
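/*
 * Worked example: IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so a
 * single 60000-byte buffer costs TXD_USE_COUNT(60000) =
 * DIV_ROUND_UP(60000, 16384) = 4 descriptors.  DESC_NEEDED is the
 * worst case for one skb: one descriptor per possible fragment, with the
 * extra 4 presumably covering the head data and a context descriptor.
 */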
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->count;
}
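/*
 * Note: returning (count < tx_ring->count) reports whether the clean
 * completed within one ring's worth of work; ixgbevf_poll() folds this
 * into clean_complete to decide whether to re-arm interrupts or keep
 * polling.
 */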
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring the packet was received on
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: rx descriptor ring the packet was received on
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
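/*
 * Note: the tail bump in ixgbevf_release_rx_desc() is deliberately done
 * once per refill batch rather than per descriptor; writing VFRDT tells
 * the hardware that every descriptor up to (but not including) index i
 * is now owned by it.
 */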
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			skb->next->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return !!budget;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all the rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
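/*
 * Example of the budget split above: with a NAPI budget of 64 and four
 * Rx rings on one vector, each ring may clean up to max(64/4, 1) = 16
 * packets per poll, so one busy ring cannot starve the others.
 */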
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
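/*
 * Note (assuming the conventional EITR encoding used by the ixgbe
 * family): q_vector->itr holds the interval in register units, where
 * (itr >> 2) is the interval in microseconds.  IXGBE_20K_ITR, for
 * instance, encodes a 50 usec interval, i.e. roughly 20000 interrupts
 * per second.
 */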
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
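/*
 * Worked example (assuming IXGBE_20K_ITR == 200): the timeslice is
 * 200 >> 2 = 50 usec.  An interrupt that saw 2000 bytes therefore gives
 * bytes_perint = 40 (bytes/usec, roughly 40 MB/s), which in the
 * low_latency state is > 20 and moves the ring to bulk_latency.
 */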
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
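/*
 * Example of the smoothing step (assuming the usual values
 * IXGBE_8K_ITR == 500 and IXGBE_20K_ITR == 200): moving from 500 toward
 * 200 yields (10 * 200 * 500) / (9 * 200 + 500) ~= 434, so the interval
 * drifts toward the new target instead of jumping, which damps
 * oscillation between latency classes.
 */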
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg;
	bool got_ack = false;

	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			pr_warn("Last Request of type %2.2x to PF Nacked\n",
				msg & 0xFF);
		/*
		 * Restore the PFSTS bit in case someone is polling for a
		 * return message from the PF
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/*
	 * checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
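/*
 * Example of the grouped mapping above: with 4 Rx rings and 3 q_vectors,
 * vector 0 takes DIV_ROUND_UP(4, 3) = 2 rings, vector 1 takes
 * DIV_ROUND_UP(2, 2) = 1, and vector 2 takes the remaining 1, so the
 * rings spread as evenly as the integer split allows.
 */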
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
		srrctl |= IXGBEVF_RXBUFFER_2048 >>
			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= rx_ring->rx_buf_len >>
			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
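/*
 * Note (assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10): SRRCTL's
 * BSIZEPKT field is programmed in 1 KB granularity, which is what the
 * shift above encodes: a 2048-byte buffer becomes 2048 >> 10 = 2.
 */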
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
	if (netdev->mtu <= ETH_DATA_LEN)
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buf_len = ALIGN(max_frame, 1024);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock(&adapter->mbx_lock);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;
	u32 msg[2];

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mbx.ops.write_posted(hw, msg, 2);

	spin_unlock(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	spin_unlock(&adapter->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
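/*
 * Note: the retry loop above relies on the old pci_enable_msix() return
 * convention: 0 on success, a negative errno on hard failure, and a
 * positive value naming how many vectors could actually be allocated,
 * which is then used as the next request size.
 */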
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		eth_hw_addr_random(adapter->netdev);
		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
		       adapter->netdev->addr_len);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
		       adapter->netdev->addr_len);
	}

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
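/*
 * Rollover example for the 36-bit case: if last_counter was 0xFFFFFFFFE
 * and the next hardware read is 0x2, current_counter < last_counter
 * triggers and 2^36 (0x1000000000) is added to the software total; the
 * final mask-and-or then splices the fresh low 36 bits under the
 * accumulated high bits, so the 64-bit value stays monotonic.
 */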
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		s32 need_reset;

		spin_lock(&adapter->mbx_lock);

		need_reset = hw->mac.ops.check_link(hw, &link_speed,
						    &link_up, false);

		spin_unlock(&adapter->mbx_lock);

		if (need_reset) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw,
	       "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * If request_irq will be called in this function, map_rings
	 * must be called *before* up_complete.
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
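
/*
 * The context descriptor written above occupies an ordinary Tx ring
 * slot but carries no packet data: it stages the header layout
 * (vlan_macip_lens), the L4 type (type_tucmd) and the MSS/L4LEN/index
 * word so that data descriptors queued after it can refer to the
 * offload context by its index.
 */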
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
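
/*
 * Worked example for the TSO fields above, assuming a plain TCP/IPv4
 * frame with no options: skb_transport_offset() is 14 (Ethernet) +
 * 20 (IP) = 34, l4len is 20, so *hdr_len becomes 54, and gso_size is
 * typically 1460 on a 1500-byte MTU path (1500 - 20 IP - 20 TCP).
 */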
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;
	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
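
/*
 * Both mapping loops above slice the linear data and each page fragment
 * into chunks of at most IXGBE_MAX_DATA_PER_TXD bytes, one descriptor
 * per chunk. On a mapping failure the dma_error path rewinds: it zeroes
 * the failed entry, then steps backwards through the entries mapped so
 * far and unmaps each of them.
 */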
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for TSO */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running.
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->next_to_use = i;
}
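
/*
 * Only the last descriptor of the chain has txd_cmd OR-ed in: EOP marks
 * the end of the packet and RS requests a descriptor writeback, so the
 * hardware reports one completion per packet rather than one per
 * buffer.
 */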
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
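
/*
 * The stop/recheck pattern above closes a race with the Tx cleanup
 * path: the queue is stopped first, smp_mb() makes the stopped state
 * visible, and only then is the free-descriptor count re-read, so a
 * concurrent cleanup either sees the stopped queue and restarts it, or
 * has already freed enough space for the recheck here to restart it.
 */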
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64.)
	 */
	wmb();

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (!netif_running(netdev)) {
		msg[0] = IXGBE_VF_SET_LPE;
		msg[1] = max_frame;
		hw->mbx.ops.write_posted(hw, msg, 2);
	}

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
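
/*
 * When the interface is down there is no reinit to propagate the new
 * size, so the VF instead sends an IXGBE_VF_SET_LPE mailbox message
 * (msg[1] = max frame length) asking the PF to program the large
 * packet enable / max frame size on its behalf.
 */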
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
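
/*
 * The u64_stats begin/retry loops above re-read a ring's byte and
 * packet counters until the per-ring seqcount is stable, which is what
 * makes the 64-bit counters safe to read on 32-bit SMP kernels where
 * the NAPI poll writer may be updating them concurrently; on 64-bit
 * builds these helpers compile away.
 */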
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
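
/*
 * The error labels above unwind in strict reverse order of setup:
 * err_register/err_sw_init undo the interrupt scheme and the ioremap,
 * err_ioremap frees the netdev, err_alloc_etherdev releases the BARs,
 * and err_pci_reg/err_dma finally disable the PCI device.
 */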
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
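
/*
 * AER recovery sequence, as wired up below: the PCI core first calls
 * .error_detected (detach and quiesce), then .slot_reset after the link
 * has been reset (re-enable and reset the VF), then .resume to bring
 * traffic back up and re-attach the netdev.
 */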
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */