ixgbevf: add ixgbevf_rx_skb
[platform/adaptation/renesas_rcar/renesas_kernel.git] drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2012 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28
29 /******************************************************************************
30  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
51 #include <linux/if.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
54
55 #include "ixgbevf.h"
56
57 const char ixgbevf_driver_name[] = "ixgbevf";
58 static const char ixgbevf_driver_string[] =
59         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61 #define DRV_VERSION "2.11.3-k"
62 const char ixgbevf_driver_version[] = DRV_VERSION;
63 static char ixgbevf_copyright[] =
64         "Copyright (c) 2009 - 2012 Intel Corporation.";
65
66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67         [board_82599_vf] = &ixgbevf_82599_vf_info,
68         [board_X540_vf]  = &ixgbevf_X540_vf_info,
69 };
70
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72  *
73  * Wildcard entries (PCI_ANY_ID) should come last
74  * Last entry must be all 0s
75  *
76  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77  *   Class, Class Mask, private data (not used) }
78  */
79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82         /* required last entry */
83         {0, }
84 };
85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93 static int debug = -1;
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97 /* forward decls */
98 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
99 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
100
101 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
102                                            struct ixgbevf_ring *rx_ring,
103                                            u32 val)
104 {
105         /*
106          * Force memory writes to complete before letting h/w
107          * know there are new descriptors to fetch.  (Only
108          * applicable for weak-ordered memory model archs,
109          * such as IA-64).
110          */
111         wmb();
112         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
113 }
114
115 /**
116  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
117  * @adapter: pointer to adapter struct
118  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
119  * @queue: queue to map the corresponding interrupt to
120  * @msix_vector: the vector to map to the corresponding queue
121  */
122 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
123                              u8 queue, u8 msix_vector)
124 {
125         u32 ivar, index;
126         struct ixgbe_hw *hw = &adapter->hw;
127         if (direction == -1) {
128                 /* other causes */
129                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
130                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
131                 ivar &= ~0xFF;
132                 ivar |= msix_vector;
133                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
134         } else {
135                 /* tx or rx causes */
136                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
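                    /* each VTIVAR register holds four 8-bit entries covering the
                     * Rx and Tx causes of two queues; index selects the byte for
                     * this queue/direction pair within VTIVAR(queue >> 1)
                     */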
137                 index = ((16 * (queue & 1)) + (8 * direction));
138                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
139                 ivar &= ~(0xFF << index);
140                 ivar |= (msix_vector << index);
141                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
142         }
143 }
144
145 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
146                                                struct ixgbevf_tx_buffer
147                                                *tx_buffer_info)
148 {
149         if (tx_buffer_info->dma) {
150                 if (tx_buffer_info->mapped_as_page)
151                         dma_unmap_page(tx_ring->dev,
152                                        tx_buffer_info->dma,
153                                        tx_buffer_info->length,
154                                        DMA_TO_DEVICE);
155                 else
156                         dma_unmap_single(tx_ring->dev,
157                                          tx_buffer_info->dma,
158                                          tx_buffer_info->length,
159                                          DMA_TO_DEVICE);
160                 tx_buffer_info->dma = 0;
161         }
162         if (tx_buffer_info->skb) {
163                 dev_kfree_skb_any(tx_buffer_info->skb);
164                 tx_buffer_info->skb = NULL;
165         }
166         tx_buffer_info->time_stamp = 0;
167         /* tx_buffer_info must be completely set up in the transmit path */
168 }
169
170 #define IXGBE_MAX_TXD_PWR       14
171 #define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
172
173 /* Tx Descriptors needed, worst case */
174 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
175 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
176
177 static void ixgbevf_tx_timeout(struct net_device *netdev);
178
179 /**
180  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
181  * @q_vector: structure containing interrupt and ring information
182  * @tx_ring: tx ring to clean
183  **/
184 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
185                                  struct ixgbevf_ring *tx_ring)
186 {
187         struct ixgbevf_adapter *adapter = q_vector->adapter;
188         union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
189         struct ixgbevf_tx_buffer *tx_buffer_info;
190         unsigned int i, count = 0;
191         unsigned int total_bytes = 0, total_packets = 0;
192
193         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
194                 return true;
195
196         i = tx_ring->next_to_clean;
197         tx_buffer_info = &tx_ring->tx_buffer_info[i];
198         eop_desc = tx_buffer_info->next_to_watch;
199
200         do {
201                 bool cleaned = false;
202
203                 /* if next_to_watch is not set then there is no work pending */
204                 if (!eop_desc)
205                         break;
206
207                 /* prevent any other reads prior to eop_desc */
208                 read_barrier_depends();
209
210                 /* if DD is not set pending work has not been completed */
211                 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
212                         break;
213
214                 /* clear next_to_watch to prevent false hangs */
215                 tx_buffer_info->next_to_watch = NULL;
216
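                    /* free every buffer for this packet, walking the descriptors
                     * until the one that owns eop_desc has been cleaned
                     */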
217                 for ( ; !cleaned; count++) {
218                         struct sk_buff *skb;
219                         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
220                         cleaned = (tx_desc == eop_desc);
221                         skb = tx_buffer_info->skb;
222
223                         if (cleaned && skb) {
224                                 unsigned int segs, bytecount;
225
226                                 /* gso_segs is currently only valid for tcp */
227                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
228                                 /* multiply data chunks by size of headers */
229                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
230                                             skb->len;
231                                 total_packets += segs;
232                                 total_bytes += bytecount;
233                         }
234
235                         ixgbevf_unmap_and_free_tx_resource(tx_ring,
236                                                            tx_buffer_info);
237
238                         tx_desc->wb.status = 0;
239
240                         i++;
241                         if (i == tx_ring->count)
242                                 i = 0;
243
244                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
245                 }
246
247                 eop_desc = tx_buffer_info->next_to_watch;
248         } while (count < tx_ring->count);
249
250         tx_ring->next_to_clean = i;
251
252 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
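            /* only wake the queue once enough descriptors have been reclaimed
             * that the next transmit cannot immediately stop it again
             */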
253         if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
254                      (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
255                 /* Make sure that anybody stopping the queue after this
256                  * sees the new next_to_clean.
257                  */
258                 smp_mb();
259                 if (__netif_subqueue_stopped(tx_ring->netdev,
260                                              tx_ring->queue_index) &&
261                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
262                         netif_wake_subqueue(tx_ring->netdev,
263                                             tx_ring->queue_index);
264                         ++adapter->restart_queue;
265                 }
266         }
267
268         u64_stats_update_begin(&tx_ring->syncp);
269         tx_ring->total_bytes += total_bytes;
270         tx_ring->total_packets += total_packets;
271         u64_stats_update_end(&tx_ring->syncp);
272         q_vector->tx.total_bytes += total_bytes;
273         q_vector->tx.total_packets += total_packets;
274
275         return count < tx_ring->count;
276 }
277
278 /**
279  * ixgbevf_receive_skb - Send a completed packet up the stack
280  * @q_vector: structure containing interrupt and ring information
281  * @skb: packet to send up
282  * @status: hardware indication of status of receive
283  * @rx_desc: rx descriptor
284  **/
285 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
286                                 struct sk_buff *skb, u8 status,
287                                 union ixgbe_adv_rx_desc *rx_desc)
288 {
289         struct ixgbevf_adapter *adapter = q_vector->adapter;
290         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
291         u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
292
293         if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
294                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
295
296         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
297                 napi_gro_receive(&q_vector->napi, skb);
298         else
299                 netif_rx(skb);
300 }
301
302 /**
303  * ixgbevf_rx_skb - Helper function to determine proper Rx method
304  * @q_vector: structure containing interrupt and ring information
305  * @skb: packet to send up
306  * @status: hardware indication of status of receive
307  * @rx_desc: rx descriptor
308  **/
309 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
310                            struct sk_buff *skb, u8 status,
311                            union ixgbe_adv_rx_desc *rx_desc)
312 {
313         ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
314 }
315
316 /**
317  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
318  * @ring: pointer to Rx descriptor ring structure
319  * @status_err: hardware indication of status of receive
320  * @skb: skb currently being received and modified
321  **/
322 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
323                                        u32 status_err, struct sk_buff *skb)
324 {
325         skb_checksum_none_assert(skb);
326
327         /* Rx csum disabled */
328         if (!(ring->netdev->features & NETIF_F_RXCSUM))
329                 return;
330
331         /* if IP and error */
332         if ((status_err & IXGBE_RXD_STAT_IPCS) &&
333             (status_err & IXGBE_RXDADV_ERR_IPE)) {
334                 ring->hw_csum_rx_error++;
335                 return;
336         }
337
338         if (!(status_err & IXGBE_RXD_STAT_L4CS))
339                 return;
340
341         if (status_err & IXGBE_RXDADV_ERR_TCPE) {
342                 ring->hw_csum_rx_error++;
343                 return;
344         }
345
346         /* It must be a TCP or UDP packet with a valid checksum */
347         skb->ip_summed = CHECKSUM_UNNECESSARY;
348         ring->hw_csum_rx_good++;
349 }
350
351 /**
352  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
353  * @adapter: address of board private structure
 * @rx_ring: Rx descriptor ring to refill
 * @cleaned_count: number of buffers to allocate and map
354  **/
355 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
356                                      struct ixgbevf_ring *rx_ring,
357                                      int cleaned_count)
358 {
359         struct pci_dev *pdev = adapter->pdev;
360         union ixgbe_adv_rx_desc *rx_desc;
361         struct ixgbevf_rx_buffer *bi;
362         unsigned int i = rx_ring->next_to_use;
363
364         bi = &rx_ring->rx_buffer_info[i];
365
366         while (cleaned_count--) {
367                 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
368
369                 if (!bi->skb) {
370                         struct sk_buff *skb;
371
372                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
373                                                         rx_ring->rx_buf_len);
374                         if (!skb) {
375                                 adapter->alloc_rx_buff_failed++;
376                                 goto no_buffers;
377                         }
378                         bi->skb = skb;
379
380                         bi->dma = dma_map_single(&pdev->dev, skb->data,
381                                                  rx_ring->rx_buf_len,
382                                                  DMA_FROM_DEVICE);
383                         if (dma_mapping_error(&pdev->dev, bi->dma)) {
384                                 dev_kfree_skb(skb);
385                                 bi->skb = NULL;
386                                 dev_err(&pdev->dev, "RX DMA map failed\n");
387                                 break;
388                         }
389                 }
390                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
391
392                 i++;
393                 if (i == rx_ring->count)
394                         i = 0;
395                 bi = &rx_ring->rx_buffer_info[i];
396         }
397
398 no_buffers:
399         if (rx_ring->next_to_use != i) {
400                 rx_ring->next_to_use = i;
401                 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
402         }
403 }
404
405 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
406                                              u32 qmask)
407 {
408         struct ixgbe_hw *hw = &adapter->hw;
409
410         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
411 }
412
413 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
414                                  struct ixgbevf_ring *rx_ring,
415                                  int budget)
416 {
417         struct ixgbevf_adapter *adapter = q_vector->adapter;
418         struct pci_dev *pdev = adapter->pdev;
419         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
420         struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
421         struct sk_buff *skb;
422         unsigned int i;
423         u32 len, staterr;
424         int cleaned_count = 0;
425         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
426
427         i = rx_ring->next_to_clean;
428         rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
429         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
430         rx_buffer_info = &rx_ring->rx_buffer_info[i];
431
432         while (staterr & IXGBE_RXD_STAT_DD) {
433                 if (!budget)
434                         break;
435                 budget--;
436
437                 rmb(); /* read descriptor and rx_buffer_info after status DD */
438                 len = le16_to_cpu(rx_desc->wb.upper.length);
439                 skb = rx_buffer_info->skb;
440                 prefetch(skb->data - NET_IP_ALIGN);
441                 rx_buffer_info->skb = NULL;
442
443                 if (rx_buffer_info->dma) {
444                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
445                                          rx_ring->rx_buf_len,
446                                          DMA_FROM_DEVICE);
447                         rx_buffer_info->dma = 0;
448                         skb_put(skb, len);
449                 }
450
451                 i++;
452                 if (i == rx_ring->count)
453                         i = 0;
454
455                 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
456                 prefetch(next_rxd);
457                 cleaned_count++;
458
459                 next_buffer = &rx_ring->rx_buffer_info[i];
460
461                 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
462                         skb->next = next_buffer->skb;
463                         IXGBE_CB(skb->next)->prev = skb;
464                         adapter->non_eop_descs++;
465                         goto next_desc;
466                 }
467
468                 /* we should not be chaining buffers, if we did drop the skb */
469                 if (IXGBE_CB(skb)->prev) {
470                         do {
471                                 struct sk_buff *this = skb;
472                                 skb = IXGBE_CB(skb)->prev;
473                                 dev_kfree_skb(this);
474                         } while (skb);
475                         goto next_desc;
476                 }
477
478                 /* ERR_MASK will only have valid bits if EOP set */
479                 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
480                         dev_kfree_skb_irq(skb);
481                         goto next_desc;
482                 }
483
484                 ixgbevf_rx_checksum(rx_ring, staterr, skb);
485
486                 /* probably a little skewed due to removing CRC */
487                 total_rx_bytes += skb->len;
488                 total_rx_packets++;
489
490                 /*
491                  * Work around issue of some types of VM to VM loop back
492                  * packets not getting split correctly
493                  */
494                 if (staterr & IXGBE_RXD_STAT_LB) {
495                         u32 header_fixup_len = skb_headlen(skb);
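                            /* 14 == ETH_HLEN, the Ethernet header length */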
496                         if (header_fixup_len < 14)
497                                 skb_push(skb, header_fixup_len);
498                 }
499                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
500
501                 /* Workaround hardware that can't do proper VEPA multicast
502                  * source pruning.
503                  */
504                 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
505                     ether_addr_equal(adapter->netdev->dev_addr,
506                                      eth_hdr(skb)->h_source)) {
507                         dev_kfree_skb_irq(skb);
508                         goto next_desc;
509                 }
510
511                 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
512
513 next_desc:
514                 rx_desc->wb.upper.status_error = 0;
515
516                 /* return some buffers to hardware, one at a time is too slow */
517                 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
518                         ixgbevf_alloc_rx_buffers(adapter, rx_ring,
519                                                  cleaned_count);
520                         cleaned_count = 0;
521                 }
522
523                 /* use prefetched values */
524                 rx_desc = next_rxd;
525                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
526
527                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
528         }
529
530         rx_ring->next_to_clean = i;
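            /* top up the ring with a fresh buffer for every descriptor that is
             * now unused
             */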
531         cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
532
533         if (cleaned_count)
534                 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
535
536         u64_stats_update_begin(&rx_ring->syncp);
537         rx_ring->total_packets += total_rx_packets;
538         rx_ring->total_bytes += total_rx_bytes;
539         u64_stats_update_end(&rx_ring->syncp);
540         q_vector->rx.total_packets += total_rx_packets;
541         q_vector->rx.total_bytes += total_rx_bytes;
542
543         return !!budget;
544 }
545
546 /**
547  * ixgbevf_poll - NAPI polling callback
548  * @napi: napi struct with our devices info in it
549  * @budget: amount of work driver is allowed to do this pass, in packets
550  *
551  * This function cleans all of the Tx and Rx rings associated with a given
552  * q_vector.
553  **/
554 static int ixgbevf_poll(struct napi_struct *napi, int budget)
555 {
556         struct ixgbevf_q_vector *q_vector =
557                 container_of(napi, struct ixgbevf_q_vector, napi);
558         struct ixgbevf_adapter *adapter = q_vector->adapter;
559         struct ixgbevf_ring *ring;
560         int per_ring_budget;
561         bool clean_complete = true;
562
563         ixgbevf_for_each_ring(ring, q_vector->tx)
564                 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
565
566         /* attempt to distribute budget to each queue fairly, but don't allow
567          * the budget to go below 1 because we'll exit polling */
568         if (q_vector->rx.count > 1)
569                 per_ring_budget = max(budget/q_vector->rx.count, 1);
570         else
571                 per_ring_budget = budget;
572
573         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
574         ixgbevf_for_each_ring(ring, q_vector->rx)
575                 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
576                                                        per_ring_budget);
577         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
578
579         /* If all work not completed, return budget and keep polling */
580         if (!clean_complete)
581                 return budget;
582         /* all work done, exit the polling mode */
583         napi_complete(napi);
584         if (adapter->rx_itr_setting & 1)
585                 ixgbevf_set_itr(q_vector);
586         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
587                 ixgbevf_irq_enable_queues(adapter,
588                                           1 << q_vector->v_idx);
589
590         return 0;
591 }
592
593 /**
594  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
595  * @q_vector: structure containing interrupt and ring information
596  */
597 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
598 {
599         struct ixgbevf_adapter *adapter = q_vector->adapter;
600         struct ixgbe_hw *hw = &adapter->hw;
601         int v_idx = q_vector->v_idx;
602         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
603
604         /*
605          * set the WDIS bit to not clear the timer bits and cause an
606          * immediate assertion of the interrupt
607          */
608         itr_reg |= IXGBE_EITR_CNT_WDIS;
609
610         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
611 }
612
613 /**
614  * ixgbevf_configure_msix - Configure MSI-X hardware
615  * @adapter: board private structure
616  *
617  * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
618  * interrupts.
619  **/
620 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
621 {
622         struct ixgbevf_q_vector *q_vector;
623         int q_vectors, v_idx;
624
625         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
626         adapter->eims_enable_mask = 0;
627
628         /*
629          * Populate the IVAR table and set the ITR values to the
630          * corresponding register.
631          */
632         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
633                 struct ixgbevf_ring *ring;
634                 q_vector = adapter->q_vector[v_idx];
635
636                 ixgbevf_for_each_ring(ring, q_vector->rx)
637                         ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
638
639                 ixgbevf_for_each_ring(ring, q_vector->tx)
640                         ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
641
642                 if (q_vector->tx.ring && !q_vector->rx.ring) {
643                         /* tx only vector */
644                         if (adapter->tx_itr_setting == 1)
645                                 q_vector->itr = IXGBE_10K_ITR;
646                         else
647                                 q_vector->itr = adapter->tx_itr_setting;
648                 } else {
649                         /* rx or rx/tx vector */
650                         if (adapter->rx_itr_setting == 1)
651                                 q_vector->itr = IXGBE_20K_ITR;
652                         else
653                                 q_vector->itr = adapter->rx_itr_setting;
654                 }
655
656                 /* add q_vector eims value to global eims_enable_mask */
657                 adapter->eims_enable_mask |= 1 << v_idx;
658
659                 ixgbevf_write_eitr(q_vector);
660         }
661
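            /* the loop above leaves v_idx == q_vectors, so the "other causes"
             * (mailbox/link) interrupt is mapped to the last MSI-X vector
             */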
662         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
663         /* setup eims_other and add value to global eims_enable_mask */
664         adapter->eims_other = 1 << v_idx;
665         adapter->eims_enable_mask |= adapter->eims_other;
666 }
667
668 enum latency_range {
669         lowest_latency = 0,
670         low_latency = 1,
671         bulk_latency = 2,
672         latency_invalid = 255
673 };
674
675 /**
676  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
677  * @q_vector: structure containing interrupt and ring information
678  * @ring_container: structure containing ring performance data
679  *
680  *      Stores a new ITR value based on packets and byte
681  *      counts during the last interrupt.  The advantage of per interrupt
682  *      computation is faster updates and more accurate ITR for the current
683  *      traffic pattern.  Constants in this function were computed
684  *      based on theoretical maximum wire speed and thresholds were set based
685  *      on testing data as well as attempting to minimize response time
686  *      while increasing bulk throughput.
687  **/
688 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
689                                struct ixgbevf_ring_container *ring_container)
690 {
691         int bytes = ring_container->total_bytes;
692         int packets = ring_container->total_packets;
693         u32 timepassed_us;
694         u64 bytes_perint;
695         u8 itr_setting = ring_container->itr;
696
697         if (packets == 0)
698                 return;
699
700         /* simple throttlerate management
701          *    0-20MB/s lowest (100000 ints/s)
702          *   20-100MB/s low   (20000 ints/s)
703          *  100-1249MB/s bulk (8000 ints/s)
704          */
705         /* what was last interrupt timeslice? */
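            /* q_vector->itr is kept in EITR count units of 0.25 usec, so
             * shifting right by two gives the interval in microseconds
             */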
706         timepassed_us = q_vector->itr >> 2;
707         bytes_perint = bytes / timepassed_us; /* bytes/usec */
708
709         switch (itr_setting) {
710         case lowest_latency:
711                 if (bytes_perint > 10)
712                         itr_setting = low_latency;
713                 break;
714         case low_latency:
715                 if (bytes_perint > 20)
716                         itr_setting = bulk_latency;
717                 else if (bytes_perint <= 10)
718                         itr_setting = lowest_latency;
719                 break;
720         case bulk_latency:
721                 if (bytes_perint <= 20)
722                         itr_setting = low_latency;
723                 break;
724         }
725
726         /* clear work counters since we have the values we need */
727         ring_container->total_bytes = 0;
728         ring_container->total_packets = 0;
729
730         /* write updated itr to ring container */
731         ring_container->itr = itr_setting;
732 }
733
734 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
735 {
736         u32 new_itr = q_vector->itr;
737         u8 current_itr;
738
739         ixgbevf_update_itr(q_vector, &q_vector->tx);
740         ixgbevf_update_itr(q_vector, &q_vector->rx);
741
742         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
743
744         switch (current_itr) {
745         /* counts and packets in update_itr are dependent on these numbers */
746         case lowest_latency:
747                 new_itr = IXGBE_100K_ITR;
748                 break;
749         case low_latency:
750                 new_itr = IXGBE_20K_ITR;
751                 break;
752         case bulk_latency:
753         default:
754                 new_itr = IXGBE_8K_ITR;
755                 break;
756         }
757
758         if (new_itr != q_vector->itr) {
759                 /* do an exponential smoothing */
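                    /* this works out to a 90/10 weighted average of the old and
                     * new interrupt rates, so the ITR moves toward its target
                     * gradually rather than jumping
                     */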
760                 new_itr = (10 * new_itr * q_vector->itr) /
761                           ((9 * new_itr) + q_vector->itr);
762
763                 /* save the algorithm value here */
764                 q_vector->itr = new_itr;
765
766                 ixgbevf_write_eitr(q_vector);
767         }
768 }
769
770 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
771 {
772         struct ixgbevf_adapter *adapter = data;
773         struct ixgbe_hw *hw = &adapter->hw;
774
775         hw->mac.get_link_status = 1;
776
777         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
778                 mod_timer(&adapter->watchdog_timer, jiffies);
779
780         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
781
782         return IRQ_HANDLED;
783 }
784
785 /**
786  * ixgbevf_msix_clean_rings - MSI-X handler that schedules NAPI for a q_vector's rings
787  * @irq: unused
788  * @data: pointer to our q_vector struct for this interrupt vector
789  **/
790 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
791 {
792         struct ixgbevf_q_vector *q_vector = data;
793
794         /* EIAM disabled interrupts (on this vector) for us */
795         if (q_vector->rx.ring || q_vector->tx.ring)
796                 napi_schedule(&q_vector->napi);
797
798         return IRQ_HANDLED;
799 }
800
801 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
802                                      int r_idx)
803 {
804         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
805
806         a->rx_ring[r_idx].next = q_vector->rx.ring;
807         q_vector->rx.ring = &a->rx_ring[r_idx];
808         q_vector->rx.count++;
809 }
810
811 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
812                                      int t_idx)
813 {
814         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
815
816         a->tx_ring[t_idx].next = q_vector->tx.ring;
817         q_vector->tx.ring = &a->tx_ring[t_idx];
818         q_vector->tx.count++;
819 }
820
821 /**
822  * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
823  * @adapter: board private structure to initialize
824  *
825  * This function maps descriptor rings to the queue-specific vectors
826  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
827  * one vector per ring/queue, but on a constrained vector budget, we
828  * group the rings as "efficiently" as possible.  You would add new
829  * mapping configurations in here.
830  **/
831 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
832 {
833         int q_vectors;
834         int v_start = 0;
835         int rxr_idx = 0, txr_idx = 0;
836         int rxr_remaining = adapter->num_rx_queues;
837         int txr_remaining = adapter->num_tx_queues;
838         int i, j;
839         int rqpv, tqpv;
840         int err = 0;
841
842         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
843
844         /*
845          * The ideal configuration...
846          * We have enough vectors to map one per queue.
847          */
848         if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
849                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
850                         map_vector_to_rxq(adapter, v_start, rxr_idx);
851
852                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
853                         map_vector_to_txq(adapter, v_start, txr_idx);
854                 goto out;
855         }
856
857         /*
858          * If we don't have enough vectors for a 1-to-1
859          * mapping, we'll have to group them so there are
860          * multiple queues per vector.
861          */
862         /* Re-adjusting *qpv takes care of the remainder. */
863         for (i = v_start; i < q_vectors; i++) {
864                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
865                 for (j = 0; j < rqpv; j++) {
866                         map_vector_to_rxq(adapter, i, rxr_idx);
867                         rxr_idx++;
868                         rxr_remaining--;
869                 }
870         }
871         for (i = v_start; i < q_vectors; i++) {
872                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
873                 for (j = 0; j < tqpv; j++) {
874                         map_vector_to_txq(adapter, i, txr_idx);
875                         txr_idx++;
876                         txr_remaining--;
877                 }
878         }
879
880 out:
881         return err;
882 }
883
884 /**
885  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
886  * @adapter: board private structure
887  *
888  * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
889  * interrupts from the kernel.
890  **/
891 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
892 {
893         struct net_device *netdev = adapter->netdev;
894         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
895         int vector, err;
896         int ri = 0, ti = 0;
897
898         for (vector = 0; vector < q_vectors; vector++) {
899                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
900                 struct msix_entry *entry = &adapter->msix_entries[vector];
901
902                 if (q_vector->tx.ring && q_vector->rx.ring) {
903                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
904                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
905                         ti++;
906                 } else if (q_vector->rx.ring) {
907                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
908                                  "%s-%s-%d", netdev->name, "rx", ri++);
909                 } else if (q_vector->tx.ring) {
910                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
911                                  "%s-%s-%d", netdev->name, "tx", ti++);
912                 } else {
913                         /* skip this unused q_vector */
914                         continue;
915                 }
916                 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
917                                   q_vector->name, q_vector);
918                 if (err) {
919                         hw_dbg(&adapter->hw,
920                                "request_irq failed for MSIX interrupt "
921                                "Error: %d\n", err);
922                         goto free_queue_irqs;
923                 }
924         }
925
926         err = request_irq(adapter->msix_entries[vector].vector,
927                           &ixgbevf_msix_other, 0, netdev->name, adapter);
928         if (err) {
929                 hw_dbg(&adapter->hw,
930                        "request_irq for msix_other failed: %d\n", err);
931                 goto free_queue_irqs;
932         }
933
934         return 0;
935
936 free_queue_irqs:
937         while (vector) {
938                 vector--;
939                 free_irq(adapter->msix_entries[vector].vector,
940                          adapter->q_vector[vector]);
941         }
942         /* This failure is non-recoverable - it indicates the system is
943          * out of MSIX vector resources and the VF driver cannot run
944          * without them.  Set the number of msix vectors to zero
945          * indicating that not enough can be allocated.  The error
946          * will be returned to the user indicating device open failed.
947          * Any further attempts to force the driver to open will also
948          * fail.  The only way to recover is to unload the driver and
949          * reload it again.  If the system has recovered some MSIX
950          * vectors then it may succeed.
951          */
952         adapter->num_msix_vectors = 0;
953         return err;
954 }
955
956 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
957 {
958         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
959
960         for (i = 0; i < q_vectors; i++) {
961                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
962                 q_vector->rx.ring = NULL;
963                 q_vector->tx.ring = NULL;
964                 q_vector->rx.count = 0;
965                 q_vector->tx.count = 0;
966         }
967 }
968
969 /**
970  * ixgbevf_request_irq - initialize interrupts
971  * @adapter: board private structure
972  *
973  * Attempts to configure interrupts using the best available
974  * capabilities of the hardware and kernel.
975  **/
976 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
977 {
978         int err = 0;
979
980         err = ixgbevf_request_msix_irqs(adapter);
981
982         if (err)
983                 hw_dbg(&adapter->hw,
984                        "request_irq failed, Error %d\n", err);
985
986         return err;
987 }
988
989 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
990 {
991         int i, q_vectors;
992
993         q_vectors = adapter->num_msix_vectors;
994         i = q_vectors - 1;
995
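            /* the last vector is the other-causes (mailbox/link) interrupt; it
             * was requested with the adapter itself as the dev_id
             */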
996         free_irq(adapter->msix_entries[i].vector, adapter);
997         i--;
998
999         for (; i >= 0; i--) {
1000                 /* free only the irqs that were actually requested */
1001                 if (!adapter->q_vector[i]->rx.ring &&
1002                     !adapter->q_vector[i]->tx.ring)
1003                         continue;
1004
1005                 free_irq(adapter->msix_entries[i].vector,
1006                          adapter->q_vector[i]);
1007         }
1008
1009         ixgbevf_reset_q_vectors(adapter);
1010 }
1011
1012 /**
1013  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1014  * @adapter: board private structure
1015  **/
1016 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1017 {
1018         struct ixgbe_hw *hw = &adapter->hw;
1019         int i;
1020
1021         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1022         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1023         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1024
1025         IXGBE_WRITE_FLUSH(hw);
1026
1027         for (i = 0; i < adapter->num_msix_vectors; i++)
1028                 synchronize_irq(adapter->msix_entries[i].vector);
1029 }
1030
1031 /**
1032  * ixgbevf_irq_enable - Enable default interrupt generation settings
1033  * @adapter: board private structure
1034  **/
1035 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1036 {
1037         struct ixgbe_hw *hw = &adapter->hw;
1038
1039         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1040         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1041         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1042 }
1043
1044 /**
1045  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1046  * @adapter: board private structure
1047  *
1048  * Configure the Tx unit of the MAC after a reset.
1049  **/
1050 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1051 {
1052         u64 tdba;
1053         struct ixgbe_hw *hw = &adapter->hw;
1054         u32 i, j, tdlen, txctrl;
1055
1056         /* Setup the HW Tx Head and Tail descriptor pointers */
1057         for (i = 0; i < adapter->num_tx_queues; i++) {
1058                 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1059                 j = ring->reg_idx;
1060                 tdba = ring->dma;
1061                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1062                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1063                                 (tdba & DMA_BIT_MASK(32)));
1064                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1065                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1066                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1067                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1068                 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1069                 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1070                 /* Disable Tx Head Writeback RO bit, since this hoses
1071                  * bookkeeping if things aren't delivered in order.
1072                  */
1073                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1074                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1075                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1076         }
1077 }
1078
1079 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1080
1081 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1082 {
1083         struct ixgbevf_ring *rx_ring;
1084         struct ixgbe_hw *hw = &adapter->hw;
1085         u32 srrctl;
1086
1087         rx_ring = &adapter->rx_ring[index];
1088
1089         srrctl = IXGBE_SRRCTL_DROP_EN;
1090
1091         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1092
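            /* the SRRCTL BSIZEPKT field is expressed in 1 KB units, so round
             * the buffer length up to the next 1 KB before shifting it in
             */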
1093         srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1094                   IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1095
1096         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1097 }
1098
1099 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1100 {
1101         struct ixgbe_hw *hw = &adapter->hw;
1102
1103         /* PSRTYPE must be initialized in 82599 */
1104         u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1105                       IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1106                       IXGBE_PSRTYPE_L2HDR;
1107
1108         if (adapter->num_rx_queues > 1)
1109                 psrtype |= 1 << 29;
1110
1111         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1112 }
1113
1114 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1115 {
1116         struct ixgbe_hw *hw = &adapter->hw;
1117         struct net_device *netdev = adapter->netdev;
1118         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1119         int i;
1120         u16 rx_buf_len;
1121
1122         /* notify the PF of our intent to use this size of frame */
1123         ixgbevf_rlpml_set_vf(hw, max_frame);
1124
1125         /* the PF allows an extra 4 bytes beyond max_frame for VLAN tagged frames */
1126         max_frame += VLAN_HLEN;
1127
1128         /*
1129          * Allocate buffer sizes that fit well into 32K and
1130          * take into account max frame size of 9.5K
1131          */
1132         if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1133             (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1134                 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1135         else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1136                 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1137         else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1138                 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1139         else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1140                 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1141         else
1142                 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1143
1144         for (i = 0; i < adapter->num_rx_queues; i++)
1145                 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1146 }
1147
1148 /**
1149  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1150  * @adapter: board private structure
1151  *
1152  * Configure the Rx unit of the MAC after a reset.
1153  **/
1154 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1155 {
1156         u64 rdba;
1157         struct ixgbe_hw *hw = &adapter->hw;
1158         int i, j;
1159         u32 rdlen;
1160
1161         ixgbevf_setup_psrtype(adapter);
1162
1163         /* set_rx_buffer_len must be called before ring initialization */
1164         ixgbevf_set_rx_buffer_len(adapter);
1165
1166         rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1167         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1168          * the Base and Length of the Rx Descriptor Ring */
1169         for (i = 0; i < adapter->num_rx_queues; i++) {
1170                 rdba = adapter->rx_ring[i].dma;
1171                 j = adapter->rx_ring[i].reg_idx;
1172                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1173                                 (rdba & DMA_BIT_MASK(32)));
1174                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1175                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1176                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1177                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1178                 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1179                 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1180
1181                 ixgbevf_configure_srrctl(adapter, j);
1182         }
1183 }
1184
1185 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1186                                    __be16 proto, u16 vid)
1187 {
1188         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1189         struct ixgbe_hw *hw = &adapter->hw;
1190         int err;
1191
1192         spin_lock_bh(&adapter->mbx_lock);
1193
1194         /* add VID to filter table */
1195         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1196
1197         spin_unlock_bh(&adapter->mbx_lock);
1198
1199         /* translate error return types so error makes sense */
1200         if (err == IXGBE_ERR_MBX)
1201                 return -EIO;
1202
1203         if (err == IXGBE_ERR_INVALID_ARGUMENT)
1204                 return -EACCES;
1205
1206         set_bit(vid, adapter->active_vlans);
1207
1208         return err;
1209 }
1210
1211 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1212                                     __be16 proto, u16 vid)
1213 {
1214         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1215         struct ixgbe_hw *hw = &adapter->hw;
1216         int err = -EOPNOTSUPP;
1217
1218         spin_lock_bh(&adapter->mbx_lock);
1219
1220         /* remove VID from filter table */
1221         err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1222
1223         spin_unlock_bh(&adapter->mbx_lock);
1224
1225         clear_bit(vid, adapter->active_vlans);
1226
1227         return err;
1228 }
1229
1230 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1231 {
1232         u16 vid;
1233
1234         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1235                 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1236                                         htons(ETH_P_8021Q), vid);
1237 }
1238
1239 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1240 {
1241         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1242         struct ixgbe_hw *hw = &adapter->hw;
1243         int count = 0;
1244
1245         if ((netdev_uc_count(netdev)) > 10) {
1246                 pr_err("Too many unicast filters - No Space\n");
1247                 return -ENOSPC;
1248         }
1249
1250         if (!netdev_uc_empty(netdev)) {
1251                 struct netdev_hw_addr *ha;
1252                 netdev_for_each_uc_addr(ha, netdev) {
1253                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1254                         udelay(200);
1255                 }
1256         } else {
1257                 /*
1258                  * If the list is empty then send message to PF driver to
1259                  * clear all macvlans on this VF.
1260                  */
1261                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1262         }
1263
1264         return count;
1265 }
1266
1267 /**
1268  * ixgbevf_set_rx_mode - Multicast and unicast set
1269  * @netdev: network interface device structure
1270  *
1271  * The set_rx_mode entry point is called whenever the multicast address
1272  * list, unicast address list or the network interface flags are updated.
1273  * This routine is responsible for configuring the hardware for proper
1274  * multicast mode and configuring requested unicast filters.
1275  **/
1276 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1277 {
1278         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1279         struct ixgbe_hw *hw = &adapter->hw;
1280
1281         spin_lock_bh(&adapter->mbx_lock);
1282
1283         /* reprogram multicast list */
1284         hw->mac.ops.update_mc_addr_list(hw, netdev);
1285
1286         ixgbevf_write_uc_addr_list(netdev);
1287
1288         spin_unlock_bh(&adapter->mbx_lock);
1289 }
1290
1291 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1292 {
1293         int q_idx;
1294         struct ixgbevf_q_vector *q_vector;
1295         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1296
1297         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1298                 q_vector = adapter->q_vector[q_idx];
1299                 napi_enable(&q_vector->napi);
1300         }
1301 }
1302
1303 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1304 {
1305         int q_idx;
1306         struct ixgbevf_q_vector *q_vector;
1307         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1308
1309         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1310                 q_vector = adapter->q_vector[q_idx];
1311                 napi_disable(&q_vector->napi);
1312         }
1313 }
1314
1315 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1316 {
1317         struct net_device *netdev = adapter->netdev;
1318         int i;
1319
1320         ixgbevf_set_rx_mode(netdev);
1321
1322         ixgbevf_restore_vlan(adapter);
1323
1324         ixgbevf_configure_tx(adapter);
1325         ixgbevf_configure_rx(adapter);
1326         for (i = 0; i < adapter->num_rx_queues; i++) {
1327                 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1328                 ixgbevf_alloc_rx_buffers(adapter, ring,
1329                                          IXGBE_DESC_UNUSED(ring));
1330         }
1331 }
1332
1333 #define IXGBEVF_MAX_RX_DESC_POLL 10
1334 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1335                                          int rxr)
1336 {
1337         struct ixgbe_hw *hw = &adapter->hw;
1338         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1339         u32 rxdctl;
1340         int j = adapter->rx_ring[rxr].reg_idx;
1341
1342         do {
1343                 usleep_range(1000, 2000);
1344                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1345         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1346
1347         if (!wait_loop)
1348                 hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
1349                        rxr);
1350
1351         ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1352                                 (adapter->rx_ring[rxr].count - 1));
1353 }
1354
1355 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1356                                      struct ixgbevf_ring *ring)
1357 {
1358         struct ixgbe_hw *hw = &adapter->hw;
1359         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1360         u32 rxdctl;
1361         u8 reg_idx = ring->reg_idx;
1362
1363         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1364         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1365
1366         /* write value back with RXDCTL.ENABLE bit cleared */
1367         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1368
1369         /* the hardware may take up to 100us to really disable the rx queue */
1370         do {
1371                 udelay(10);
1372                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1373         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1374
1375         if (!wait_loop)
1376                 hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
1377                        reg_idx);
1378 }
1379
1380 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1381 {
1382         /* Only save pre-reset stats if there are some */
1383         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1384                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1385                         adapter->stats.base_vfgprc;
1386                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1387                         adapter->stats.base_vfgptc;
1388                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1389                         adapter->stats.base_vfgorc;
1390                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1391                         adapter->stats.base_vfgotc;
1392                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1393                         adapter->stats.base_vfmprc;
1394         }
1395 }
1396
1397 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1398 {
1399         struct ixgbe_hw *hw = &adapter->hw;
1400
1401         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1402         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1403         adapter->stats.last_vfgorc |=
1404                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1405         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1406         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1407         adapter->stats.last_vfgotc |=
1408                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1409         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1410
1411         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1412         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1413         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1414         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1415         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1416 }
1417
1418 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1419 {
1420         struct ixgbe_hw *hw = &adapter->hw;
1421         int api[] = { ixgbe_mbox_api_11,
1422                       ixgbe_mbox_api_10,
1423                       ixgbe_mbox_api_unknown };
1424         int err = 0, idx = 0;
1425
1426         spin_lock_bh(&adapter->mbx_lock);
1427
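        /* walk the mailbox API list from newest to oldest and stop at the
         * first version the PF acknowledges, so the VF keeps working
         * against an older PF
         */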
1428         while (api[idx] != ixgbe_mbox_api_unknown) {
1429                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1430                 if (!err)
1431                         break;
1432                 idx++;
1433         }
1434
1435         spin_unlock_bh(&adapter->mbx_lock);
1436 }
1437
1438 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1439 {
1440         struct net_device *netdev = adapter->netdev;
1441         struct ixgbe_hw *hw = &adapter->hw;
1442         int i, j = 0;
1443         int num_rx_rings = adapter->num_rx_queues;
1444         u32 txdctl, rxdctl;
1445
1446         for (i = 0; i < adapter->num_tx_queues; i++) {
1447                 j = adapter->tx_ring[i].reg_idx;
1448                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1449                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1450                 txdctl |= (8 << 16);
1451                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1452         }
1453
1454         for (i = 0; i < adapter->num_tx_queues; i++) {
1455                 j = adapter->tx_ring[i].reg_idx;
1456                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1457                 txdctl |= IXGBE_TXDCTL_ENABLE;
1458                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1459         }
1460
1461         for (i = 0; i < num_rx_rings; i++) {
1462                 j = adapter->rx_ring[i].reg_idx;
1463                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1464                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
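                /* X540 VFs can program the per-queue maximum receive frame
                 * size (RLPML) directly in RXDCTL, so derive it from the MTU
                 */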
1465                 if (hw->mac.type == ixgbe_mac_X540_vf) {
1466                         rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1467                         rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1468                                    IXGBE_RXDCTL_RLPML_EN);
1469                 }
1470                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1471                 ixgbevf_rx_desc_queue_enable(adapter, i);
1472         }
1473
1474         ixgbevf_configure_msix(adapter);
1475
1476         spin_lock_bh(&adapter->mbx_lock);
1477
1478         if (is_valid_ether_addr(hw->mac.addr))
1479                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1480         else
1481                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1482
1483         spin_unlock_bh(&adapter->mbx_lock);
1484
1485         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1486         ixgbevf_napi_enable_all(adapter);
1487
1488         /* enable transmits */
1489         netif_tx_start_all_queues(netdev);
1490
1491         ixgbevf_save_reset_stats(adapter);
1492         ixgbevf_init_last_counter_stats(adapter);
1493
1494         hw->mac.get_link_status = 1;
1495         mod_timer(&adapter->watchdog_timer, jiffies);
1496 }
1497
1498 static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1499 {
1500         struct ixgbe_hw *hw = &adapter->hw;
1501         struct ixgbevf_ring *rx_ring;
1502         unsigned int def_q = 0;
1503         unsigned int num_tcs = 0;
1504         unsigned int num_rx_queues = 1;
1505         int err, i;
1506
1507         spin_lock_bh(&adapter->mbx_lock);
1508
1509         /* fetch queue configuration from the PF */
1510         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1511
1512         spin_unlock_bh(&adapter->mbx_lock);
1513
1514         if (err)
1515                 return err;
1516
1517         if (num_tcs > 1) {
1518                 /* update default Tx ring register index */
1519                 adapter->tx_ring[0].reg_idx = def_q;
1520
1521                 /* we need as many queues as traffic classes */
1522                 num_rx_queues = num_tcs;
1523         }
1524
1525         /* nothing to do if we have the correct number of queues */
1526         if (adapter->num_rx_queues == num_rx_queues)
1527                 return 0;
1528
1529         /* allocate new rings */
1530         rx_ring = kcalloc(num_rx_queues,
1531                           sizeof(struct ixgbevf_ring), GFP_KERNEL);
1532         if (!rx_ring)
1533                 return -ENOMEM;
1534
1535         /* setup ring fields */
1536         for (i = 0; i < num_rx_queues; i++) {
1537                 rx_ring[i].count = adapter->rx_ring_count;
1538                 rx_ring[i].queue_index = i;
1539                 rx_ring[i].reg_idx = i;
1540                 rx_ring[i].dev = &adapter->pdev->dev;
1541                 rx_ring[i].netdev = adapter->netdev;
1542
1543                 /* allocate resources on the ring */
1544                 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1545                 if (err) {
1546                         while (i) {
1547                                 i--;
1548                                 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1549                         }
1550                         kfree(rx_ring);
1551                         return err;
1552                 }
1553         }
1554
1555         /* free the existing rings and queues */
1556         ixgbevf_free_all_rx_resources(adapter);
1557         adapter->num_rx_queues = 0;
1558         kfree(adapter->rx_ring);
1559
1560         /* move new rings into position on the adapter struct */
1561         adapter->rx_ring = rx_ring;
1562         adapter->num_rx_queues = num_rx_queues;
1563
1564         /* reset ring to vector mapping */
1565         ixgbevf_reset_q_vectors(adapter);
1566         ixgbevf_map_rings_to_vectors(adapter);
1567
1568         return 0;
1569 }
1570
1571 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1572 {
1573         struct ixgbe_hw *hw = &adapter->hw;
1574
1575         ixgbevf_reset_queues(adapter);
1576
1577         ixgbevf_configure(adapter);
1578
1579         ixgbevf_up_complete(adapter);
1580
1581         /* clear any pending interrupts, may auto mask */
1582         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1583
1584         ixgbevf_irq_enable(adapter);
1585 }
1586
1587 /**
1588  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1589  * @adapter: board private structure
1590  * @rx_ring: ring to free buffers from
1591  **/
1592 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1593                                   struct ixgbevf_ring *rx_ring)
1594 {
1595         struct pci_dev *pdev = adapter->pdev;
1596         unsigned long size;
1597         unsigned int i;
1598
1599         if (!rx_ring->rx_buffer_info)
1600                 return;
1601
1602         /* Free all the Rx ring sk_buffs */
1603         for (i = 0; i < rx_ring->count; i++) {
1604                 struct ixgbevf_rx_buffer *rx_buffer_info;
1605
1606                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1607                 if (rx_buffer_info->dma) {
1608                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1609                                          rx_ring->rx_buf_len,
1610                                          DMA_FROM_DEVICE);
1611                         rx_buffer_info->dma = 0;
1612                 }
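                /* an skb that spans several Rx buffers is chained through
                 * IXGBE_CB(skb)->prev, so walk and free the whole chain
                 */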
1613                 if (rx_buffer_info->skb) {
1614                         struct sk_buff *skb = rx_buffer_info->skb;
1615                         rx_buffer_info->skb = NULL;
1616                         do {
1617                                 struct sk_buff *this = skb;
1618                                 skb = IXGBE_CB(skb)->prev;
1619                                 dev_kfree_skb(this);
1620                         } while (skb);
1621                 }
1622         }
1623
1624         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1625         memset(rx_ring->rx_buffer_info, 0, size);
1626
1627         /* Zero out the descriptor ring */
1628         memset(rx_ring->desc, 0, rx_ring->size);
1629
1630         rx_ring->next_to_clean = 0;
1631         rx_ring->next_to_use = 0;
1632
1633         if (rx_ring->head)
1634                 writel(0, adapter->hw.hw_addr + rx_ring->head);
1635         if (rx_ring->tail)
1636                 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1637 }
1638
1639 /**
1640  * ixgbevf_clean_tx_ring - Free Tx Buffers
1641  * @adapter: board private structure
1642  * @tx_ring: ring to be cleaned
1643  **/
1644 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1645                                   struct ixgbevf_ring *tx_ring)
1646 {
1647         struct ixgbevf_tx_buffer *tx_buffer_info;
1648         unsigned long size;
1649         unsigned int i;
1650
1651         if (!tx_ring->tx_buffer_info)
1652                 return;
1653
1654         /* Free all the Tx ring sk_buffs */
1655         for (i = 0; i < tx_ring->count; i++) {
1656                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1657                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1658         }
1659
1660         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1661         memset(tx_ring->tx_buffer_info, 0, size);
1662
1663         memset(tx_ring->desc, 0, tx_ring->size);
1664
1665         tx_ring->next_to_use = 0;
1666         tx_ring->next_to_clean = 0;
1667
1668         if (tx_ring->head)
1669                 writel(0, adapter->hw.hw_addr + tx_ring->head);
1670         if (tx_ring->tail)
1671                 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1672 }
1673
1674 /**
1675  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1676  * @adapter: board private structure
1677  **/
1678 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1679 {
1680         int i;
1681
1682         for (i = 0; i < adapter->num_rx_queues; i++)
1683                 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1684 }
1685
1686 /**
1687  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1688  * @adapter: board private structure
1689  **/
1690 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1691 {
1692         int i;
1693
1694         for (i = 0; i < adapter->num_tx_queues; i++)
1695                 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1696 }
1697
1698 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1699 {
1700         struct net_device *netdev = adapter->netdev;
1701         struct ixgbe_hw *hw = &adapter->hw;
1702         u32 txdctl;
1703         int i, j;
1704
1705         /* signal that we are down to the interrupt handler */
1706         set_bit(__IXGBEVF_DOWN, &adapter->state);
1707
1708         /* disable all enabled rx queues */
1709         for (i = 0; i < adapter->num_rx_queues; i++)
1710                 ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
1711
1712         netif_tx_disable(netdev);
1713
1714         msleep(10);
1715
1716         netif_tx_stop_all_queues(netdev);
1717
1718         ixgbevf_irq_disable(adapter);
1719
1720         ixgbevf_napi_disable_all(adapter);
1721
1722         del_timer_sync(&adapter->watchdog_timer);
1723         /* can't call flush_scheduled_work() here because it can deadlock
1724          * if linkwatch_event tries to acquire the rtnl_lock which we are
1725          * holding */
1726         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1727                 msleep(1);
1728
1729         /* disable transmits in the hardware now that interrupts are off */
1730         for (i = 0; i < adapter->num_tx_queues; i++) {
1731                 j = adapter->tx_ring[i].reg_idx;
1732                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1733                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1734                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1735         }
1736
1737         netif_carrier_off(netdev);
1738
1739         if (!pci_channel_offline(adapter->pdev))
1740                 ixgbevf_reset(adapter);
1741
1742         ixgbevf_clean_all_tx_rings(adapter);
1743         ixgbevf_clean_all_rx_rings(adapter);
1744 }
1745
1746 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1747 {
1748         WARN_ON(in_interrupt());
1749
1750         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1751                 msleep(1);
1752
1753         ixgbevf_down(adapter);
1754         ixgbevf_up(adapter);
1755
1756         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1757 }
1758
1759 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1760 {
1761         struct ixgbe_hw *hw = &adapter->hw;
1762         struct net_device *netdev = adapter->netdev;
1763
1764         if (hw->mac.ops.reset_hw(hw)) {
1765                 hw_dbg(hw, "PF still resetting\n");
1766         } else {
1767                 hw->mac.ops.init_hw(hw);
1768                 ixgbevf_negotiate_api(adapter);
1769         }
1770
1771         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1772                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1773                        netdev->addr_len);
1774                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1775                        netdev->addr_len);
1776         }
1777 }
1778
1779 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1780                                         int vectors)
1781 {
1782         int err = 0;
1783         int vector_threshold;
1784
1785         /* We'll want at least 2 (vector_threshold):
1786          * 1) TxQ[0] + RxQ[0] handler
1787          * 2) Other (Link Status Change, etc.)
1788          */
1789         vector_threshold = MIN_MSIX_COUNT;
1790
1791         /* The more we get, the more we will assign to Tx/Rx Cleanup
1792          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1793          * Right now, we simply care about how many we'll get; we'll
1794          * set them up later while requesting IRQs.
1795          */
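        /* pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or a positive count of vectors that could have been
         * allocated, in which case the loop retries with that smaller count
         */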
1796         while (vectors >= vector_threshold) {
1797                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1798                                       vectors);
1799                 if (!err || err < 0) /* Success or a nasty failure. */
1800                         break;
1801                 else /* err == number of vectors we should try again with */
1802                         vectors = err;
1803         }
1804
1805         if (vectors < vector_threshold)
1806                 err = -ENOMEM;
1807
1808         if (err) {
1809                 dev_err(&adapter->pdev->dev,
1810                         "Unable to allocate MSI-X interrupts\n");
1811                 kfree(adapter->msix_entries);
1812                 adapter->msix_entries = NULL;
1813         } else {
1814                 /*
1815                  * Adjust for only the vectors we'll use, which is the minimum
1816                  * of max_msix_q_vectors + NON_Q_VECTORS and the number of
1817                  * vectors we were allocated.
1818                  */
1819                 adapter->num_msix_vectors = vectors;
1820         }
1821
1822         return err;
1823 }
1824
1825 /**
1826  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1827  * @adapter: board private structure to initialize
1828  *
1829  * This is the top level queue allocation routine.  The order here is very
1830  * important, starting with the largest number of features turned on at once,
1831  * and ending with the smallest set of features.  This way large combinations
1832  * can be allocated if they're turned on, and smaller combinations are the
1833  * fallthrough conditions.
1834  *
1835  **/
1836 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1837 {
1838         /* Start with base case */
1839         adapter->num_rx_queues = 1;
1840         adapter->num_tx_queues = 1;
1841 }
1842
1843 /**
1844  * ixgbevf_alloc_queues - Allocate memory for all rings
1845  * @adapter: board private structure to initialize
1846  *
1847  * We allocate one ring per queue at run-time since we don't know the
1848  * number of queues at compile-time.  One Tx and one Rx ring structure
1849  * is allocated for each queue the adapter will use.
1850  **/
1851 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1852 {
1853         int i;
1854
1855         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1856                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1857         if (!adapter->tx_ring)
1858                 goto err_tx_ring_allocation;
1859
1860         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1861                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1862         if (!adapter->rx_ring)
1863                 goto err_rx_ring_allocation;
1864
1865         for (i = 0; i < adapter->num_tx_queues; i++) {
1866                 adapter->tx_ring[i].count = adapter->tx_ring_count;
1867                 adapter->tx_ring[i].queue_index = i;
1868                 /* reg_idx may be remapped later by DCB config */
1869                 adapter->tx_ring[i].reg_idx = i;
1870                 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1871                 adapter->tx_ring[i].netdev = adapter->netdev;
1872         }
1873
1874         for (i = 0; i < adapter->num_rx_queues; i++) {
1875                 adapter->rx_ring[i].count = adapter->rx_ring_count;
1876                 adapter->rx_ring[i].queue_index = i;
1877                 adapter->rx_ring[i].reg_idx = i;
1878                 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1879                 adapter->rx_ring[i].netdev = adapter->netdev;
1880         }
1881
1882         return 0;
1883
1884 err_rx_ring_allocation:
1885         kfree(adapter->tx_ring);
1886 err_tx_ring_allocation:
1887         return -ENOMEM;
1888 }
1889
1890 /**
1891  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1892  * @adapter: board private structure to initialize
1893  *
1894  * Attempt to configure the interrupts using the best available
1895  * capabilities of the hardware and the kernel.
1896  **/
1897 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1898 {
1899         struct net_device *netdev = adapter->netdev;
1900         int err = 0;
1901         int vector, v_budget;
1902
1903         /*
1904          * It's easy to be greedy for MSI-X vectors, but it really
1905          * doesn't do us much good if we have a lot more vectors
1906          * than CPU's.  So let's be conservative and only ask for
1907          * than CPUs.  So let's be conservative and only ask for
1908          * (roughly) the same number of vectors as there are CPUs.
1909          */
1910         v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1911         v_budget = min_t(int, v_budget, num_online_cpus());
1912         v_budget += NON_Q_VECTORS;
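        /* e.g. a single Tx/Rx queue pair ends up requesting one queue
         * vector plus NON_Q_VECTORS for the mailbox/other interrupt
         */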
1913
1914         /* A failure in MSI-X entry allocation isn't fatal, but it does
1915          * mean we disable MSI-X capabilities of the adapter. */
1916         adapter->msix_entries = kcalloc(v_budget,
1917                                         sizeof(struct msix_entry), GFP_KERNEL);
1918         if (!adapter->msix_entries) {
1919                 err = -ENOMEM;
1920                 goto out;
1921         }
1922
1923         for (vector = 0; vector < v_budget; vector++)
1924                 adapter->msix_entries[vector].entry = vector;
1925
1926         err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1927         if (err)
1928                 goto out;
1929
1930         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1931         if (err)
1932                 goto out;
1933
1934         err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1935
1936 out:
1937         return err;
1938 }
1939
1940 /**
1941  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1942  * @adapter: board private structure to initialize
1943  *
1944  * We allocate one q_vector per queue interrupt.  If allocation fails we
1945  * return -ENOMEM.
1946  **/
1947 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1948 {
1949         int q_idx, num_q_vectors;
1950         struct ixgbevf_q_vector *q_vector;
1951
1952         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1953
1954         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1955                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1956                 if (!q_vector)
1957                         goto err_out;
1958                 q_vector->adapter = adapter;
1959                 q_vector->v_idx = q_idx;
1960                 netif_napi_add(adapter->netdev, &q_vector->napi,
1961                                ixgbevf_poll, 64);
1962                 adapter->q_vector[q_idx] = q_vector;
1963         }
1964
1965         return 0;
1966
1967 err_out:
1968         while (q_idx) {
1969                 q_idx--;
1970                 q_vector = adapter->q_vector[q_idx];
1971                 netif_napi_del(&q_vector->napi);
1972                 kfree(q_vector);
1973                 adapter->q_vector[q_idx] = NULL;
1974         }
1975         return -ENOMEM;
1976 }
1977
1978 /**
1979  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1980  * @adapter: board private structure to initialize
1981  *
1982  * This function frees the memory allocated to the q_vectors.  In addition if
1983  * NAPI is enabled it will delete any references to the NAPI struct prior
1984  * to freeing the q_vector.
1985  **/
1986 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1987 {
1988         int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1989
1990         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1991                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1992
1993                 adapter->q_vector[q_idx] = NULL;
1994                 netif_napi_del(&q_vector->napi);
1995                 kfree(q_vector);
1996         }
1997 }
1998
1999 /**
2000  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2001  * @adapter: board private structure
2002  *
2003  **/
2004 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2005 {
2006         pci_disable_msix(adapter->pdev);
2007         kfree(adapter->msix_entries);
2008         adapter->msix_entries = NULL;
2009 }
2010
2011 /**
2012  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2013  * @adapter: board private structure to initialize
2014  *
2015  **/
2016 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2017 {
2018         int err;
2019
2020         /* Number of supported queues */
2021         ixgbevf_set_num_queues(adapter);
2022
2023         err = ixgbevf_set_interrupt_capability(adapter);
2024         if (err) {
2025                 hw_dbg(&adapter->hw,
2026                        "Unable to setup interrupt capabilities\n");
2027                 goto err_set_interrupt;
2028         }
2029
2030         err = ixgbevf_alloc_q_vectors(adapter);
2031         if (err) {
2032                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2033                        "vectors\n");
2034                 goto err_alloc_q_vectors;
2035         }
2036
2037         err = ixgbevf_alloc_queues(adapter);
2038         if (err) {
2039                 pr_err("Unable to allocate memory for queues\n");
2040                 goto err_alloc_queues;
2041         }
2042
2043         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2044                "Tx Queue count = %u\n",
2045                (adapter->num_rx_queues > 1) ? "Enabled" :
2046                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2047
2048         set_bit(__IXGBEVF_DOWN, &adapter->state);
2049
2050         return 0;
2051 err_alloc_queues:
2052         ixgbevf_free_q_vectors(adapter);
2053 err_alloc_q_vectors:
2054         ixgbevf_reset_interrupt_capability(adapter);
2055 err_set_interrupt:
2056         return err;
2057 }
2058
2059 /**
2060  * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2061  * @adapter: board private structure to clear interrupt scheme on
2062  *
2063  * We go through and clear interrupt specific resources and reset the structure
2064  * to pre-load conditions
2065  **/
2066 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2067 {
2068         adapter->num_tx_queues = 0;
2069         adapter->num_rx_queues = 0;
2070
2071         ixgbevf_free_q_vectors(adapter);
2072         ixgbevf_reset_interrupt_capability(adapter);
2073 }
2074
2075 /**
2076  * ixgbevf_sw_init - Initialize general software structures
2077  * (struct ixgbevf_adapter)
2078  * @adapter: board private structure to initialize
2079  *
2080  * ixgbevf_sw_init initializes the Adapter private data structure.
2081  * Fields are initialized based on PCI device information and
2082  * OS network device settings (MTU size).
2083  **/
2084 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2085 {
2086         struct ixgbe_hw *hw = &adapter->hw;
2087         struct pci_dev *pdev = adapter->pdev;
2088         struct net_device *netdev = adapter->netdev;
2089         int err;
2090
2091         /* PCI config space info */
2092
2093         hw->vendor_id = pdev->vendor;
2094         hw->device_id = pdev->device;
2095         hw->revision_id = pdev->revision;
2096         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2097         hw->subsystem_device_id = pdev->subsystem_device;
2098
2099         hw->mbx.ops.init_params(hw);
2100
2101         /* assume legacy case in which PF would only give VF 2 queues */
2102         hw->mac.max_tx_queues = 2;
2103         hw->mac.max_rx_queues = 2;
2104
2105         /* lock to protect mailbox accesses */
2106         spin_lock_init(&adapter->mbx_lock);
2107
2108         err = hw->mac.ops.reset_hw(hw);
2109         if (err) {
2110                 dev_info(&pdev->dev,
2111                          "PF still in reset state.  Is the PF interface up?\n");
2112         } else {
2113                 err = hw->mac.ops.init_hw(hw);
2114                 if (err) {
2115                         pr_err("init_hw failed: %d\n", err);
2116                         goto out;
2117                 }
2118                 ixgbevf_negotiate_api(adapter);
2119                 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2120                 if (err)
2121                         dev_info(&pdev->dev, "Error reading MAC address\n");
2122                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2123                         dev_info(&pdev->dev,
2124                                  "MAC address not assigned by administrator.\n");
2125                 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2126         }
2127
2128         if (!is_valid_ether_addr(netdev->dev_addr)) {
2129                 dev_info(&pdev->dev, "Assigning random MAC address\n");
2130                 eth_hw_addr_random(netdev);
2131                 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2132         }
2133
2134         /* Enable dynamic interrupt throttling rates */
2135         adapter->rx_itr_setting = 1;
2136         adapter->tx_itr_setting = 1;
2137
2138         /* set default ring sizes */
2139         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2140         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2141
2142         set_bit(__IXGBEVF_DOWN, &adapter->state);
2143         return 0;
2144
2145 out:
2146         return err;
2147 }
2148
2149 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2150         {                                                       \
2151                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2152                 if (current_counter < last_counter)             \
2153                         counter += 0x100000000LL;               \
2154                 last_counter = current_counter;                 \
2155                 counter &= 0xFFFFFFFF00000000LL;                \
2156                 counter |= current_counter;                     \
2157         }
2158
2159 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2160         {                                                                \
2161                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2162                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2163                 u64 current_counter = (current_counter_msb << 32) |      \
2164                         current_counter_lsb;                             \
2165                 if (current_counter < last_counter)                      \
2166                         counter += 0x1000000000LL;                       \
2167                 last_counter = current_counter;                          \
2168                 counter &= 0xFFFFFFF000000000LL;                         \
2169                 counter |= current_counter;                              \
2170         }
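/* The VF statistics registers are free-running counters that the driver
 * never clears, so the macros above maintain 64-bit software totals by
 * detecting wrap-around: if the current read is smaller than the previous
 * one, the counter rolled over and 2^32 (or 2^36 for the split LSB/MSB
 * octet counters) is added before the low bits are replaced with the fresh
 * hardware value.  For example, with last_counter = 0xFFFFFFF0 and a new
 * read of 0x00000010, the 32-bit macro adds 0x100000000 and the running
 * total advances by 0x20, as expected.
 */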
2171 /**
2172  * ixgbevf_update_stats - Update the board statistics counters.
2173  * @adapter: board private structure
2174  **/
2175 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2176 {
2177         struct ixgbe_hw *hw = &adapter->hw;
2178         int i;
2179
2180         if (!adapter->link_up)
2181                 return;
2182
2183         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2184                                 adapter->stats.vfgprc);
2185         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2186                                 adapter->stats.vfgptc);
2187         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2188                                 adapter->stats.last_vfgorc,
2189                                 adapter->stats.vfgorc);
2190         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2191                                 adapter->stats.last_vfgotc,
2192                                 adapter->stats.vfgotc);
2193         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2194                                 adapter->stats.vfmprc);
2195
2196         for (i = 0; i < adapter->num_rx_queues; i++) {
2197                 adapter->hw_csum_rx_error +=
2198                         adapter->rx_ring[i].hw_csum_rx_error;
2199                 adapter->hw_csum_rx_good +=
2200                         adapter->rx_ring[i].hw_csum_rx_good;
2201                 adapter->rx_ring[i].hw_csum_rx_error = 0;
2202                 adapter->rx_ring[i].hw_csum_rx_good = 0;
2203         }
2204 }
2205
2206 /**
2207  * ixgbevf_watchdog - Timer Call-back
2208  * @data: pointer to adapter cast into an unsigned long
2209  **/
2210 static void ixgbevf_watchdog(unsigned long data)
2211 {
2212         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2213         struct ixgbe_hw *hw = &adapter->hw;
2214         u32 eics = 0;
2215         int i;
2216
2217         /*
2218          * Do the watchdog outside of interrupt context due to the lovely
2219          * delays that some of the newer hardware requires
2220          */
2221
2222         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2223                 goto watchdog_short_circuit;
2224
2225         /* get one bit for every active tx/rx interrupt vector */
2226         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2227                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2228                 if (qv->rx.ring || qv->tx.ring)
2229                         eics |= 1 << i;
2230         }
2231
2232         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2233
2234 watchdog_short_circuit:
2235         schedule_work(&adapter->watchdog_task);
2236 }
2237
2238 /**
2239  * ixgbevf_tx_timeout - Respond to a Tx Hang
2240  * @netdev: network interface device structure
2241  **/
2242 static void ixgbevf_tx_timeout(struct net_device *netdev)
2243 {
2244         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2245
2246         /* Do the reset outside of interrupt context */
2247         schedule_work(&adapter->reset_task);
2248 }
2249
2250 static void ixgbevf_reset_task(struct work_struct *work)
2251 {
2252         struct ixgbevf_adapter *adapter;
2253         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2254
2255         /* If we're already down or resetting, just bail */
2256         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2257             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2258                 return;
2259
2260         adapter->tx_timeout_count++;
2261
2262         ixgbevf_reinit_locked(adapter);
2263 }
2264
2265 /**
2266  * ixgbevf_watchdog_task - worker thread to bring link up
2267  * @work: pointer to work_struct containing our data
2268  **/
2269 static void ixgbevf_watchdog_task(struct work_struct *work)
2270 {
2271         struct ixgbevf_adapter *adapter = container_of(work,
2272                                                        struct ixgbevf_adapter,
2273                                                        watchdog_task);
2274         struct net_device *netdev = adapter->netdev;
2275         struct ixgbe_hw *hw = &adapter->hw;
2276         u32 link_speed = adapter->link_speed;
2277         bool link_up = adapter->link_up;
2278         s32 need_reset;
2279
2280         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2281
2282         /*
2283          * Always check the link on the watchdog because we have
2284          * no LSC interrupt
2285          */
2286         spin_lock_bh(&adapter->mbx_lock);
2287
2288         need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2289
2290         spin_unlock_bh(&adapter->mbx_lock);
2291
2292         if (need_reset) {
2293                 adapter->link_up = link_up;
2294                 adapter->link_speed = link_speed;
2295                 netif_carrier_off(netdev);
2296                 netif_tx_stop_all_queues(netdev);
2297                 schedule_work(&adapter->reset_task);
2298                 goto pf_has_reset;
2299         }
2300         adapter->link_up = link_up;
2301         adapter->link_speed = link_speed;
2302
2303         if (link_up) {
2304                 if (!netif_carrier_ok(netdev)) {
2305                         char *link_speed_string;
2306                         switch (link_speed) {
2307                         case IXGBE_LINK_SPEED_10GB_FULL:
2308                                 link_speed_string = "10 Gbps";
2309                                 break;
2310                         case IXGBE_LINK_SPEED_1GB_FULL:
2311                                 link_speed_string = "1 Gbps";
2312                                 break;
2313                         case IXGBE_LINK_SPEED_100_FULL:
2314                                 link_speed_string = "100 Mbps";
2315                                 break;
2316                         default:
2317                                 link_speed_string = "unknown speed";
2318                                 break;
2319                         }
2320                         dev_info(&adapter->pdev->dev,
2321                                 "NIC Link is Up, %s\n", link_speed_string);
2322                         netif_carrier_on(netdev);
2323                         netif_tx_wake_all_queues(netdev);
2324                 }
2325         } else {
2326                 adapter->link_up = false;
2327                 adapter->link_speed = 0;
2328                 if (netif_carrier_ok(netdev)) {
2329                         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2330                         netif_carrier_off(netdev);
2331                         netif_tx_stop_all_queues(netdev);
2332                 }
2333         }
2334
2335         ixgbevf_update_stats(adapter);
2336
2337 pf_has_reset:
2338         /* Reset the timer */
2339         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2340                 mod_timer(&adapter->watchdog_timer,
2341                           round_jiffies(jiffies + (2 * HZ)));
2342
2343         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2344 }
2345
2346 /**
2347  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2348  * @adapter: board private structure
2349  * @tx_ring: Tx descriptor ring for a specific queue
2350  *
2351  * Free all transmit software resources
2352  **/
2353 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2354                                struct ixgbevf_ring *tx_ring)
2355 {
2356         struct pci_dev *pdev = adapter->pdev;
2357
2358         ixgbevf_clean_tx_ring(adapter, tx_ring);
2359
2360         vfree(tx_ring->tx_buffer_info);
2361         tx_ring->tx_buffer_info = NULL;
2362
2363         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2364                           tx_ring->dma);
2365
2366         tx_ring->desc = NULL;
2367 }
2368
2369 /**
2370  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2371  * @adapter: board private structure
2372  *
2373  * Free all transmit software resources
2374  **/
2375 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2376 {
2377         int i;
2378
2379         for (i = 0; i < adapter->num_tx_queues; i++)
2380                 if (adapter->tx_ring[i].desc)
2381                         ixgbevf_free_tx_resources(adapter,
2382                                                   &adapter->tx_ring[i]);
2383
2384 }
2385
2386 /**
2387  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2388  * @adapter: board private structure
2389  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2390  *
2391  * Return 0 on success, negative on failure
2392  **/
2393 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2394                                struct ixgbevf_ring *tx_ring)
2395 {
2396         struct pci_dev *pdev = adapter->pdev;
2397         int size;
2398
2399         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2400         tx_ring->tx_buffer_info = vzalloc(size);
2401         if (!tx_ring->tx_buffer_info)
2402                 goto err;
2403
2404         /* round up to nearest 4K */
2405         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2406         tx_ring->size = ALIGN(tx_ring->size, 4096);
2407
2408         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2409                                            &tx_ring->dma, GFP_KERNEL);
2410         if (!tx_ring->desc)
2411                 goto err;
2412
2413         tx_ring->next_to_use = 0;
2414         tx_ring->next_to_clean = 0;
2415         return 0;
2416
2417 err:
2418         vfree(tx_ring->tx_buffer_info);
2419         tx_ring->tx_buffer_info = NULL;
2420         hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2421                "descriptor ring\n");
2422         return -ENOMEM;
2423 }
2424
2425 /**
2426  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2427  * @adapter: board private structure
2428  *
2429  * If this function returns with an error, then it's possible one or
2430  * more of the rings is populated (while the rest are not).  It is the
2431  * callers duty to clean those orphaned rings.
2432  *
2433  * Return 0 on success, negative on failure
2434  **/
2435 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2436 {
2437         int i, err = 0;
2438
2439         for (i = 0; i < adapter->num_tx_queues; i++) {
2440                 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2441                 if (!err)
2442                         continue;
2443                 hw_dbg(&adapter->hw,
2444                        "Allocation for Tx Queue %u failed\n", i);
2445                 break;
2446         }
2447
2448         return err;
2449 }
2450
2451 /**
2452  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2453  * @adapter: board private structure
2454  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2455  *
2456  * Returns 0 on success, negative on failure
2457  **/
2458 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2459                                struct ixgbevf_ring *rx_ring)
2460 {
2461         struct pci_dev *pdev = adapter->pdev;
2462         int size;
2463
2464         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2465         rx_ring->rx_buffer_info = vzalloc(size);
2466         if (!rx_ring->rx_buffer_info)
2467                 goto alloc_failed;
2468
2469         /* Round up to nearest 4K */
2470         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2471         rx_ring->size = ALIGN(rx_ring->size, 4096);
2472
2473         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2474                                            &rx_ring->dma, GFP_KERNEL);
2475
2476         if (!rx_ring->desc) {
2477                 vfree(rx_ring->rx_buffer_info);
2478                 rx_ring->rx_buffer_info = NULL;
2479                 goto alloc_failed;
2480         }
2481
2482         rx_ring->next_to_clean = 0;
2483         rx_ring->next_to_use = 0;
2484
2485         return 0;
2486 alloc_failed:
2487         return -ENOMEM;
2488 }
2489
2490 /**
2491  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2492  * @adapter: board private structure
2493  *
2494  * If this function returns with an error, then it's possible one or
2495  * more of the rings is populated (while the rest are not).  It is the
2496  * callers duty to clean those orphaned rings.
2497  *
2498  * Return 0 on success, negative on failure
2499  **/
2500 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2501 {
2502         int i, err = 0;
2503
2504         for (i = 0; i < adapter->num_rx_queues; i++) {
2505                 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2506                 if (!err)
2507                         continue;
2508                 hw_dbg(&adapter->hw,
2509                        "Allocation for Rx Queue %u failed\n", i);
2510                 break;
2511         }
2512         return err;
2513 }
2514
2515 /**
2516  * ixgbevf_free_rx_resources - Free Rx Resources
2517  * @adapter: board private structure
2518  * @rx_ring: ring to clean the resources from
2519  *
2520  * Free all receive software resources
2521  **/
2522 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2523                                struct ixgbevf_ring *rx_ring)
2524 {
2525         struct pci_dev *pdev = adapter->pdev;
2526
2527         ixgbevf_clean_rx_ring(adapter, rx_ring);
2528
2529         vfree(rx_ring->rx_buffer_info);
2530         rx_ring->rx_buffer_info = NULL;
2531
2532         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2533                           rx_ring->dma);
2534
2535         rx_ring->desc = NULL;
2536 }
2537
2538 /**
2539  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2540  * @adapter: board private structure
2541  *
2542  * Free all receive software resources
2543  **/
2544 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2545 {
2546         int i;
2547
2548         for (i = 0; i < adapter->num_rx_queues; i++)
2549                 if (adapter->rx_ring[i].desc)
2550                         ixgbevf_free_rx_resources(adapter,
2551                                                   &adapter->rx_ring[i]);
2552 }
2553
2554 static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2555 {
2556         struct ixgbe_hw *hw = &adapter->hw;
2557         struct ixgbevf_ring *rx_ring;
2558         unsigned int def_q = 0;
2559         unsigned int num_tcs = 0;
2560         unsigned int num_rx_queues = 1;
2561         int err, i;
2562
2563         spin_lock_bh(&adapter->mbx_lock);
2564
2565         /* fetch queue configuration from the PF */
2566         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2567
2568         spin_unlock_bh(&adapter->mbx_lock);
2569
2570         if (err)
2571                 return err;
2572
2573         if (num_tcs > 1) {
2574                 /* update default Tx ring register index */
2575                 adapter->tx_ring[0].reg_idx = def_q;
2576
2577                 /* we need as many queues as traffic classes */
2578                 num_rx_queues = num_tcs;
2579         }
2580
2581         /* nothing to do if we have the correct number of queues */
2582         if (adapter->num_rx_queues == num_rx_queues)
2583                 return 0;
2584
2585         /* allocate new rings */
2586         rx_ring = kcalloc(num_rx_queues,
2587                           sizeof(struct ixgbevf_ring), GFP_KERNEL);
2588         if (!rx_ring)
2589                 return -ENOMEM;
2590
2591         /* setup ring fields */
2592         for (i = 0; i < num_rx_queues; i++) {
2593                 rx_ring[i].count = adapter->rx_ring_count;
2594                 rx_ring[i].queue_index = i;
2595                 rx_ring[i].reg_idx = i;
2596                 rx_ring[i].dev = &adapter->pdev->dev;
2597                 rx_ring[i].netdev = adapter->netdev;
2598         }
2599
2600         /* free the existing ring and queues */
2601         adapter->num_rx_queues = 0;
2602         kfree(adapter->rx_ring);
2603
2604         /* move new rings into position on the adapter struct */
2605         adapter->rx_ring = rx_ring;
2606         adapter->num_rx_queues = num_rx_queues;
2607
2608         return 0;
2609 }
2610
2611 /**
2612  * ixgbevf_open - Called when a network interface is made active
2613  * @netdev: network interface device structure
2614  *
2615  * Returns 0 on success, negative value on failure
2616  *
2617  * The open entry point is called when a network interface is made
2618  * active by the system (IFF_UP).  At this point all resources needed
2619  * for transmit and receive operations are allocated, the interrupt
2620  * handler is registered with the OS, the watchdog timer is started,
2621  * and the stack is notified that the interface is ready.
2622  **/
2623 static int ixgbevf_open(struct net_device *netdev)
2624 {
2625         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2626         struct ixgbe_hw *hw = &adapter->hw;
2627         int err;
2628
2629         /* A previous failure to open the device because of a lack of
2630          * available MSIX vector resources may have reset the number
2631          * of msix vectors variable to zero.  The only way to recover
2632          * is to unload/reload the driver and hope that the system has
2633          * been able to recover some MSIX vector resources.
2634          */
2635         if (!adapter->num_msix_vectors)
2636                 return -ENOMEM;
2637
2638         /* disallow open during test */
2639         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2640                 return -EBUSY;
2641
2642         if (hw->adapter_stopped) {
2643                 ixgbevf_reset(adapter);
2644                 /* if the adapter is still stopped then the PF isn't up and
2645                  * the VF can't start. */
2646                 if (hw->adapter_stopped) {
2647                         err = IXGBE_ERR_MBX;
2648                         pr_err("Unable to start - perhaps the PF Driver isn't "
2649                                "up yet\n");
2650                         goto err_setup_reset;
2651                 }
2652         }
2653
2654         /* setup queue reg_idx and Rx queue count */
2655         err = ixgbevf_setup_queues(adapter);
2656         if (err)
2657                 goto err_setup_queues;
2658
2659         /* allocate transmit descriptors */
2660         err = ixgbevf_setup_all_tx_resources(adapter);
2661         if (err)
2662                 goto err_setup_tx;
2663
2664         /* allocate receive descriptors */
2665         err = ixgbevf_setup_all_rx_resources(adapter);
2666         if (err)
2667                 goto err_setup_rx;
2668
2669         ixgbevf_configure(adapter);
2670
2671         /*
2672          * Map the Tx/Rx rings to the vectors we were allotted.
2673          * Since request_irq is called in this function, map_rings
2674          * must be called *before* up_complete.
2675          */
2676         ixgbevf_map_rings_to_vectors(adapter);
2677
2678         ixgbevf_up_complete(adapter);
2679
2680         /* clear any pending interrupts, may auto mask */
2681         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2682         err = ixgbevf_request_irq(adapter);
2683         if (err)
2684                 goto err_req_irq;
2685
2686         ixgbevf_irq_enable(adapter);
2687
2688         return 0;
2689
2690 err_req_irq:
2691         ixgbevf_down(adapter);
2692 err_setup_rx:
2693         ixgbevf_free_all_rx_resources(adapter);
2694 err_setup_tx:
2695         ixgbevf_free_all_tx_resources(adapter);
2696 err_setup_queues:
2697         ixgbevf_reset(adapter);
2698
2699 err_setup_reset:
2700
2701         return err;
2702 }
2703
2704 /**
2705  * ixgbevf_close - Disables a network interface
2706  * @netdev: network interface device structure
2707  *
2708  * Returns 0, this is not allowed to fail
2709  *
2710  * The close entry point is called when an interface is de-activated
2711  * by the OS.  The hardware is still under the drivers control, but
2712  * needs to be disabled.  A global MAC reset is issued to stop the
2713  * hardware, and all transmit and receive resources are freed.
2714  **/
2715 static int ixgbevf_close(struct net_device *netdev)
2716 {
2717         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2718
2719         ixgbevf_down(adapter);
2720         ixgbevf_free_irq(adapter);
2721
2722         ixgbevf_free_all_tx_resources(adapter);
2723         ixgbevf_free_all_rx_resources(adapter);
2724
2725         return 0;
2726 }
2727
2728 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2729                                 u32 vlan_macip_lens, u32 type_tucmd,
2730                                 u32 mss_l4len_idx)
2731 {
2732         struct ixgbe_adv_tx_context_desc *context_desc;
2733         u16 i = tx_ring->next_to_use;
2734
2735         context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2736
2737         i++;
2738         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2739
2740         /* set bits to identify this as an advanced context descriptor */
2741         type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2742
2743         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
2744         context_desc->seqnum_seed       = 0;
2745         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
2746         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
2747 }
2748
2749 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2750                        struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2751 {
2752         u32 vlan_macip_lens, type_tucmd;
2753         u32 mss_l4len_idx, l4len;
2754
2755         if (!skb_is_gso(skb))
2756                 return 0;
2757
2758         if (skb_header_cloned(skb)) {
2759                 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2760                 if (err)
2761                         return err;
2762         }
2763
2764         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2765         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2766
2767         if (skb->protocol == htons(ETH_P_IP)) {
2768                 struct iphdr *iph = ip_hdr(skb);
2769                 iph->tot_len = 0;
2770                 iph->check = 0;
2771                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2772                                                          iph->daddr, 0,
2773                                                          IPPROTO_TCP,
2774                                                          0);
2775                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2776         } else if (skb_is_gso_v6(skb)) {
2777                 ipv6_hdr(skb)->payload_len = 0;
2778                 tcp_hdr(skb)->check =
2779                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2780                                      &ipv6_hdr(skb)->daddr,
2781                                      0, IPPROTO_TCP, 0);
2782         }
2783
2784         /* compute header lengths */
2785         l4len = tcp_hdrlen(skb);
2786         *hdr_len += l4len;
2787         *hdr_len = skb_transport_offset(skb) + l4len;
2788
2789         /* mss_l4len_idx: use 1 as index for TSO */
2790         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2791         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2792         mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2793
2794         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2795         vlan_macip_lens = skb_network_header_len(skb);
2796         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2797         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2798
2799         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2800                             type_tucmd, mss_l4len_idx);
2801
2802         return 1;
2803 }
2804
2805 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2806                             struct sk_buff *skb, u32 tx_flags)
2807 {
2808         u32 vlan_macip_lens = 0;
2809         u32 mss_l4len_idx = 0;
2810         u32 type_tucmd = 0;
2811
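        /* for CHECKSUM_PARTIAL the hardware inserts the L4 checksum, but it
         * needs a context descriptor describing the L3 protocol and the L4
         * header length to find the right offset
         */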
2812         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2813                 u8 l4_hdr = 0;
2814                 switch (skb->protocol) {
2815                 case __constant_htons(ETH_P_IP):
2816                         vlan_macip_lens |= skb_network_header_len(skb);
2817                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2818                         l4_hdr = ip_hdr(skb)->protocol;
2819                         break;
2820                 case __constant_htons(ETH_P_IPV6):
2821                         vlan_macip_lens |= skb_network_header_len(skb);
2822                         l4_hdr = ipv6_hdr(skb)->nexthdr;
2823                         break;
2824                 default:
2825                         if (unlikely(net_ratelimit())) {
2826                                 dev_warn(tx_ring->dev,
2827                                  "partial checksum but proto=%x!\n",
2828                                  skb->protocol);
2829                         }
2830                         break;
2831                 }
2832
2833                 switch (l4_hdr) {
2834                 case IPPROTO_TCP:
2835                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2836                         mss_l4len_idx = tcp_hdrlen(skb) <<
2837                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2838                         break;
2839                 case IPPROTO_SCTP:
2840                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2841                         mss_l4len_idx = sizeof(struct sctphdr) <<
2842                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2843                         break;
2844                 case IPPROTO_UDP:
2845                         mss_l4len_idx = sizeof(struct udphdr) <<
2846                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2847                         break;
2848                 default:
2849                         if (unlikely(net_ratelimit())) {
2850                                 dev_warn(tx_ring->dev,
2851                                  "partial checksum but l4 proto=%x!\n",
2852                                  l4_hdr);
2853                         }
2854                         break;
2855                 }
2856         }
2857
2858         /* vlan_macip_lens: MACLEN, VLAN tag */
2859         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2860         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2861
2862         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2863                             type_tucmd, mss_l4len_idx);
2864
2865         return (skb->ip_summed == CHECKSUM_PARTIAL);
2866 }
2867
2868 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2869                           struct sk_buff *skb, u32 tx_flags)
2870 {
2871         struct ixgbevf_tx_buffer *tx_buffer_info;
2872         unsigned int len;
2873         unsigned int total = skb->len;
2874         unsigned int offset = 0, size;
2875         int count = 0;
2876         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2877         unsigned int f;
2878         int i;
2879
2880         i = tx_ring->next_to_use;
2881
2882         len = min(skb_headlen(skb), total);
2883         while (len) {
2884                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2885                 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2886
2887                 tx_buffer_info->length = size;
2888                 tx_buffer_info->mapped_as_page = false;
2889                 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2890                                                      skb->data + offset,
2891                                                      size, DMA_TO_DEVICE);
2892                 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2893                         goto dma_error;
2894
2895                 len -= size;
2896                 total -= size;
2897                 offset += size;
2898                 count++;
2899                 i++;
2900                 if (i == tx_ring->count)
2901                         i = 0;
2902         }
2903
2904         for (f = 0; f < nr_frags; f++) {
2905                 const struct skb_frag_struct *frag;
2906
2907                 frag = &skb_shinfo(skb)->frags[f];
2908                 len = min((unsigned int)skb_frag_size(frag), total);
2909                 offset = 0;
2910
2911                 while (len) {
2912                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
2913                         size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2914
2915                         tx_buffer_info->length = size;
2916                         tx_buffer_info->dma =
2917                                 skb_frag_dma_map(tx_ring->dev, frag,
2918                                                  offset, size, DMA_TO_DEVICE);
2919                         if (dma_mapping_error(tx_ring->dev,
2920                                               tx_buffer_info->dma))
2921                                 goto dma_error;
2922                         tx_buffer_info->mapped_as_page = true;
2923
2924                         len -= size;
2925                         total -= size;
2926                         offset += size;
2927                         count++;
2928                         i++;
2929                         if (i == tx_ring->count)
2930                                 i = 0;
2931                 }
2932                 if (total == 0)
2933                         break;
2934         }
2935
2936         if (i == 0)
2937                 i = tx_ring->count - 1;
2938         else
2939                 i = i - 1;
2940         tx_ring->tx_buffer_info[i].skb = skb;
2941
2942         return count;
2943
2944 dma_error:
2945         dev_err(tx_ring->dev, "TX DMA map failed\n");
2946
2947         /* clear timestamp and dma mappings for failed tx_buffer_info map */
2948         tx_buffer_info->dma = 0;
2949         count--;
2950
2951         /* clear timestamp and dma mappings for remaining portion of packet */
2952         while (count >= 0) {
2953                 count--;
2954                 i--;
2955                 if (i < 0)
2956                         i += tx_ring->count;
2957                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2958                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2959         }
2960
2961         return count;
2962 }
2963
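/*
 * ixgbevf_tx_map() above carves the linear data and then every page
 * fragment into chunks of at most IXGBE_MAX_DATA_PER_TXD, one DMA mapping
 * (and hence one descriptor) per chunk.  A worked example, assuming
 * IXGBE_MAX_DATA_PER_TXD is 16K as in the ixgbe defines of this era:
 *
 *	headlen  256 bytes  ->  1 descriptor
 *	frag[0]  20K        ->  2 descriptors (16K + 4K)
 *	frag[1]  4K         ->  1 descriptor
 *
 * so the function returns count = 4 and stashes the skb pointer in the
 * tx_buffer_info entry of the last descriptor used.
 */
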
2964 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2965                              int count, unsigned int first, u32 paylen,
2966                              u8 hdr_len)
2967 {
2968         union ixgbe_adv_tx_desc *tx_desc = NULL;
2969         struct ixgbevf_tx_buffer *tx_buffer_info;
2970         u32 olinfo_status = 0, cmd_type_len = 0;
2971         unsigned int i;
2972
2973         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2974
2975         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2976
2977         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2978
2979         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2980                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2981
2982         if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2983                 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2984
2985         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2986                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2987
2988                 /* use index 1 context for tso */
2989                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2990                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2991                         olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2992         }
2993
2994         /*
2995          * Check Context must be set if Tx switch is enabled, which it
2996          * always is in the case where virtual functions are running
2997          */
2998         olinfo_status |= IXGBE_ADVTXD_CC;
2999
3000         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3001
3002         i = tx_ring->next_to_use;
3003         while (count--) {
3004                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3005                 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3006                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3007                 tx_desc->read.cmd_type_len =
3008                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3009                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3010                 i++;
3011                 if (i == tx_ring->count)
3012                         i = 0;
3013         }
3014
3015         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3016
3017         tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3018
3019         /* Force memory writes to complete before letting h/w
3020          * know there are new descriptors to fetch.  (Only
3021          * applicable for weak-ordered memory model archs,
3022          * such as IA-64).
3023          */
3024         wmb();
3025
3026         tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3027         tx_ring->next_to_use = i;
3028 }
3029
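/*
 * Roughly, ixgbevf_tx_queue() above writes the same olinfo_status into
 * every data descriptor of the packet, while cmd_type_len carries the
 * common DCMD bits plus that descriptor's buffer length; EOP/RS/IFCS are
 * OR-ed into the last descriptor only.  For example, a two-buffer,
 * 1514-byte packet with checksum offload and no TSO (sizes are only an
 * example):
 *
 *	olinfo_status = (1514 << IXGBE_ADVTXD_PAYLEN_SHIFT) |
 *			IXGBE_ADVTXD_POPTS_TXSM | IXGBE_ADVTXD_CC;
 *	desc[0].cmd_type_len = dcmd | 1024;
 *	desc[1].cmd_type_len = dcmd | 490 | IXGBE_TXD_CMD_EOP |
 *			       IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
 *
 * where dcmd is IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS |
 * IXGBE_ADVTXD_DCMD_DEXT, as set up at the top of the function.
 */
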
3030 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3031 {
3032         struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3033
3034         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3035         /* Herbert's original patch had:
3036          *  smp_mb__after_netif_stop_queue();
3037          * but since that doesn't exist yet, just open code it. */
3038         smp_mb();
3039
3040         /* We need to check again in case another CPU has just
3041          * made room available. */
3042         if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3043                 return -EBUSY;
3044
3045         /* A reprieve! - use start_queue because it doesn't call schedule */
3046         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3047         ++adapter->restart_queue;
3048         return 0;
3049 }
3050
3051 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3052 {
3053         if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3054                 return 0;
3055         return __ixgbevf_maybe_stop_tx(tx_ring, size);
3056 }
3057
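/*
 * The stop/re-check sequence above closes the classic race with the Tx
 * clean-up path.  A sketch of the window it guards against:
 *
 *	xmit path                         clean-up path
 *	-----------------------------     -----------------------------
 *	sees too few free descriptors
 *	                                  frees descriptors, but the
 *	                                  queue is not stopped yet, so
 *	                                  no wake-up is issued
 *	netif_stop_subqueue()
 *	smp_mb()
 *	re-check: room is available  ->   queue restarted right here,
 *	                                  no permanent stall
 */
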
3058 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3059 {
3060         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3061         struct ixgbevf_ring *tx_ring;
3062         unsigned int first;
3063         unsigned int tx_flags = 0;
3064         u8 hdr_len = 0;
3065         int r_idx = 0, tso;
3066         u16 count = TXD_USE_COUNT(skb_headlen(skb));
3067 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3068         unsigned short f;
3069 #endif
3070         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3071         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3072                 dev_kfree_skb(skb);
3073                 return NETDEV_TX_OK;
3074         }
3075
3076         tx_ring = &adapter->tx_ring[r_idx];
3077
3078         /*
3079          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3080          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3081          *       + 2 desc gap to keep tail from touching head,
3082          *       + 1 desc for context descriptor,
3083          * otherwise try next time
3084          */
3085 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3086         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3087                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3088 #else
3089         count += skb_shinfo(skb)->nr_frags;
3090 #endif
3091         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3092                 adapter->tx_busy++;
3093                 return NETDEV_TX_BUSY;
3094         }
3095
3096         if (vlan_tx_tag_present(skb)) {
3097                 tx_flags |= vlan_tx_tag_get(skb);
3098                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3099                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3100         }
3101
3102         first = tx_ring->next_to_use;
3103
3104         if (skb->protocol == htons(ETH_P_IP))
3105                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3106         tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3107         if (tso < 0) {
3108                 dev_kfree_skb_any(skb);
3109                 return NETDEV_TX_OK;
3110         }
3111
3112         if (tso)
3113                 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3114         else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3115                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3116
3117         ixgbevf_tx_queue(tx_ring, tx_flags,
3118                          ixgbevf_tx_map(tx_ring, skb, tx_flags),
3119                          first, skb->len, hdr_len);
3120
3121         writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3122
3123         ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3124
3125         return NETDEV_TX_OK;
3126 }
3127
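/*
 * The descriptor budget computed in ixgbevf_xmit_frame() above is the
 * number of data descriptors the skb may need plus 3 (the 2-descriptor
 * gap that keeps tail from touching head, and 1 context descriptor).
 * A worked example, assuming 4K pages so each fragment fits in a single
 * descriptor on this branch of the #if:
 *
 *	skb_headlen = 200, nr_frags = 3
 *	count = TXD_USE_COUNT(200) + 3 = 1 + 3 = 4
 *	ixgbevf_maybe_stop_tx(tx_ring, 4 + 3)   ->  needs 7 free slots
 */
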
3128 /**
3129  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3130  * @netdev: network interface device structure
3131  * @p: pointer to an address structure
3132  *
3133  * Returns 0 on success, negative on failure
3134  **/
3135 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3136 {
3137         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3138         struct ixgbe_hw *hw = &adapter->hw;
3139         struct sockaddr *addr = p;
3140
3141         if (!is_valid_ether_addr(addr->sa_data))
3142                 return -EADDRNOTAVAIL;
3143
3144         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3145         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3146
3147         spin_lock_bh(&adapter->mbx_lock);
3148
3149         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3150
3151         spin_unlock_bh(&adapter->mbx_lock);
3152
3153         return 0;
3154 }
3155
3156 /**
3157  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3158  * @netdev: network interface device structure
3159  * @new_mtu: new value for maximum frame size
3160  *
3161  * Returns 0 on success, negative on failure
3162  **/
3163 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3164 {
3165         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3166         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3167         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3168
3169         switch (adapter->hw.api_version) {
3170         case ixgbe_mbox_api_11:
3171                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3172                 break;
3173         default:
3174                 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3175                         max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3176                 break;
3177         }
3178
3179         /* MTU < 68 is an error and causes problems on some kernels */
3180         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3181                 return -EINVAL;
3182
3183         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3184                netdev->mtu, new_mtu);
3185         /* must set new MTU before calling down or up */
3186         netdev->mtu = new_mtu;
3187
3188         if (netif_running(netdev))
3189                 ixgbevf_reinit_locked(adapter);
3190
3191         return 0;
3192 }
3193
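/*
 * max_frame in ixgbevf_change_mtu() is simply the MTU plus the Ethernet
 * header and FCS.  With the usual ETH_HLEN = 14 and ETH_FCS_LEN = 4:
 *
 *	new_mtu = 1500  ->  max_frame = 1500 + 14 + 4 = 1518
 *
 * which is within MAXIMUM_ETHERNET_VLAN_SIZE, so the standard MTU is
 * always accepted; jumbo sizes up to IXGBE_MAX_JUMBO_FRAME_SIZE are only
 * allowed when the PF speaks mailbox API 1.1 or the VF is an X540, per
 * the switch statement above.
 */
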
3194 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3195 {
3196         struct net_device *netdev = pci_get_drvdata(pdev);
3197         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3198 #ifdef CONFIG_PM
3199         int retval = 0;
3200 #endif
3201
3202         netif_device_detach(netdev);
3203
3204         if (netif_running(netdev)) {
3205                 rtnl_lock();
3206                 ixgbevf_down(adapter);
3207                 ixgbevf_free_irq(adapter);
3208                 ixgbevf_free_all_tx_resources(adapter);
3209                 ixgbevf_free_all_rx_resources(adapter);
3210                 rtnl_unlock();
3211         }
3212
3213         ixgbevf_clear_interrupt_scheme(adapter);
3214
3215 #ifdef CONFIG_PM
3216         retval = pci_save_state(pdev);
3217         if (retval)
3218                 return retval;
3219
3220 #endif
3221         pci_disable_device(pdev);
3222
3223         return 0;
3224 }
3225
3226 #ifdef CONFIG_PM
3227 static int ixgbevf_resume(struct pci_dev *pdev)
3228 {
3229         struct net_device *netdev = pci_get_drvdata(pdev);
3230         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3231         u32 err;
3232
3233         pci_set_power_state(pdev, PCI_D0);
3234         pci_restore_state(pdev);
3235         /*
3236          * pci_restore_state clears dev->state_saved so call
3237          * pci_save_state to restore it.
3238          */
3239         pci_save_state(pdev);
3240
3241         err = pci_enable_device_mem(pdev);
3242         if (err) {
3243                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3244                 return err;
3245         }
3246         pci_set_master(pdev);
3247
3248         ixgbevf_reset(adapter);
3249
3250         rtnl_lock();
3251         err = ixgbevf_init_interrupt_scheme(adapter);
3252         rtnl_unlock();
3253         if (err) {
3254                 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3255                 return err;
3256         }
3257
3258         if (netif_running(netdev)) {
3259                 err = ixgbevf_open(netdev);
3260                 if (err)
3261                         return err;
3262         }
3263
3264         netif_device_attach(netdev);
3265
3266         return err;
3267 }
3268
3269 #endif /* CONFIG_PM */
3270 static void ixgbevf_shutdown(struct pci_dev *pdev)
3271 {
3272         ixgbevf_suspend(pdev, PMSG_SUSPEND);
3273 }
3274
3275 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3276                                                 struct rtnl_link_stats64 *stats)
3277 {
3278         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3279         unsigned int start;
3280         u64 bytes, packets;
3281         const struct ixgbevf_ring *ring;
3282         int i;
3283
3284         ixgbevf_update_stats(adapter);
3285
3286         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3287
3288         for (i = 0; i < adapter->num_rx_queues; i++) {
3289                 ring = &adapter->rx_ring[i];
3290                 do {
3291                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3292                         bytes = ring->total_bytes;
3293                         packets = ring->total_packets;
3294                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3295                 stats->rx_bytes += bytes;
3296                 stats->rx_packets += packets;
3297         }
3298
3299         for (i = 0; i < adapter->num_tx_queues; i++) {
3300                 ring = &adapter->tx_ring[i];
3301                 do {
3302                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3303                         bytes = ring->total_bytes;
3304                         packets = ring->total_packets;
3305                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3306                 stats->tx_bytes += bytes;
3307                 stats->tx_packets += packets;
3308         }
3309
3310         return stats;
3311 }
3312
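/*
 * The loops in ixgbevf_get_stats() use the u64_stats seqcount helpers so
 * the 64-bit byte/packet counters can be read consistently on 32-bit
 * machines without taking a lock.  The reader pattern, in its minimal
 * form:
 *
 *	do {
 *		start = u64_stats_fetch_begin_bh(&ring->syncp);
 *		bytes   = ring->total_bytes;
 *		packets = ring->total_packets;
 *	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 *
 * with the writer side bracketing its updates between
 * u64_stats_update_begin() and u64_stats_update_end() on the same syncp.
 */
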
3313 static const struct net_device_ops ixgbevf_netdev_ops = {
3314         .ndo_open               = ixgbevf_open,
3315         .ndo_stop               = ixgbevf_close,
3316         .ndo_start_xmit         = ixgbevf_xmit_frame,
3317         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3318         .ndo_get_stats64        = ixgbevf_get_stats,
3319         .ndo_validate_addr      = eth_validate_addr,
3320         .ndo_set_mac_address    = ixgbevf_set_mac,
3321         .ndo_change_mtu         = ixgbevf_change_mtu,
3322         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3323         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3324         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3325 };
3326
3327 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3328 {
3329         dev->netdev_ops = &ixgbevf_netdev_ops;
3330         ixgbevf_set_ethtool_ops(dev);
3331         dev->watchdog_timeo = 5 * HZ;
3332 }
3333
3334 /**
3335  * ixgbevf_probe - Device Initialization Routine
3336  * @pdev: PCI device information struct
3337  * @ent: entry in ixgbevf_pci_tbl
3338  *
3339  * Returns 0 on success, negative on failure
3340  *
3341  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3342  * The OS initialization, configuring of the adapter private structure,
3343  * and a hardware reset occur.
3344  **/
3345 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3346 {
3347         struct net_device *netdev;
3348         struct ixgbevf_adapter *adapter = NULL;
3349         struct ixgbe_hw *hw = NULL;
3350         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3351         static int cards_found;
3352         int err, pci_using_dac;
3353
3354         err = pci_enable_device(pdev);
3355         if (err)
3356                 return err;
3357
3358         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3359             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3360                 pci_using_dac = 1;
3361         } else {
3362                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3363                 if (err) {
3364                         err = dma_set_coherent_mask(&pdev->dev,
3365                                                     DMA_BIT_MASK(32));
3366                         if (err) {
3367                                 dev_err(&pdev->dev, "No usable DMA "
3368                                         "configuration, aborting\n");
3369                                 goto err_dma;
3370                         }
3371                 }
3372                 pci_using_dac = 0;
3373         }
3374
3375         err = pci_request_regions(pdev, ixgbevf_driver_name);
3376         if (err) {
3377                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3378                 goto err_pci_reg;
3379         }
3380
3381         pci_set_master(pdev);
3382
3383         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3384                                    MAX_TX_QUEUES);
3385         if (!netdev) {
3386                 err = -ENOMEM;
3387                 goto err_alloc_etherdev;
3388         }
3389
3390         SET_NETDEV_DEV(netdev, &pdev->dev);
3391
3392         pci_set_drvdata(pdev, netdev);
3393         adapter = netdev_priv(netdev);
3394
3395         adapter->netdev = netdev;
3396         adapter->pdev = pdev;
3397         hw = &adapter->hw;
3398         hw->back = adapter;
3399         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3400
3401         /*
3402          * call save state here in the standalone driver because it relies on
3403          * the adapter struct existing, and needs to call netdev_priv
3404          */
3405         pci_save_state(pdev);
3406
3407         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3408                               pci_resource_len(pdev, 0));
3409         if (!hw->hw_addr) {
3410                 err = -EIO;
3411                 goto err_ioremap;
3412         }
3413
3414         ixgbevf_assign_netdev_ops(netdev);
3415
3416         adapter->bd_number = cards_found;
3417
3418         /* Setup hw api */
3419         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3420         hw->mac.type  = ii->mac;
3421
3422         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3423                sizeof(struct ixgbe_mbx_operations));
3424
3425         /* setup the private structure */
3426         err = ixgbevf_sw_init(adapter);
3427         if (err)
3428                 goto err_sw_init;
3429
3430         /* The HW MAC address was set and/or determined in sw_init */
3431         if (!is_valid_ether_addr(netdev->dev_addr)) {
3432                 pr_err("invalid MAC address\n");
3433                 err = -EIO;
3434                 goto err_sw_init;
3435         }
3436
3437         netdev->hw_features = NETIF_F_SG |
3438                            NETIF_F_IP_CSUM |
3439                            NETIF_F_IPV6_CSUM |
3440                            NETIF_F_TSO |
3441                            NETIF_F_TSO6 |
3442                            NETIF_F_RXCSUM;
3443
3444         netdev->features = netdev->hw_features |
3445                            NETIF_F_HW_VLAN_CTAG_TX |
3446                            NETIF_F_HW_VLAN_CTAG_RX |
3447                            NETIF_F_HW_VLAN_CTAG_FILTER;
3448
3449         netdev->vlan_features |= NETIF_F_TSO;
3450         netdev->vlan_features |= NETIF_F_TSO6;
3451         netdev->vlan_features |= NETIF_F_IP_CSUM;
3452         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3453         netdev->vlan_features |= NETIF_F_SG;
3454
3455         if (pci_using_dac)
3456                 netdev->features |= NETIF_F_HIGHDMA;
3457
3458         netdev->priv_flags |= IFF_UNICAST_FLT;
3459
3460         init_timer(&adapter->watchdog_timer);
3461         adapter->watchdog_timer.function = ixgbevf_watchdog;
3462         adapter->watchdog_timer.data = (unsigned long)adapter;
3463
3464         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3465         INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3466
3467         err = ixgbevf_init_interrupt_scheme(adapter);
3468         if (err)
3469                 goto err_sw_init;
3470
3471         strcpy(netdev->name, "eth%d");
3472
3473         err = register_netdev(netdev);
3474         if (err)
3475                 goto err_register;
3476
3477         netif_carrier_off(netdev);
3478
3479         ixgbevf_init_last_counter_stats(adapter);
3480
3481         /* print the MAC address */
3482         hw_dbg(hw, "%pM\n", netdev->dev_addr);
3483
3484         hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3485
3486         hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3487         cards_found++;
3488         return 0;
3489
3490 err_register:
3491         ixgbevf_clear_interrupt_scheme(adapter);
3492 err_sw_init:
3493         ixgbevf_reset_interrupt_capability(adapter);
3494         iounmap(hw->hw_addr);
3495 err_ioremap:
3496         free_netdev(netdev);
3497 err_alloc_etherdev:
3498         pci_release_regions(pdev);
3499 err_pci_reg:
3500 err_dma:
3501         pci_disable_device(pdev);
3502         return err;
3503 }
3504
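/*
 * The DMA setup at the top of ixgbevf_probe() tries a 64-bit mask first
 * and falls back to 32-bit, remembering the outcome in pci_using_dac so
 * that NETIF_F_HIGHDMA is only advertised when 64-bit addressing works.
 * The same logic, condensed with the later dma_set_mask_and_coherent()
 * helper (this file predates it and sets the two masks separately):
 *
 *	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		pci_using_dac = 1;
 *	else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		pci_using_dac = 0;
 *	else
 *		goto err_dma;
 */
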
3505 /**
3506  * ixgbevf_remove - Device Removal Routine
3507  * @pdev: PCI device information struct
3508  *
3509  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3510  * that it should release a PCI device.  This could be caused by a
3511  * Hot-Plug event, or because the driver is going to be removed from
3512  * memory.
3513  **/
3514 static void ixgbevf_remove(struct pci_dev *pdev)
3515 {
3516         struct net_device *netdev = pci_get_drvdata(pdev);
3517         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3518
3519         set_bit(__IXGBEVF_DOWN, &adapter->state);
3520
3521         del_timer_sync(&adapter->watchdog_timer);
3522
3523         cancel_work_sync(&adapter->reset_task);
3524         cancel_work_sync(&adapter->watchdog_task);
3525
3526         if (netdev->reg_state == NETREG_REGISTERED)
3527                 unregister_netdev(netdev);
3528
3529         ixgbevf_clear_interrupt_scheme(adapter);
3530         ixgbevf_reset_interrupt_capability(adapter);
3531
3532         iounmap(adapter->hw.hw_addr);
3533         pci_release_regions(pdev);
3534
3535         hw_dbg(&adapter->hw, "Remove complete\n");
3536
3537         kfree(adapter->tx_ring);
3538         kfree(adapter->rx_ring);
3539
3540         free_netdev(netdev);
3541
3542         pci_disable_device(pdev);
3543 }
3544
3545 /**
3546  * ixgbevf_io_error_detected - called when PCI error is detected
3547  * @pdev: Pointer to PCI device
3548  * @state: The current pci connection state
3549  *
3550  * This function is called after a PCI bus error affecting
3551  * this device has been detected.
3552  */
3553 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3554                                                   pci_channel_state_t state)
3555 {
3556         struct net_device *netdev = pci_get_drvdata(pdev);
3557         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3558
3559         netif_device_detach(netdev);
3560
3561         if (state == pci_channel_io_perm_failure)
3562                 return PCI_ERS_RESULT_DISCONNECT;
3563
3564         if (netif_running(netdev))
3565                 ixgbevf_down(adapter);
3566
3567         pci_disable_device(pdev);
3568
3569         /* Request a slot reset. */
3570         return PCI_ERS_RESULT_NEED_RESET;
3571 }
3572
3573 /**
3574  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3575  * @pdev: Pointer to PCI device
3576  *
3577  * Restart the card from scratch, as if from a cold boot. The implementation
3578  * resembles the first half of the ixgbevf_resume routine.
3579  */
3580 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3581 {
3582         struct net_device *netdev = pci_get_drvdata(pdev);
3583         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3584
3585         if (pci_enable_device_mem(pdev)) {
3586                 dev_err(&pdev->dev,
3587                         "Cannot re-enable PCI device after reset.\n");
3588                 return PCI_ERS_RESULT_DISCONNECT;
3589         }
3590
3591         pci_set_master(pdev);
3592
3593         ixgbevf_reset(adapter);
3594
3595         return PCI_ERS_RESULT_RECOVERED;
3596 }
3597
3598 /**
3599  * ixgbevf_io_resume - called when traffic can start flowing again.
3600  * @pdev: Pointer to PCI device
3601  *
3602  * This callback is called when the error recovery driver tells us that
3603  * it's OK to resume normal operation. The implementation resembles the
3604  * second half of the ixgbevf_resume routine.
3605  */
3606 static void ixgbevf_io_resume(struct pci_dev *pdev)
3607 {
3608         struct net_device *netdev = pci_get_drvdata(pdev);
3609         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3610
3611         if (netif_running(netdev))
3612                 ixgbevf_up(adapter);
3613
3614         netif_device_attach(netdev);
3615 }
3616
3617 /* PCI Error Recovery (ERS) */
3618 static const struct pci_error_handlers ixgbevf_err_handler = {
3619         .error_detected = ixgbevf_io_error_detected,
3620         .slot_reset = ixgbevf_io_slot_reset,
3621         .resume = ixgbevf_io_resume,
3622 };
3623
3624 static struct pci_driver ixgbevf_driver = {
3625         .name     = ixgbevf_driver_name,
3626         .id_table = ixgbevf_pci_tbl,
3627         .probe    = ixgbevf_probe,
3628         .remove   = ixgbevf_remove,
3629 #ifdef CONFIG_PM
3630         /* Power Management Hooks */
3631         .suspend  = ixgbevf_suspend,
3632         .resume   = ixgbevf_resume,
3633 #endif
3634         .shutdown = ixgbevf_shutdown,
3635         .err_handler = &ixgbevf_err_handler
3636 };
3637
3638 /**
3639  * ixgbevf_init_module - Driver Registration Routine
3640  *
3641  * ixgbevf_init_module is the first routine called when the driver is
3642  * loaded. All it does is register with the PCI subsystem.
3643  **/
3644 static int __init ixgbevf_init_module(void)
3645 {
3646         int ret;
3647         pr_info("%s - version %s\n", ixgbevf_driver_string,
3648                 ixgbevf_driver_version);
3649
3650         pr_info("%s\n", ixgbevf_copyright);
3651
3652         ret = pci_register_driver(&ixgbevf_driver);
3653         return ret;
3654 }
3655
3656 module_init(ixgbevf_init_module);
3657
3658 /**
3659  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3660  *
3661  * ixgbevf_exit_module is called just before the driver is removed
3662  * from memory.
3663  **/
3664 static void __exit ixgbevf_exit_module(void)
3665 {
3666         pci_unregister_driver(&ixgbevf_driver);
3667 }
3668
3669 #ifdef DEBUG
3670 /**
3671  * ixgbevf_get_hw_dev_name - return device name string
3672  * used by the hardware layer to print debugging information
3673  **/
3674 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3675 {
3676         struct ixgbevf_adapter *adapter = hw->back;
3677         return adapter->netdev->name;
3678 }
3679
3680 #endif
3681 module_exit(ixgbevf_exit_module);
3682
3683 /* ixgbevf_main.c */