// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

int iavf_status_to_errno(enum iavf_status status)
{
        switch (status) {
        case IAVF_SUCCESS:
                return 0;
        case IAVF_ERR_PARAM:
        case IAVF_ERR_MAC_TYPE:
        case IAVF_ERR_INVALID_MAC_ADDR:
        case IAVF_ERR_INVALID_LINK_SETTINGS:
        case IAVF_ERR_INVALID_PD_ID:
        case IAVF_ERR_INVALID_QP_ID:
        case IAVF_ERR_INVALID_CQ_ID:
        case IAVF_ERR_INVALID_CEQ_ID:
        case IAVF_ERR_INVALID_AEQ_ID:
        case IAVF_ERR_INVALID_SIZE:
        case IAVF_ERR_INVALID_ARP_INDEX:
        case IAVF_ERR_INVALID_FPM_FUNC_ID:
        case IAVF_ERR_QP_INVALID_MSG_SIZE:
        case IAVF_ERR_INVALID_FRAG_COUNT:
        case IAVF_ERR_INVALID_ALIGNMENT:
        case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
        case IAVF_ERR_INVALID_IMM_DATA_SIZE:
        case IAVF_ERR_INVALID_VF_ID:
        case IAVF_ERR_INVALID_HMCFN_ID:
        case IAVF_ERR_INVALID_PBLE_INDEX:
        case IAVF_ERR_INVALID_SD_INDEX:
        case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
        case IAVF_ERR_INVALID_SD_TYPE:
        case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
        case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
        case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
                return -EINVAL;
        case IAVF_ERR_NVM:
        case IAVF_ERR_NVM_CHECKSUM:
        case IAVF_ERR_PHY:
        case IAVF_ERR_CONFIG:
        case IAVF_ERR_UNKNOWN_PHY:
        case IAVF_ERR_LINK_SETUP:
        case IAVF_ERR_ADAPTER_STOPPED:
        case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
        case IAVF_ERR_AUTONEG_NOT_COMPLETE:
        case IAVF_ERR_RESET_FAILED:
        case IAVF_ERR_BAD_PTR:
        case IAVF_ERR_SWFW_SYNC:
        case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
        case IAVF_ERR_QUEUE_EMPTY:
        case IAVF_ERR_FLUSHED_QUEUE:
        case IAVF_ERR_OPCODE_MISMATCH:
        case IAVF_ERR_CQP_COMPL_ERROR:
        case IAVF_ERR_BACKING_PAGE_ERROR:
        case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
        case IAVF_ERR_MEMCPY_FAILED:
        case IAVF_ERR_SRQ_ENABLED:
        case IAVF_ERR_ADMIN_QUEUE_ERROR:
        case IAVF_ERR_ADMIN_QUEUE_FULL:
        case IAVF_ERR_BAD_IWARP_CQE:
        case IAVF_ERR_NVM_BLANK_MODE:
        case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
        case IAVF_ERR_DIAG_TEST_FAILED:
        case IAVF_ERR_FIRMWARE_API_VERSION:
        case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
                return -EIO;
        case IAVF_ERR_DEVICE_NOT_SUPPORTED:
                return -ENODEV;
        case IAVF_ERR_NO_AVAILABLE_VSI:
        case IAVF_ERR_RING_FULL:
                return -ENOSPC;
        case IAVF_ERR_NO_MEMORY:
                return -ENOMEM;
        case IAVF_ERR_TIMEOUT:
        case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
                return -ETIMEDOUT;
        case IAVF_ERR_NOT_IMPLEMENTED:
        case IAVF_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
                return -EALREADY;
        case IAVF_ERR_NOT_READY:
                return -EBUSY;
        case IAVF_ERR_BUF_TOO_SHORT:
                return -EMSGSIZE;
        }

        return -EIO;
}

int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
        switch (v_status) {
        case VIRTCHNL_STATUS_SUCCESS:
                return 0;
        case VIRTCHNL_STATUS_ERR_PARAM:
        case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
                return -EINVAL;
        case VIRTCHNL_STATUS_ERR_NO_MEMORY:
                return -ENOMEM;
        case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
        case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
        case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
                return -EIO;
        case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        }

        return -EIO;
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
        return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
                                         struct iavf_dma_mem *mem,
                                         u64 size, u32 alignment)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}
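
/* A minimal usage sketch (illustrative only, not part of the driver): the
 * shared admin-queue code obtains its descriptor rings through this wrapper.
 * A caller needing a 4 KB region aligned to 4 KB would do roughly:
 *
 *      struct iavf_dma_mem mem;
 *
 *      if (iavf_allocate_dma_mem_d(hw, &mem, 4096, 4096))
 *              return IAVF_ERR_NO_MEMORY;
 *      ...use mem.va (CPU address) and mem.pa (DMA address)...
 *      iavf_free_dma_mem_d(hw, &mem);
 */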

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
                                     struct iavf_dma_mem *mem)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem || !mem->va)
                return IAVF_ERR_PARAM;
        dma_free_coherent(&adapter->pdev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
                                          struct iavf_virt_mem *mem, u32 size)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
                                      struct iavf_virt_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);

        return 0;
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
        unsigned int wait, delay = 10;

        for (wait = 0; wait < msecs; wait += delay) {
                if (mutex_trylock(lock))
                        return 0;

                msleep(delay);
        }

        return -1;
}
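
/* Example (illustrative only): callers use this where blocking forever on a
 * mutex could deadlock with a task that is waiting on us, e.g.
 *
 *      if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *              return;                 (gave up after ~5 seconds)
 *      ...critical section...
 *      mutex_unlock(&adapter->crit_lock);
 *
 * crit_lock is used here only as a plausible example of such a mutex.
 */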

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
                queue_work(iavf_wq, &adapter->reset_task);
        }
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
        adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        adapter->tx_timeout_count++;
        iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

        iavf_flush(hw);

        synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
        int i;
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
        }
        iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & BIT(i - 1)) {
                        wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
                             IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                             IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
                }
        }
}
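
/* Note: vector 0 is the admin-queue ("misc") vector, so queue vector i is
 * controlled by bit (i - 1) of @mask.  Passing ~0, as iavf_irq_enable()
 * does below, enables every queue interrupt; enabling only the first two
 * queue vectors would look like (illustrative only):
 *
 *      iavf_irq_enable_queues(adapter, BIT(0) | BIT(1));
 */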

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: whether to flush pending register writes with rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
        struct iavf_hw *hw = &adapter->hw;

        iavf_misc_irq_enable(adapter);
        iavf_irq_enable_queues(adapter, ~0);

        if (flush)
                iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
        struct net_device *netdev = data;
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;

        /* handle non-queue interrupts, these reads clear the registers */
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);

        if (adapter->state != __IAVF_REMOVE)
                /* schedule work on the private workqueue */
                queue_work(iavf_wq, &adapter->adminq_task);

        return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
        struct iavf_q_vector *q_vector = data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

        napi_schedule_irqoff(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
        struct iavf_hw *hw = &adapter->hw;

        rx_ring->q_vector = q_vector;
        rx_ring->next = q_vector->rx.ring;
        rx_ring->vsi = &adapter->vsi;
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
             q_vector->rx.current_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
        struct iavf_hw *hw = &adapter->hw;

        tx_ring->q_vector = q_vector;
        tx_ring->next = q_vector->tx.ring;
        tx_ring->vsi = &adapter->vsi;
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
             q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
        int rings_remaining = adapter->num_active_queues;
        int ridx = 0, vidx = 0;
        int q_vectors;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (; ridx < rings_remaining; ridx++) {
                iavf_map_vector_to_rxq(adapter, vidx, ridx);
                iavf_map_vector_to_txq(adapter, vidx, ridx);

                /* In the case where we have more queues than vectors, continue
                 * round-robin on vectors until all queues are mapped.
                 */
                if (++vidx >= q_vectors)
                        vidx = 0;
        }

        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
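
/* Example (illustrative only): with 8 active queues but only 4 queue
 * vectors, the round-robin above maps
 *
 *      vector 0: queues 0 and 4
 *      vector 1: queues 1 and 5
 *      vector 2: queues 2 and 6
 *      vector 3: queues 3 and 7
 */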

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        struct iavf_q_vector *q_vector =
                container_of(notify, struct iavf_q_vector, affinity_notify);

        cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for the non-queue (admin queue) vector */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%u", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%u", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0,
                                  q_vector->name,
                                  q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release =
                                                   iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_update_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}
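
/* Example (illustrative only): for a netdev named "eth0" whose vectors all
 * carry both Tx and Rx rings, the names built above appear in
 * /proc/interrupts as "iavf-eth0-TxRx-0", "iavf-eth0-TxRx-1", and so on.
 */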

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu.  On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}
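
/* Summary of the sizing above (assuming 4K pages and non-legacy Rx):
 * jumbo-capable configurations get IAVF_RXBUFFER_3072, while a standard
 * Ethernet MTU drops back to IAVF_RXBUFFER_1536 - NET_IP_ALIGN when 2K
 * buffers would not be too small with padding.  Legacy Rx, and systems
 * with pages of 8K or larger, always use IAVF_RXBUFFER_2048.
 */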

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
                                 struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (f->vlan.vid == vlan.vid &&
                    f->vlan.tpid == vlan.tpid)
                        return f;
        }

        return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
                                struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto clearout;

                f->vlan = vlan;

                list_add_tail(&f->list, &adapter->vlan_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non-MAC filters when the VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
        u16 vid;

        /* re-add all VLAN filters */
        for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));

        for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
        return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
                bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}
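
/* Example (illustrative only): with CTAG VLANs 10 and 20 and STAG VLAN 30
 * active, this returns 3; iavf_max_vlans_added() below compares that count
 * against the negotiated maximum.
 */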

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
        /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
         * never been a limit on the VF driver side
         */
        if (VLAN_ALLOWED(adapter))
                return VLAN_N_VID;
        else if (VLAN_V2_ALLOWED(adapter))
                return adapter->vlan_v2_caps.filtering.max_filters;

        return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
        if (iavf_get_num_vlans_added(adapter) <
            iavf_get_max_vlans_allowed(adapter))
                return false;

        return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (!VLAN_FILTERING_ALLOWED(adapter))
                return -EIO;

        if (iavf_max_vlans_added(adapter)) {
                netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
                           iavf_get_max_vlans_allowed(adapter));
                return -EIO;
        }

        if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
                return -ENOMEM;

        return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
        if (proto == cpu_to_be16(ETH_P_8021Q))
                clear_bit(vid, adapter->vsi.active_cvlans);
        else
                clear_bit(vid, adapter->vsi.active_svlans);

        return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
                                  const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (ether_addr_equal(macaddr, f->macaddr))
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                        const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        f = iavf_find_filter(adapter, macaddr);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return f;

                ether_addr_copy(f->macaddr, macaddr);

                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                f->add_handled = false;
                f->is_new_mac = true;
                f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
        }

        return f;
}

/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
int iavf_replace_primary_mac(struct iavf_adapter *adapter,
                             const u8 *new_mac)
{
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->is_primary = false;
        }

        f = iavf_find_filter(adapter, hw->mac.addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }

        f = iavf_add_filter(adapter, new_mac);

        if (f) {
                /* Always send the request to add if changing primary MAC
                 * even if filter is already present on the list
                 */
                f->is_primary = true;
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
                ether_addr_copy(hw->mac.addr, new_mac);
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* schedule the watchdog task to immediately process the request */
        if (f) {
                queue_work(iavf_wq, &adapter->watchdog_task.work);
                return 0;
        }
        return -ENOMEM;
}

/**
 * iavf_is_mac_set_handled - check if the PF has handled a set MAC request
 * @netdev: network interface device structure
 * @macaddr: MAC address that was requested
 *
 * Returns true if the request has been handled, false otherwise
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
                                    const u8 *macaddr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;
        bool ret = false;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_filter(adapter, macaddr);

        if (!f || (!f->add && f->add_handled))
                ret = true;

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        return ret;
}

/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
        int ret;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = iavf_replace_primary_mac(adapter, addr->sa_data);

        if (ret)
                return ret;

        /* If this is an initial set MAC during VF spawn, do not wait */
        if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
                adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
                return 0;
        }

        if (handle_mac)
                goto done;

        ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));

        /* If ret < 0, the wait was interrupted.
         * If ret == 0, the wait timed out.
         * Otherwise the PF responded to the set MAC request: check whether
         * the netdev MAC was updated to the requested address; if it was
         * not, the operation failed and we return -EACCES.
         */
        if (ret < 0)
                return ret;

        if (!ret)
                return -EAGAIN;

done:
        if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return -EACCES;

        return 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (iavf_add_filter(adapter, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = iavf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (netdev->flags & IFF_PROMISC &&
            !(adapter->flags & IAVF_FLAG_PROMISC_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
        else if (!(netdev->flags & IFF_PROMISC) &&
                 adapter->flags & IAVF_FLAG_PROMISC_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

        if (netdev->flags & IFF_ALLMULTI &&
            !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
        else if (!(netdev->flags & IFF_ALLMULTI) &&
                 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;

                q_vector = &adapter->q_vectors[q_idx];
                napi = &q_vector->napi;
                napi_enable(napi);
        }
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                napi_disable(&q_vector->napi);
        }
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        iavf_set_rx_mode(netdev);

        iavf_configure_tx(adapter);
        iavf_configure_rx(adapter);
        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *ring = &adapter->rx_rings[i];

                iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
        }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
        iavf_change_state(adapter, __IAVF_RUNNING);
        clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_napi_enable_all(adapter);

        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
        struct iavf_fdir_fltr *fdir;
        struct iavf_mac_filter *f;
        struct iavf_adv_rss *rss;

        if (adapter->state <= __IAVF_DOWN_PENDING)
                return;

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        adapter->link_up = false;
        iavf_napi_disable_all(adapter);
        iavf_irq_disable(adapter);

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* clear the sync flag on all filters */
        __dev_uc_unsync(adapter->netdev, NULL);
        __dev_mc_unsync(adapter->netdev, NULL);

        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }

        /* remove all VLAN filters */
        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
                vlf->remove = true;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* remove all cloud filters */
        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
                cf->del = true;
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        /* remove all Flow Director filters */
        spin_lock_bh(&adapter->fdir_fltr_lock);
        list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
                fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);

        /* remove all advanced RSS configuration */
        spin_lock_bh(&adapter->adv_rss_lock);
        list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
                rss->state = IAVF_ADV_RSS_DEL_REQUEST;
        spin_unlock_bh(&adapter->adv_rss_lock);

        if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
                 * here for this to complete. The watchdog is still running
                 * and it will take care of this.
                 */
                adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }

        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 0) Other (Admin Queue and link, mostly)
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting IRQs.
         */
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
        if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return err;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NONQ_VECS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = err;
        return 0;
}
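
/* Example (illustrative only): a VF with 4 queue pairs would typically ask
 * for 4 + NONQ_VECS vectors.  pci_enable_msix_range() may grant anything
 * down to vector_threshold; whatever count is granted ends up in
 * num_msix_vectors, and iavf_map_rings_to_vectors() later spreads the
 * queues across the granted traffic vectors.
 */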

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
        if (!adapter->vsi_res)
                return;
        adapter->num_active_queues = 0;
        kfree(adapter->tx_rings);
        adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
        adapter->rx_rings = NULL;
}

/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring-specific
 * flags.
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *tx_ring = &adapter->tx_rings[i];
                struct iavf_ring *rx_ring = &adapter->rx_rings[i];

                /* prevent multiple L2TAG bits being set after VFR */
                tx_ring->flags &=
                        ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
                          IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
                rx_ring->flags &=
                        ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
                          IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

                if (VLAN_ALLOWED(adapter)) {
                        tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                        rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                } else if (VLAN_V2_ALLOWED(adapter)) {
                        struct virtchnl_vlan_supported_caps *stripping_support;
                        struct virtchnl_vlan_supported_caps *insertion_support;

                        stripping_support =
                                &adapter->vlan_v2_caps.offloads.stripping_support;
                        insertion_support =
                                &adapter->vlan_v2_caps.offloads.insertion_support;

                        if (stripping_support->outer) {
                                if (stripping_support->outer &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        rx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (stripping_support->outer &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
                                        rx_ring->flags |=
                                                IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
                        } else if (stripping_support->inner) {
                                if (stripping_support->inner &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        rx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (stripping_support->inner &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
                                        rx_ring->flags |=
                                                IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
                        }

                        if (insertion_support->outer) {
                                if (insertion_support->outer &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        tx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (insertion_support->outer &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
                                        tx_ring->flags |=
                                                IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
                        } else if (insertion_support->inner) {
                                if (insertion_support->inner &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        tx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (insertion_support->inner &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
                                        tx_ring->flags |=
                                                IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
                        }
                }
        }
}
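
/* Summary of the decision above: with VIRTCHNL_VF_OFFLOAD_VLAN, both Tx
 * and Rx use L2TAG1.  With VIRTCHNL_VF_OFFLOAD_VLAN_V2, the location
 * follows the PF-advertised stripping/insertion caps, preferring outer
 * over inner support on both the Tx and Rx side.
 */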

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
        int i, num_active_queues;

        /* If we're reallocating queues during a reset, we don't yet know for
         * certain that the PF gave us the number of queues we asked for, but
         * we'll assume it did.  Once the basic reset finishes, we'll confirm
         * this when we start negotiating the config with the PF.
         */
1509         if (adapter->num_req_queues)
1510                 num_active_queues = adapter->num_req_queues;
1511         else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1512                  adapter->num_tc)
1513                 num_active_queues = adapter->ch_config.total_qps;
1514         else
1515                 num_active_queues = min_t(int,
1516                                           adapter->vsi_res->num_queue_pairs,
1517                                           (int)(num_online_cpus()));
1518 
1520         adapter->tx_rings = kcalloc(num_active_queues,
1521                                     sizeof(struct iavf_ring), GFP_KERNEL);
1522         if (!adapter->tx_rings)
1523                 goto err_out;
1524         adapter->rx_rings = kcalloc(num_active_queues,
1525                                     sizeof(struct iavf_ring), GFP_KERNEL);
1526         if (!adapter->rx_rings)
1527                 goto err_out;
1528
1529         for (i = 0; i < num_active_queues; i++) {
1530                 struct iavf_ring *tx_ring;
1531                 struct iavf_ring *rx_ring;
1532
1533                 tx_ring = &adapter->tx_rings[i];
1534
1535                 tx_ring->queue_index = i;
1536                 tx_ring->netdev = adapter->netdev;
1537                 tx_ring->dev = &adapter->pdev->dev;
1538                 tx_ring->count = adapter->tx_desc_count;
1539                 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1540                 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1541                         tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1542
1543                 rx_ring = &adapter->rx_rings[i];
1544                 rx_ring->queue_index = i;
1545                 rx_ring->netdev = adapter->netdev;
1546                 rx_ring->dev = &adapter->pdev->dev;
1547                 rx_ring->count = adapter->rx_desc_count;
1548                 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1549         }
1550
1551         adapter->num_active_queues = num_active_queues;
1552
1553         iavf_set_queue_vlan_tag_loc(adapter);
1554
1555         return 0;
1556
1557 err_out:
1558         iavf_free_queues(adapter);
1559         return -ENOMEM;
1560 }
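
/* A minimal standalone sketch (illustrative only; the function and parameter
 * names are hypothetical, not driver code) of the queue-count precedence
 * implemented above: an explicit request wins, then the ADq channel total,
 * then the smaller of the VSI's queue pairs and the number of online CPUs.
 */
static inline int example_pick_queue_count(int requested, int adq_total_qps,
                                           int vsi_pairs, int online_cpus)
{
        if (requested)
                return requested;
        if (adq_total_qps)
                return adq_total_qps;
        return min(vsi_pairs, online_cpus);
}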
1561
1562 /**
1563  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1564  * @adapter: board private structure to initialize
1565  *
1566  * Attempt to configure the interrupts using the best available
1567  * capabilities of the hardware and the kernel.
1568  **/
1569 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1570 {
1571         int vector, v_budget;
1572         int pairs = 0;
1573         int err = 0;
1574
1575         if (!adapter->vsi_res) {
1576                 err = -EIO;
1577                 goto out;
1578         }
1579         pairs = adapter->num_active_queues;
1580
1581         /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1582          * us much good if we have more vectors than CPUs. However, we already
1583          * limit the total number of queues by the number of CPUs so we do not
1584          * need any further limiting here.
1585          */
1586         v_budget = min_t(int, pairs + NONQ_VECS,
1587                          (int)adapter->vf_res->max_vectors);
1588
1589         adapter->msix_entries = kcalloc(v_budget,
1590                                         sizeof(struct msix_entry), GFP_KERNEL);
1591         if (!adapter->msix_entries) {
1592                 err = -ENOMEM;
1593                 goto out;
1594         }
1595
1596         for (vector = 0; vector < v_budget; vector++)
1597                 adapter->msix_entries[vector].entry = vector;
1598
1599         err = iavf_acquire_msix_vectors(adapter, v_budget);
1600
1601 out:
1602         netif_set_real_num_rx_queues(adapter->netdev, pairs);
1603         netif_set_real_num_tx_queues(adapter->netdev, pairs);
1604         return err;
1605 }
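
/* Worked example of the vector budget above (illustrative numbers): with 8
 * queue pairs and NONQ_VECS reserving one vector for the miscellaneous
 * admin-queue interrupt, a PF advertising max_vectors == 5 gives
 * v_budget = min(8 + 1, 5) == 5, so the 8 queue pairs end up sharing the
 * 4 traffic vectors left after the misc vector is set aside.
 */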
1606
1607 /**
1608  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1609  * @adapter: board private structure
1610  *
1611  * Return 0 on success, negative on failure
1612  **/
1613 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1614 {
1615         struct iavf_aqc_get_set_rss_key_data *rss_key =
1616                 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1617         struct iavf_hw *hw = &adapter->hw;
1618         enum iavf_status status;
1619
1620         if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1621                 /* bail because we already have a command pending */
1622                 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1623                         adapter->current_op);
1624                 return -EBUSY;
1625         }
1626
1627         status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1628         if (status) {
1629                 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1630                         iavf_stat_str(hw, status),
1631                         iavf_aq_str(hw, hw->aq.asq_last_status));
1632                 return iavf_status_to_errno(status);
1633
1634         }
1635
1636         status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1637                                      adapter->rss_lut, adapter->rss_lut_size);
1638         if (status) {
1639                 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1640                         iavf_stat_str(hw, status),
1641                         iavf_aq_str(hw, hw->aq.asq_last_status));
1642                 return iavf_status_to_errno(status);
1643         }
1644
1645         return 0;
1646 }
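
/* The current_op check above reflects the driver-wide single-command rule:
 * only one virtchnl operation may be outstanding at a time, so any path
 * that finds current_op != VIRTCHNL_OP_UNKNOWN backs off with -EBUSY rather
 * than queueing a second request behind the pending one.
 */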
1648
1649 /**
1650  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1651  * @adapter: board private structure
1652  *
1653  * Returns 0 on success, negative on failure
1654  **/
1655 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1656 {
1657         struct iavf_hw *hw = &adapter->hw;
1658         u32 *dw;
1659         u16 i;
1660
1661         dw = (u32 *)adapter->rss_key;
1662         for (i = 0; i < adapter->rss_key_size / 4; i++)
1663                 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1664 
1665         dw = (u32 *)adapter->rss_lut;
1666         for (i = 0; i < adapter->rss_lut_size / 4; i++)
1667                 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1668
1669         iavf_flush(hw);
1670
1671         return 0;
1672 }
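
/* Worked example (illustrative): with the default 52-byte key,
 * rss_key_size / 4 == 13, so the loop above issues 13 dword writes filling
 * HKEY(0)..HKEY(12); the default 64-byte LUT likewise fills
 * HLUT(0)..HLUT(15) in 16 writes. The final iavf_flush() is a register
 * read-back used to push the posted writes out to the device.
 */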
1673
1674 /**
1675  * iavf_config_rss - Configure RSS keys and lut
1676  * @adapter: board private structure
1677  *
1678  * Returns 0 on success, negative on failure
1679  **/
1680 int iavf_config_rss(struct iavf_adapter *adapter)
1681 {
1683         if (RSS_PF(adapter)) {
1684                 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1685                                         IAVF_FLAG_AQ_SET_RSS_KEY;
1686                 return 0;
1687         } else if (RSS_AQ(adapter)) {
1688                 return iavf_config_rss_aq(adapter);
1689         } else {
1690                 return iavf_config_rss_reg(adapter);
1691         }
1692 }
1693
1694 /**
1695  * iavf_fill_rss_lut - Fill the lut with default values
1696  * @adapter: board private structure
1697  **/
1698 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1699 {
1700         u16 i;
1701
1702         for (i = 0; i < adapter->rss_lut_size; i++)
1703                 adapter->rss_lut[i] = i % adapter->num_active_queues;
1704 }
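
/* Worked example: with 4 active queues and a 64-entry LUT the table above
 * becomes 0,1,2,3,0,1,2,3,... so hash buckets are spread round-robin across
 * every enabled queue until user space overrides the mapping (e.g. with
 * "ethtool -X").
 */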
1705
1706 /**
1707  * iavf_init_rss - Prepare for RSS
1708  * @adapter: board private structure
1709  *
1710  * Return 0 on success, negative on failure
1711  **/
1712 static int iavf_init_rss(struct iavf_adapter *adapter)
1713 {
1714         struct iavf_hw *hw = &adapter->hw;
1715
1716         if (!RSS_PF(adapter)) {
1717                 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1718                 if (adapter->vf_res->vf_cap_flags &
1719                     VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1720                         adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1721                 else
1722                         adapter->hena = IAVF_DEFAULT_RSS_HENA;
1723
1724                 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1725                 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1726         }
1727
1728         iavf_fill_rss_lut(adapter);
1729         netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1730
1731         return iavf_config_rss(adapter);
1732 }
1733
1734 /**
1735  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1736  * @adapter: board private structure to initialize
1737  *
1738  * We allocate one q_vector per queue interrupt.  If allocation fails we
1739  * return -ENOMEM.
1740  **/
1741 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1742 {
1743         int q_idx = 0, num_q_vectors;
1744         struct iavf_q_vector *q_vector;
1745
1746         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1747         adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1748                                      GFP_KERNEL);
1749         if (!adapter->q_vectors)
1750                 return -ENOMEM;
1751
1752         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1753                 q_vector = &adapter->q_vectors[q_idx];
1754                 q_vector->adapter = adapter;
1755                 q_vector->vsi = &adapter->vsi;
1756                 q_vector->v_idx = q_idx;
1757                 q_vector->reg_idx = q_idx;
1758                 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1759                 netif_napi_add(adapter->netdev, &q_vector->napi,
1760                                iavf_napi_poll, NAPI_POLL_WEIGHT);
1761         }
1762
1763         return 0;
1764 }
1765
1766 /**
1767  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1768  * @adapter: board private structure to initialize
1769  *
1770  * This function frees the memory allocated to the q_vectors.  In addition if
1771  * NAPI is enabled it will delete any references to the NAPI struct prior
1772  * to freeing the q_vector.
1773  **/
1774 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1775 {
1776         int q_idx, num_q_vectors;
1777         int napi_vectors;
1778
1779         if (!adapter->q_vectors)
1780                 return;
1781
1782         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1783         napi_vectors = adapter->num_active_queues;
1784
1785         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1786                 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1787
1788                 if (q_idx < napi_vectors)
1789                         netif_napi_del(&q_vector->napi);
1790         }
1791         kfree(adapter->q_vectors);
1792         adapter->q_vectors = NULL;
1793 }
1794
1795 /**
1796  * iavf_reset_interrupt_capability - Reset MSIX setup
1797  * @adapter: board private structure
1798  *
1799  **/
1800 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1801 {
1802         if (!adapter->msix_entries)
1803                 return;
1804
1805         pci_disable_msix(adapter->pdev);
1806         kfree(adapter->msix_entries);
1807         adapter->msix_entries = NULL;
1808 }
1809
1810 /**
1811  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1812  * @adapter: board private structure to initialize
1813  *
1814  **/
1815 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1816 {
1817         int err;
1818
1819         err = iavf_alloc_queues(adapter);
1820         if (err) {
1821                 dev_err(&adapter->pdev->dev,
1822                         "Unable to allocate memory for queues\n");
1823                 goto err_alloc_queues;
1824         }
1825
1826         rtnl_lock();
1827         err = iavf_set_interrupt_capability(adapter);
1828         rtnl_unlock();
1829         if (err) {
1830                 dev_err(&adapter->pdev->dev,
1831                         "Unable to setup interrupt capabilities\n");
1832                 goto err_set_interrupt;
1833         }
1834
1835         err = iavf_alloc_q_vectors(adapter);
1836         if (err) {
1837                 dev_err(&adapter->pdev->dev,
1838                         "Unable to allocate memory for queue vectors\n");
1839                 goto err_alloc_q_vectors;
1840         }
1841
1842         /* If we've made it this far with the ADq flag ON, then we haven't
1843          * bailed out anywhere along the way, and ADq isn't merely enabled:
1844          * its actual resources were allocated in the reset path.
1845          * Now we can truly claim that ADq is enabled.
1846          */
1847         if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1848             adapter->num_tc)
1849                 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1850                          adapter->num_tc);
1851
1852         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1853                  (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1854                  adapter->num_active_queues);
1855
1856         return 0;
1857 err_alloc_q_vectors:
1858         iavf_reset_interrupt_capability(adapter);
1859 err_set_interrupt:
1860         iavf_free_queues(adapter);
1861 err_alloc_queues:
1862         return err;
1863 }
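
/* A standalone sketch (hypothetical callbacks; not driver code) of the
 * goto-unwind idiom used above: each failure jumps to a label that undoes
 * only the steps that already succeeded, in reverse order of setup.
 */
static inline int example_init_two_steps(int (*step_a)(void),
                                         void (*undo_a)(void),
                                         int (*step_b)(void))
{
        int err;

        err = step_a();
        if (err)
                goto err_a;
        err = step_b();
        if (err)
                goto err_b;
        return 0;
err_b:
        undo_a();       /* undo only what step_a set up */
err_a:
        return err;
}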
1864
1865 /**
1866  * iavf_free_rss - Free memory used by RSS structs
1867  * @adapter: board private structure
1868  **/
1869 static void iavf_free_rss(struct iavf_adapter *adapter)
1870 {
1871         kfree(adapter->rss_key);
1872         adapter->rss_key = NULL;
1873
1874         kfree(adapter->rss_lut);
1875         adapter->rss_lut = NULL;
1876 }
1877
1878 /**
1879  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1880  * @adapter: board private structure
1881  *
1882  * Returns 0 on success, negative on failure
1883  **/
1884 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1885 {
1886         struct net_device *netdev = adapter->netdev;
1887         int err;
1888
1889         if (netif_running(netdev))
1890                 iavf_free_traffic_irqs(adapter);
1891         iavf_free_misc_irq(adapter);
1892         iavf_reset_interrupt_capability(adapter);
1893         iavf_free_q_vectors(adapter);
1894         iavf_free_queues(adapter);
1895
1896         err = iavf_init_interrupt_scheme(adapter);
1897         if (err)
1898                 goto err;
1899
1900         netif_tx_stop_all_queues(netdev);
1901
1902         err = iavf_request_misc_irq(adapter);
1903         if (err)
1904                 goto err;
1905
1906         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1907
1908         iavf_map_rings_to_vectors(adapter);
1909 err:
1910         return err;
1911 }
1912
1913 /**
1914  * iavf_process_aq_command - process aq_required flags and send an aq command
1915  * @adapter: pointer to iavf adapter structure
1916  *
1917  * Returns 0 on success, or an error code if no command was sent or the
1918  * command failed.
1921  **/
1922 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1923 {
1924         if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1925                 return iavf_send_vf_config_msg(adapter);
1926         if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1927                 return iavf_send_vf_offload_vlan_v2_msg(adapter);
1928         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1929                 iavf_disable_queues(adapter);
1930                 return 0;
1931         }
1932
1933         if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1934                 iavf_map_queues(adapter);
1935                 return 0;
1936         }
1937
1938         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1939                 iavf_add_ether_addrs(adapter);
1940                 return 0;
1941         }
1942
1943         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1944                 iavf_add_vlans(adapter);
1945                 return 0;
1946         }
1947
1948         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1949                 iavf_del_ether_addrs(adapter);
1950                 return 0;
1951         }
1952
1953         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1954                 iavf_del_vlans(adapter);
1955                 return 0;
1956         }
1957
1958         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1959                 iavf_enable_vlan_stripping(adapter);
1960                 return 0;
1961         }
1962
1963         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1964                 iavf_disable_vlan_stripping(adapter);
1965                 return 0;
1966         }
1967
1968         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1969                 iavf_configure_queues(adapter);
1970                 return 0;
1971         }
1972
1973         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1974                 iavf_enable_queues(adapter);
1975                 return 0;
1976         }
1977
1978         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1979                 /* This message goes straight to the firmware, not the
1980                  * PF, so we don't have to set current_op as we will
1981                  * not get a response through the ARQ.
1982                  */
1983                 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1984                 return 0;
1985         }
1986         if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1987                 iavf_get_hena(adapter);
1988                 return 0;
1989         }
1990         if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1991                 iavf_set_hena(adapter);
1992                 return 0;
1993         }
1994         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1995                 iavf_set_rss_key(adapter);
1996                 return 0;
1997         }
1998         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1999                 iavf_set_rss_lut(adapter);
2000                 return 0;
2001         }
2002
2003         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2004                 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2005                                        FLAG_VF_MULTICAST_PROMISC);
2006                 return 0;
2007         }
2008
2009         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2010                 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2011                 return 0;
2012         }
2013         if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2014             (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2015                 iavf_set_promiscuous(adapter, 0);
2016                 return 0;
2017         }
2018
2019         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2020                 iavf_enable_channels(adapter);
2021                 return 0;
2022         }
2023
2024         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2025                 iavf_disable_channels(adapter);
2026                 return 0;
2027         }
2028         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2029                 iavf_add_cloud_filter(adapter);
2030                 return 0;
2031         }
2032
2033         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2034                 iavf_del_cloud_filter(adapter);
2035                 return 0;
2036         }
2045         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2046                 iavf_add_fdir_filter(adapter);
2047                 return 0;
2048         }
2049         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2050                 iavf_del_fdir_filter(adapter);
2051                 return 0;
2052         }
2053         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2054                 iavf_add_adv_rss_cfg(adapter);
2055                 return 0;
2056         }
2057         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2058                 iavf_del_adv_rss_cfg(adapter);
2059                 return 0;
2060         }
2061         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2062                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2063                 return 0;
2064         }
2065         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2066                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2067                 return 0;
2068         }
2069         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2070                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2071                 return 0;
2072         }
2073         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2074                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2075                 return 0;
2076         }
2077         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2078                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2079                 return 0;
2080         }
2081         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2082                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2083                 return 0;
2084         }
2085         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2086                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2087                 return 0;
2088         }
2089         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2090                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2091                 return 0;
2092         }
2093
2094         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2095                 iavf_request_stats(adapter);
2096                 return 0;
2097         }
2098
2099         return -EAGAIN;
2100 }
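
/* A minimal standalone sketch (hypothetical flag bits; not driver code) of
 * the dispatch pattern above: service at most one pending request per call
 * so only a single virtchnl command is ever outstanding, and return -EAGAIN
 * when nothing was pending so the caller can fall back to other work.
 */
enum {
        EXAMPLE_AQ_OP_A = BIT(0),
        EXAMPLE_AQ_OP_B = BIT(1),
};

static inline int example_process_one_command(u64 *required)
{
        if (*required & EXAMPLE_AQ_OP_A) {
                *required &= ~EXAMPLE_AQ_OP_A;  /* a real handler sends op A here */
                return 0;
        }
        if (*required & EXAMPLE_AQ_OP_B) {
                *required &= ~EXAMPLE_AQ_OP_B;  /* likewise for op B */
                return 0;
        }
        return -EAGAIN; /* no command was sent */
}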
2101
2102 /**
2103  * iavf_set_vlan_offload_features - set VLAN offload configuration
2104  * @adapter: board private structure
2105  * @prev_features: previous features used for comparison
2106  * @features: updated features used for configuration
2107  *
2108  * Set the aq_required bit(s) based on the requested features passed in to
2109  * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2110  * the watchdog if any changes are requested to expedite the request via
2111  * virtchnl.
2112  **/
2113 void
2114 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2115                                netdev_features_t prev_features,
2116                                netdev_features_t features)
2117 {
2118         bool enable_stripping = true, enable_insertion = true;
2119         u16 vlan_ethertype = 0;
2120         u64 aq_required = 0;
2121
2122         /* keep cases separate because one ethertype for offloads can be
2123          * enabled at the same time as another is disabled, so check for an
2124          * enabled ethertype first, then check for disabled. Default to
2125          * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2126          * stripping.
2127          */
2128         if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2129                 vlan_ethertype = ETH_P_8021AD;
2130         else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2131                 vlan_ethertype = ETH_P_8021Q;
2132         else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2133                 vlan_ethertype = ETH_P_8021AD;
2134         else
2135                 vlan_ethertype = ETH_P_8021Q;
2138
2139         if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2140                 enable_stripping = false;
2141         if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2142                 enable_insertion = false;
2143
2144         if (VLAN_ALLOWED(adapter)) {
2145                 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2146                  * stripping via virtchnl. VLAN insertion can be toggled on the
2147                  * netdev, but it doesn't require a virtchnl message
2148                  */
2149                 if (enable_stripping)
2150                         aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2151                 else
2152                         aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2153
2154         } else if (VLAN_V2_ALLOWED(adapter)) {
2155                 switch (vlan_ethertype) {
2156                 case ETH_P_8021Q:
2157                         if (enable_stripping)
2158                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2159                         else
2160                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2161
2162                         if (enable_insertion)
2163                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2164                         else
2165                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2166                         break;
2167                 case ETH_P_8021AD:
2168                         if (enable_stripping)
2169                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2170                         else
2171                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2172
2173                         if (enable_insertion)
2174                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2175                         else
2176                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2177                         break;
2178                 }
2179         }
2180
2181         if (aq_required) {
2182                 adapter->aq_required |= aq_required;
2183                 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
2184         }
2185 }
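
/* Worked example (VLAN V2 case, illustrative): after
 * "ethtool -K <iface> rxvlan off", the features argument loses
 * NETIF_F_HW_VLAN_CTAG_RX but keeps NETIF_F_HW_VLAN_CTAG_TX, so the
 * ethertype resolves to ETH_P_8021Q, enable_stripping goes false while
 * enable_insertion stays true, and the function queues
 * IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING plus
 * IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION before kicking the watchdog.
 */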
2186
2187 /**
2188  * iavf_startup - first step of driver startup
2189  * @adapter: board private structure
2190  *
2191  * Function processes the __IAVF_STARTUP driver state.
2192  * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2193  * on failure the state is changed to __IAVF_INIT_FAILED.
2194  **/
2195 static void iavf_startup(struct iavf_adapter *adapter)
2196 {
2197         struct pci_dev *pdev = adapter->pdev;
2198         struct iavf_hw *hw = &adapter->hw;
2199         enum iavf_status status;
2200         int ret;
2201
2202         WARN_ON(adapter->state != __IAVF_STARTUP);
2203
2204         /* driver loaded, probe complete */
2205         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2206         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2207         status = iavf_set_mac_type(hw);
2208         if (status) {
2209                 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2210                 goto err;
2211         }
2212
2213         ret = iavf_check_reset_complete(hw);
2214         if (ret) {
2215                 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2216                          ret);
2217                 goto err;
2218         }
2219         hw->aq.num_arq_entries = IAVF_AQ_LEN;
2220         hw->aq.num_asq_entries = IAVF_AQ_LEN;
2221         hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2222         hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2223
2224         status = iavf_init_adminq(hw);
2225         if (status) {
2226                 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2227                         status);
2228                 goto err;
2229         }
2230         ret = iavf_send_api_ver(adapter);
2231         if (ret) {
2232                 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2233                 iavf_shutdown_adminq(hw);
2234                 goto err;
2235         }
2236         iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2237         return;
2238 err:
2239         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2240 }
2241
2242 /**
2243  * iavf_init_version_check - second step of driver startup
2244  * @adapter: board private structure
2245  *
2246  * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2247  * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2248  * on failure the state is changed to __IAVF_INIT_FAILED.
2249  **/
2250 static void iavf_init_version_check(struct iavf_adapter *adapter)
2251 {
2252         struct pci_dev *pdev = adapter->pdev;
2253         struct iavf_hw *hw = &adapter->hw;
2254         int err = -EAGAIN;
2255
2256         WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2257
2258         if (!iavf_asq_done(hw)) {
2259                 dev_err(&pdev->dev, "Admin queue command never completed\n");
2260                 iavf_shutdown_adminq(hw);
2261                 iavf_change_state(adapter, __IAVF_STARTUP);
2262                 goto err;
2263         }
2264
2265         /* aq msg sent, awaiting reply */
2266         err = iavf_verify_api_ver(adapter);
2267         if (err) {
2268                 if (err == -EALREADY)
2269                         err = iavf_send_api_ver(adapter);
2270                 else
2271                         dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2272                                 adapter->pf_version.major,
2273                                 adapter->pf_version.minor,
2274                                 VIRTCHNL_VERSION_MAJOR,
2275                                 VIRTCHNL_VERSION_MINOR);
2276                 goto err;
2277         }
2278         err = iavf_send_vf_config_msg(adapter);
2279         if (err) {
2280                 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2281                         err);
2282                 goto err;
2283         }
2284         iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2285         return;
2286 err:
2287         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2288 }
2289
2290 /**
2291  * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2292  * @adapter: board private structure
2293  */
2294 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2295 {
2296         int i, num_req_queues = adapter->num_req_queues;
2297         struct iavf_vsi *vsi = &adapter->vsi;
2298
2299         for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2300                 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2301                         adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2302         }
2303         if (!adapter->vsi_res) {
2304                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2305                 return -ENODEV;
2306         }
2307
2308         if (num_req_queues &&
2309             num_req_queues > adapter->vsi_res->num_queue_pairs) {
2310                 /* Problem: the PF gave us fewer queues than we had requested.
2311                  * We need a reset to see if we can get back to a working state.
2312                  */
2314                 dev_err(&adapter->pdev->dev,
2315                         "Requested %d queues, but PF only gave us %d.\n",
2316                         num_req_queues,
2317                         adapter->vsi_res->num_queue_pairs);
2318                 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2319                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2320                 iavf_schedule_reset(adapter);
2321
2322                 return -EAGAIN;
2323         }
2324         adapter->num_req_queues = 0;
2325         adapter->vsi.id = adapter->vsi_res->vsi_id;
2326
2327         adapter->vsi.back = adapter;
2328         adapter->vsi.base_vector = 1;
2329         vsi->netdev = adapter->netdev;
2330         vsi->qs_handle = adapter->vsi_res->qset_handle;
2331         if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2332                 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2333                 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2334         } else {
2335                 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2336                 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2337         }
2338
2339         return 0;
2340 }
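
/* Illustrative walk-through of the renegotiation above: if the driver asked
 * for 16 queues but the PF only granted 8, the function caps num_req_queues
 * at 8, sets IAVF_FLAG_REINIT_MSIX_NEEDED, schedules a reset and returns
 * -EAGAIN so that initialization is retried after the reset with the
 * smaller, achievable queue count.
 */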
2341
2342 /**
2343  * iavf_init_get_resources - third step of driver startup
2344  * @adapter: board private structure
2345  *
2346  * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2347  * finishes the driver initialization procedure.
2348  * On success the state is changed to __IAVF_DOWN;
2349  * on failure the state is changed to __IAVF_INIT_FAILED.
2350  **/
2351 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2352 {
2353         struct pci_dev *pdev = adapter->pdev;
2354         struct iavf_hw *hw = &adapter->hw;
2355         int err;
2356
2357         WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2358         /* aq msg sent, awaiting reply */
2359         if (!adapter->vf_res) {
2360                 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2361                                           GFP_KERNEL);
2362                 if (!adapter->vf_res) {
2363                         err = -ENOMEM;
2364                         goto err;
2365                 }
2366         }
2367         err = iavf_get_vf_config(adapter);
2368         if (err == -EALREADY) {
2369                 err = iavf_send_vf_config_msg(adapter);
2370                 goto err;
2371         } else if (err == -EINVAL) {
2372                 /* We only get -EINVAL if the device is in a very bad
2373                  * state or if we've been disabled for previous bad
2374                  * behavior. Either way, we're done now.
2375                  */
2376                 iavf_shutdown_adminq(hw);
2377                 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2378                 return;
2379         }
2380         if (err) {
2381                 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2382                 goto err_alloc;
2383         }
2384
2385         err = iavf_parse_vf_resource_msg(adapter);
2386         if (err) {
2387                 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2388                         err);
2389                 goto err_alloc;
2390         }
2391         /* Some features require additional messages to negotiate extended
2392          * capabilities. These are processed in sequence by the
2393          * __IAVF_INIT_EXTENDED_CAPS driver state.
2394          */
2395         adapter->extended_caps = IAVF_EXTENDED_CAPS;
2396
2397         iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2398         return;
2399
2400 err_alloc:
2401         kfree(adapter->vf_res);
2402         adapter->vf_res = NULL;
2403 err:
2404         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2405 }
2406
2407 /**
2408  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2409  * @adapter: board private structure
2410  *
2411  * Function handles sending the extended VLAN V2 capability message to the
2412  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2413  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2414  */
2415 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2416 {
2417         int ret;
2418
2419         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2420
2421         ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2422         if (ret == -EOPNOTSUPP) {
2423                 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this
2424                  * case, we did not send the capability exchange message and
2425                  * do not expect a response.
2426                  */
2427                 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2428         }
2429
2430         /* We sent the message, so move on to the next step */
2431         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2432 }
2433
2434 /**
2435  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2436  * @adapter: board private structure
2437  *
2438  * Function processes receipt of the extended VLAN V2 capability message from
2439  * the PF.
2440  **/
2441 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2442 {
2443         int ret;
2444
2445         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2446
2447         memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2448
2449         ret = iavf_get_vf_vlan_v2_caps(adapter);
2450         if (ret)
2451                 goto err;
2452
2453         /* We've processed receipt of the VLAN V2 caps message */
2454         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2455         return;
2456 err:
2457         /* We didn't receive a reply. Make sure we try sending again when
2458          * __IAVF_INIT_FAILED attempts to recover.
2459          */
2460         adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2461         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2462 }
2463
2464 /**
2465  * iavf_init_process_extended_caps - Part of driver startup
2466  * @adapter: board private structure
2467  *
2468  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2469  * handles negotiating capabilities for features which require an additional
2470  * message.
2471  *
2472  * Once all extended capabilities exchanges are finished, the driver will
2473  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2474  */
2475 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2476 {
2477         WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2478
2479         /* Process capability exchange for VLAN V2 */
2480         if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2481                 iavf_init_send_offload_vlan_v2_caps(adapter);
2482                 return;
2483         } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2484                 iavf_init_recv_offload_vlan_v2_caps(adapter);
2485                 return;
2486         }
2487
2488         /* When we reach here, no further extended capabilities exchanges are
2489          * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2490          */
2491         iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2492 }
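
/* The extended_caps field acts as a small bitmask state machine
 * (illustrative walk-through): IAVF_EXTENDED_CAPS seeds both
 * IAVF_EXTENDED_CAP_SEND_VLAN_V2 and IAVF_EXTENDED_CAP_RECV_VLAN_V2, the
 * send handler clears the SEND bit once the request is on the wire, the
 * receive handler clears the RECV bit once the reply is parsed, and each
 * watchdog pass re-enters this function until no bits remain, at which
 * point initialization proceeds to __IAVF_INIT_CONFIG_ADAPTER.
 */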
2493
2494 /**
2495  * iavf_init_config_adapter - last part of driver startup
2496  * @adapter: board private structure
2497  *
2498  * After all the supported capabilities are negotiated, then the
2499  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2500  */
2501 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2502 {
2503         struct net_device *netdev = adapter->netdev;
2504         struct pci_dev *pdev = adapter->pdev;
2505         int err;
2506
2507         WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2508
2509         if (iavf_process_config(adapter))
2510                 goto err;
2511
2512         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2513
2514         adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2515
2516         netdev->netdev_ops = &iavf_netdev_ops;
2517         iavf_set_ethtool_ops(netdev);
2518         netdev->watchdog_timeo = 5 * HZ;
2519
2520         /* MTU range: 68 - 9710 */
2521         netdev->min_mtu = ETH_MIN_MTU;
2522         netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2523
2524         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2525                 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2526                          adapter->hw.mac.addr);
2527                 eth_hw_addr_random(netdev);
2528                 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2529         } else {
2530                 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2531                 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2532         }
2533
2534         adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
2535
2536         adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2537         adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2538         err = iavf_init_interrupt_scheme(adapter);
2539         if (err)
2540                 goto err_sw_init;
2541         iavf_map_rings_to_vectors(adapter);
2542         if (adapter->vf_res->vf_cap_flags &
2543                 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2544                 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2545
2546         err = iavf_request_misc_irq(adapter);
2547         if (err)
2548                 goto err_sw_init;
2549
2550         netif_carrier_off(netdev);
2551         adapter->link_up = false;
2552
2553         /* hold the RTNL lock to prevent any callbacks between device
2554          * registration and the moment the driver state is set to __IAVF_DOWN
2555          */
2556         rtnl_lock();
2557         if (!adapter->netdev_registered) {
2558                 err = register_netdevice(netdev);
2559                 if (err) {
2560                         rtnl_unlock();
2561                         goto err_register;
2562                 }
2563         }
2564
2565         adapter->netdev_registered = true;
2566
2567         netif_tx_stop_all_queues(netdev);
2568         if (CLIENT_ALLOWED(adapter)) {
2569                 err = iavf_lan_add_device(adapter);
2570                 if (err)
2571                         dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2572                                  err);
2573         }
2574         dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2575         if (netdev->features & NETIF_F_GRO)
2576                 dev_info(&pdev->dev, "GRO is enabled\n");
2577
2578         iavf_change_state(adapter, __IAVF_DOWN);
2579         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2580         rtnl_unlock();
2581
2582         iavf_misc_irq_enable(adapter);
2583         wake_up(&adapter->down_waitqueue);
2584
2585         adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2586         adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2587         if (!adapter->rss_key || !adapter->rss_lut) {
2588                 err = -ENOMEM;
2589                 goto err_mem;
2590         }
2591         if (RSS_AQ(adapter))
2592                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2593         else
2594                 iavf_init_rss(adapter);
2595
2596         if (VLAN_V2_ALLOWED(adapter))
2597                 /* request initial VLAN offload settings */
2598                 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2599
2600         return;
2601 err_mem:
2602         iavf_free_rss(adapter);
2603 err_register:
2604         iavf_free_misc_irq(adapter);
2605 err_sw_init:
2606         iavf_reset_interrupt_capability(adapter);
2607 err:
2608         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2609 }
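
/* Note on the locking idiom above (a sketch of the rule, not new driver
 * logic): register_netdev() would take the RTNL lock itself, but this path
 * uses rtnl_lock() + register_netdevice() so the lock is held across both
 * registration and the transition to __IAVF_DOWN, preventing any ndo
 * callback from observing a half-initialized adapter in between.
 */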
2610
2611 /**
2612  * iavf_watchdog_task - Periodic call-back task
2613  * @work: pointer to work_struct
2614  **/
2615 static void iavf_watchdog_task(struct work_struct *work)
2616 {
2617         struct iavf_adapter *adapter = container_of(work,
2618                                                     struct iavf_adapter,
2619                                                     watchdog_task.work);
2620         struct iavf_hw *hw = &adapter->hw;
2621         u32 reg_val;
2622
2623         if (!mutex_trylock(&adapter->crit_lock)) {
2624                 if (adapter->state == __IAVF_REMOVE)
2625                         return;
2626
2627                 goto restart_watchdog;
2628         }
2629
2630         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2631                 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2632
2633         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2634                 adapter->aq_required = 0;
2635                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2636                 mutex_unlock(&adapter->crit_lock);
2637                 queue_work(iavf_wq, &adapter->reset_task);
2638                 return;
2639         }
2640
2641         switch (adapter->state) {
2642         case __IAVF_STARTUP:
2643                 iavf_startup(adapter);
2644                 mutex_unlock(&adapter->crit_lock);
2645                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2646                                    msecs_to_jiffies(30));
2647                 return;
2648         case __IAVF_INIT_VERSION_CHECK:
2649                 iavf_init_version_check(adapter);
2650                 mutex_unlock(&adapter->crit_lock);
2651                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2652                                    msecs_to_jiffies(30));
2653                 return;
2654         case __IAVF_INIT_GET_RESOURCES:
2655                 iavf_init_get_resources(adapter);
2656                 mutex_unlock(&adapter->crit_lock);
2657                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2658                                    msecs_to_jiffies(1));
2659                 return;
2660         case __IAVF_INIT_EXTENDED_CAPS:
2661                 iavf_init_process_extended_caps(adapter);
2662                 mutex_unlock(&adapter->crit_lock);
2663                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2664                                    msecs_to_jiffies(1));
2665                 return;
2666         case __IAVF_INIT_CONFIG_ADAPTER:
2667                 iavf_init_config_adapter(adapter);
2668                 mutex_unlock(&adapter->crit_lock);
2669                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2670                                    msecs_to_jiffies(1));
2671                 return;
2672         case __IAVF_INIT_FAILED:
2673                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2674                              &adapter->crit_section)) {
2675                         /* Do not update the state and do not reschedule
2676                          * watchdog task, iavf_remove should handle this state
2677                          * as it can loop forever
2678                          */
2679                         mutex_unlock(&adapter->crit_lock);
2680                         return;
2681                 }
2682                 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2683                         dev_err(&adapter->pdev->dev,
2684                                 "Failed to communicate with PF; waiting before retry\n");
2685                         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2686                         iavf_shutdown_adminq(hw);
2687                         mutex_unlock(&adapter->crit_lock);
2688                         queue_delayed_work(iavf_wq,
2689                                            &adapter->watchdog_task, (5 * HZ));
2690                         return;
2691                 }
2692                 /* Try again from the failed step */
2693                 iavf_change_state(adapter, adapter->last_state);
2694                 mutex_unlock(&adapter->crit_lock);
2695                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2696                 return;
2697         case __IAVF_COMM_FAILED:
2698                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2699                              &adapter->crit_section)) {
2700                         /* Set state to __IAVF_INIT_FAILED and perform remove
2701                          * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2702                          * doesn't bring the state back to __IAVF_COMM_FAILED.
2703                          */
2704                         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2705                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2706                         mutex_unlock(&adapter->crit_lock);
2707                         return;
2708                 }
2709                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2710                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2711                 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2712                     reg_val == VIRTCHNL_VFR_COMPLETED) {
2713                         /* A chance for redemption! */
2714                         dev_err(&adapter->pdev->dev,
2715                                 "Hardware came out of reset. Attempting reinit.\n");
2716                         /* When init task contacts the PF and
2717                          * gets everything set up again, it'll restart the
2718                          * watchdog for us. Down, boy. Sit. Stay. Woof.
2719                          */
2720                         iavf_change_state(adapter, __IAVF_STARTUP);
2721                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2722                 }
2723                 adapter->aq_required = 0;
2724                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2725                 mutex_unlock(&adapter->crit_lock);
2726                 queue_delayed_work(iavf_wq,
2727                                    &adapter->watchdog_task,
2728                                    msecs_to_jiffies(10));
2729                 return;
2730         case __IAVF_RESETTING:
2731                 mutex_unlock(&adapter->crit_lock);
2732                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2733                 return;
2734         case __IAVF_DOWN:
2735         case __IAVF_DOWN_PENDING:
2736         case __IAVF_TESTING:
2737         case __IAVF_RUNNING:
2738                 if (adapter->current_op) {
2739                         if (!iavf_asq_done(hw)) {
2740                                 dev_dbg(&adapter->pdev->dev,
2741                                         "Admin queue timeout\n");
2742                                 iavf_send_api_ver(adapter);
2743                         }
2744                 } else {
2745                         int ret = iavf_process_aq_command(adapter);
2746
2747                         /* An error will be returned if no commands were
2748                          * processed; use this opportunity to update stats
2749                          * if the error isn't -EOPNOTSUPP
2750                          */
2751                         if (ret && ret != -EOPNOTSUPP &&
2752                             adapter->state == __IAVF_RUNNING)
2753                                 iavf_request_stats(adapter);
2754                 }
2755                 if (adapter->state == __IAVF_RUNNING)
2756                         iavf_detect_recover_hung(&adapter->vsi);
2757                 break;
2758         case __IAVF_REMOVE:
2759         default:
2760                 mutex_unlock(&adapter->crit_lock);
2761                 return;
2762         }
2763
2764         /* check for hw reset */
2765         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2766         if (!reg_val) {
2767                 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2768                 adapter->aq_required = 0;
2769                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2770                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2771                 queue_work(iavf_wq, &adapter->reset_task);
2772                 mutex_unlock(&adapter->crit_lock);
2773                 queue_delayed_work(iavf_wq,
2774                                    &adapter->watchdog_task, HZ * 2);
2775                 return;
2776         }
2777
2778         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2779         mutex_unlock(&adapter->crit_lock);
2780 restart_watchdog:
2781         if (adapter->state >= __IAVF_DOWN)
2782                 queue_work(iavf_wq, &adapter->adminq_task);
2783         if (adapter->aq_required)
2784                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2785                                    msecs_to_jiffies(20));
2786         else
2787                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2788 }
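
/* Rescheduling cadence of the watchdog above, summarized from the calls in
 * this function: 30 ms between the two earliest init states, 1 ms between
 * the later init steps, 10 ms while __IAVF_COMM_FAILED polls for recovery,
 * 1 s in __IAVF_INIT_FAILED (5 s once communication is declared failed),
 * and 20 ms or 2 s in steady state depending on whether aq_required work
 * is pending.
 */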
2789
2790 /**
2791  * iavf_disable_vf - disable VF
2792  * @adapter: board private structure
2793  *
2794  * Set communication failed flag and free all resources.
2795  * NOTE: This function is expected to be called with crit_lock being held.
2796  **/
2797 static void iavf_disable_vf(struct iavf_adapter *adapter)
2798 {
2799         struct iavf_mac_filter *f, *ftmp;
2800         struct iavf_vlan_filter *fv, *fvtmp;
2801         struct iavf_cloud_filter *cf, *cftmp;
2802
2803         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2804
2805         /* We don't use netif_running() because it may be true prior to
2806          * ndo_open() returning, so we can't assume it means all our open
2807          * tasks have finished, since we're not holding the rtnl_lock here.
2808          */
2809         if (adapter->state == __IAVF_RUNNING) {
2810                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2811                 netif_carrier_off(adapter->netdev);
2812                 netif_tx_disable(adapter->netdev);
2813                 adapter->link_up = false;
2814                 iavf_napi_disable_all(adapter);
2815                 iavf_irq_disable(adapter);
2816                 iavf_free_traffic_irqs(adapter);
2817                 iavf_free_all_tx_resources(adapter);
2818                 iavf_free_all_rx_resources(adapter);
2819         }
2820
2821         spin_lock_bh(&adapter->mac_vlan_list_lock);
2822
2823         /* Delete all of the filters */
2824         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2825                 list_del(&f->list);
2826                 kfree(f);
2827         }
2828
2829         list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2830                 list_del(&fv->list);
2831                 kfree(fv);
2832         }
2833
2834         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2835
2836         spin_lock_bh(&adapter->cloud_filter_list_lock);
2837         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2838                 list_del(&cf->list);
2839                 kfree(cf);
2840                 adapter->num_cloud_filters--;
2841         }
2842         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2843
2844         iavf_free_misc_irq(adapter);
2845         iavf_reset_interrupt_capability(adapter);
2846         iavf_free_q_vectors(adapter);
2847         iavf_free_queues(adapter);
2848         memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2849         iavf_shutdown_adminq(&adapter->hw);
2850         adapter->netdev->flags &= ~IFF_UP;
2851         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2852         iavf_change_state(adapter, __IAVF_DOWN);
2853         wake_up(&adapter->down_waitqueue);
2854         dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2855 }
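
/* A standalone sketch (hypothetical struct; not driver code) of why the
 * _safe list iterator is required above: each node is freed inside the
 * loop, so plain list_for_each_entry() would chase a next pointer through
 * freed memory. The _safe variant caches the successor in a second cursor
 * before the loop body runs.
 */
struct example_filter {
        struct list_head list;
};

static inline void example_flush_filters(struct list_head *head)
{
        struct example_filter *f, *ftmp;

        list_for_each_entry_safe(f, ftmp, head, list) {
                list_del(&f->list);
                kfree(f);       /* safe: ftmp already points at the next node */
        }
}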
2856
2857 /**
2858  * iavf_reset_task - Call-back task to handle hardware reset
2859  * @work: pointer to work_struct
2860  *
2861  * During reset we need to shut down and reinitialize the admin queue
2862  * before we can use it to communicate with the PF again. We also clear
2863  * and reinit the rings because that context is lost as well.
2864  **/
2865 static void iavf_reset_task(struct work_struct *work)
2866 {
2867         struct iavf_adapter *adapter = container_of(work,
2868                                                       struct iavf_adapter,
2869                                                       reset_task);
2870         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2871         struct net_device *netdev = adapter->netdev;
2872         struct iavf_hw *hw = &adapter->hw;
2873         struct iavf_mac_filter *f, *ftmp;
2874         struct iavf_cloud_filter *cf;
2875         enum iavf_status status;
2876         u32 reg_val;
2877         int i = 0, err;
2878         bool running;
2879
	/* When the device is being removed it doesn't make sense to run the
	 * reset task; just return in such a case.
	 */
2883         if (!mutex_trylock(&adapter->crit_lock)) {
2884                 if (adapter->state != __IAVF_REMOVE)
2885                         queue_work(iavf_wq, &adapter->reset_task);
2886
2887                 return;
2888         }
2889
2890         while (!mutex_trylock(&adapter->client_lock))
2891                 usleep_range(500, 1000);
2892         if (CLIENT_ENABLED(adapter)) {
2893                 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2894                                     IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2895                                     IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2896                                     IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2897                 cancel_delayed_work_sync(&adapter->client_task);
2898                 iavf_notify_client_close(&adapter->vsi, true);
2899         }
2900         iavf_misc_irq_disable(adapter);
2901         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2902                 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2903                 /* Restart the AQ here. If we have been reset but didn't
2904                  * detect it, or if the PF had to reinit, our AQ will be hosed.
2905                  */
2906                 iavf_shutdown_adminq(hw);
2907                 iavf_init_adminq(hw);
2908                 iavf_request_reset(adapter);
2909         }
2910         adapter->flags |= IAVF_FLAG_RESET_PENDING;
2911
2912         /* poll until we see the reset actually happen */
2913         for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2914                 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2915                           IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2916                 if (!reg_val)
2917                         break;
2918                 usleep_range(5000, 10000);
2919         }
2920         if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2921                 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2922                 goto continue_reset; /* act like the reset happened */
2923         }
2924
2925         /* wait until the reset is complete and the PF is responding to us */
2926         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2927                 /* sleep first to make sure a minimum wait time is met */
2928                 msleep(IAVF_RESET_WAIT_MS);
2929
2930                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2931                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2932                 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2933                         break;
2934         }
2935
2936         pci_set_master(adapter->pdev);
2937         pci_restore_msi_state(adapter->pdev);
2938
2939         if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2940                 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2941                         reg_val);
2942                 iavf_disable_vf(adapter);
2943                 mutex_unlock(&adapter->client_lock);
2944                 mutex_unlock(&adapter->crit_lock);
2945                 return; /* Do not attempt to reinit. It's dead, Jim. */
2946         }
2947
2948 continue_reset:
2949         /* We don't use netif_running() because it may be true prior to
2950          * ndo_open() returning, so we can't assume it means all our open
2951          * tasks have finished, since we're not holding the rtnl_lock here.
2952          */
2953         running = adapter->state == __IAVF_RUNNING;
2954
2955         if (running) {
2956                 netif_carrier_off(netdev);
2957                 netif_tx_stop_all_queues(netdev);
2958                 adapter->link_up = false;
2959                 iavf_napi_disable_all(adapter);
2960         }
2961         iavf_irq_disable(adapter);
2962
2963         iavf_change_state(adapter, __IAVF_RESETTING);
2964         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2965
	/* Free the Tx/Rx rings and descriptors. It might be better to just
	 * reuse them at some point in the future.
	 */
2969         iavf_free_all_rx_resources(adapter);
2970         iavf_free_all_tx_resources(adapter);
2971
2972         adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2973         /* kill and reinit the admin queue */
2974         iavf_shutdown_adminq(hw);
2975         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2976         status = iavf_init_adminq(hw);
2977         if (status) {
2978                 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2979                          status);
2980                 goto reset_err;
2981         }
2982         adapter->aq_required = 0;
2983
2984         if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
2985             (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
2986                 err = iavf_reinit_interrupt_scheme(adapter);
2987                 if (err)
2988                         goto reset_err;
2989         }
2990
2991         if (RSS_AQ(adapter)) {
2992                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2993         } else {
2994                 err = iavf_init_rss(adapter);
2995                 if (err)
2996                         goto reset_err;
2997         }
2998
2999         adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	/* Always set this flag, since VIRTCHNL_OP_GET_VF_RESOURCES has not
	 * been sent/received yet and VLAN_V2_ALLOWED() is not reliable here.
	 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't actually go out until
	 * VIRTCHNL_OP_GET_VF_RESOURCES has completed and
	 * VIRTCHNL_VF_OFFLOAD_VLAN_V2 has been successfully negotiated.
	 */
3006         adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3007         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3008
3009         spin_lock_bh(&adapter->mac_vlan_list_lock);
3010
	/* Delete the filter for the current MAC address; it could have
	 * been changed by the PF via an administratively set MAC.
	 * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
3015         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3016                 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3017                         list_del(&f->list);
3018                         kfree(f);
3019                 }
3020         }
3021         /* re-add all MAC filters */
3022         list_for_each_entry(f, &adapter->mac_filter_list, list) {
3023                 f->add = true;
3024         }
3025         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3026
3027         /* check if TCs are running and re-add all cloud filters */
3028         spin_lock_bh(&adapter->cloud_filter_list_lock);
3029         if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3030             adapter->num_tc) {
3031                 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3032                         cf->add = true;
3033                 }
3034         }
3035         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3036
3037         adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3038         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3039         iavf_misc_irq_enable(adapter);
3040
3041         bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3042         bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3043
3044         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
3045
	/* If we were running when the reset started, we need to restore some
	 * of that state here.
	 */
3049         if (running) {
3050                 /* allocate transmit descriptors */
3051                 err = iavf_setup_all_tx_resources(adapter);
3052                 if (err)
3053                         goto reset_err;
3054
3055                 /* allocate receive descriptors */
3056                 err = iavf_setup_all_rx_resources(adapter);
3057                 if (err)
3058                         goto reset_err;
3059
3060                 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3061                     (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3062                         err = iavf_request_traffic_irqs(adapter, netdev->name);
3063                         if (err)
3064                                 goto reset_err;
3065
3066                         adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3067                 }
3068
3069                 iavf_configure(adapter);
3070
3071                 /* iavf_up_complete() will switch device back
3072                  * to __IAVF_RUNNING
3073                  */
3074                 iavf_up_complete(adapter);
3075
3076                 iavf_irq_enable(adapter, true);
3077         } else {
3078                 iavf_change_state(adapter, __IAVF_DOWN);
3079                 wake_up(&adapter->down_waitqueue);
3080         }
3081
3082         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3083
3084         mutex_unlock(&adapter->client_lock);
3085         mutex_unlock(&adapter->crit_lock);
3086
3087         return;
3088 reset_err:
3089         if (running) {
3090                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3091                 iavf_free_traffic_irqs(adapter);
3092         }
3093         iavf_disable_vf(adapter);
3094
3095         mutex_unlock(&adapter->client_lock);
3096         mutex_unlock(&adapter->crit_lock);
3097         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3098 }
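
/* iavf_reset_task() rides out a reset with two bounded polls: first the ARQ
 * enable bit is watched until hardware clears it (the reset has started),
 * then VFGEN_RSTAT is watched until the PF reports VFR_VFACTIVE (the reset
 * has finished). A condensed, hypothetical sketch of that sequence using the
 * same registers and timeouts as above:
 */
static bool __maybe_unused iavf_example_wait_for_reset(struct iavf_hw *hw)
{
	u32 reg_val;
	int i;

	/* phase 1: the reset has started once hardware clears ARQENABLE */
	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}

	/* phase 2: the reset is done once the PF reports VFR_VFACTIVE */
	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
		msleep(IAVF_RESET_WAIT_MS);
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			return true;
	}

	return false;
}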
3099
3100 /**
3101  * iavf_adminq_task - worker thread to clean the admin queue
3102  * @work: pointer to work_struct containing our data
3103  **/
3104 static void iavf_adminq_task(struct work_struct *work)
3105 {
3106         struct iavf_adapter *adapter =
3107                 container_of(work, struct iavf_adapter, adminq_task);
3108         struct iavf_hw *hw = &adapter->hw;
3109         struct iavf_arq_event_info event;
3110         enum virtchnl_ops v_op;
3111         enum iavf_status ret, v_ret;
3112         u32 val, oldval;
3113         u16 pending;
3114
3115         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3116                 goto out;
3117
3118         if (!mutex_trylock(&adapter->crit_lock)) {
3119                 if (adapter->state == __IAVF_REMOVE)
3120                         return;
3121
3122                 queue_work(iavf_wq, &adapter->adminq_task);
3123                 goto out;
3124         }
3125
3126         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3127         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3128         if (!event.msg_buf)
3129                 goto out;
3130
3131         do {
3132                 ret = iavf_clean_arq_element(hw, &event, &pending);
3133                 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3134                 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3135
3136                 if (ret || !v_op)
3137                         break; /* No event to process or error cleaning ARQ */
3138
3139                 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3140                                          event.msg_len);
3141                 if (pending != 0)
3142                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3143         } while (pending);
3144         mutex_unlock(&adapter->crit_lock);
3145
3146         if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
3147                 if (adapter->netdev_registered ||
3148                     !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3149                         struct net_device *netdev = adapter->netdev;
3150
3151                         rtnl_lock();
3152                         netdev_update_features(netdev);
3153                         rtnl_unlock();
3154                         /* Request VLAN offload settings */
3155                         if (VLAN_V2_ALLOWED(adapter))
				iavf_set_vlan_offload_features(adapter, 0,
							       netdev->features);
3158
3159                         iavf_set_queue_vlan_tag_loc(adapter);
3160                 }
3161
3162                 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3163         }
3164         if ((adapter->flags &
3165              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3166             adapter->state == __IAVF_RESETTING)
3167                 goto freedom;
3168
3169         /* check for error indications */
3170         val = rd32(hw, hw->aq.arq.len);
3171         if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3172                 goto freedom;
3173         oldval = val;
3174         if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3175                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3176                 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3177         }
3178         if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3179                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3180                 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3181         }
3182         if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3183                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3184                 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3185         }
3186         if (oldval != val)
3187                 wr32(hw, hw->aq.arq.len, val);
3188
3189         val = rd32(hw, hw->aq.asq.len);
3190         oldval = val;
3191         if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3192                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3193                 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3194         }
3195         if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3196                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3197                 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3198         }
3199         if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3200                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3201                 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3202         }
3203         if (oldval != val)
3204                 wr32(hw, hw->aq.asq.len, val);
3205
3206 freedom:
3207         kfree(event.msg_buf);
3208 out:
3209         /* re-enable Admin queue interrupt cause */
3210         iavf_misc_irq_enable(adapter);
3211 }
3212
3213 /**
3214  * iavf_client_task - worker thread to perform client work
3215  * @work: pointer to work_struct containing our data
3216  *
3217  * This task handles client interactions. Because client calls can be
3218  * reentrant, we can't handle them in the watchdog.
3219  **/
3220 static void iavf_client_task(struct work_struct *work)
3221 {
3222         struct iavf_adapter *adapter =
3223                 container_of(work, struct iavf_adapter, client_task.work);
3224
	/* If we can't take the client lock, just give up. We'll be
	 * rescheduled later.
	 */
	if (!mutex_trylock(&adapter->client_lock))
3230                 return;
3231
3232         if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3233                 iavf_client_subtask(adapter);
3234                 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3235                 goto out;
3236         }
3237         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3238                 iavf_notify_client_l2_params(&adapter->vsi);
3239                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3240                 goto out;
3241         }
3242         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3243                 iavf_notify_client_close(&adapter->vsi, false);
3244                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3245                 goto out;
3246         }
3247         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3248                 iavf_notify_client_open(&adapter->vsi);
3249                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3250         }
3251 out:
3252         mutex_unlock(&adapter->client_lock);
3253 }
3254
3255 /**
3256  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3257  * @adapter: board private structure
3258  *
3259  * Free all transmit software resources
3260  **/
3261 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3262 {
3263         int i;
3264
3265         if (!adapter->tx_rings)
3266                 return;
3267
3268         for (i = 0; i < adapter->num_active_queues; i++)
3269                 if (adapter->tx_rings[i].desc)
3270                         iavf_free_tx_resources(&adapter->tx_rings[i]);
3271 }
3272
3273 /**
3274  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3275  * @adapter: board private structure
3276  *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
3280  *
3281  * Return 0 on success, negative on failure
3282  **/
3283 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3284 {
3285         int i, err = 0;
3286
3287         for (i = 0; i < adapter->num_active_queues; i++) {
3288                 adapter->tx_rings[i].count = adapter->tx_desc_count;
3289                 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3290                 if (!err)
3291                         continue;
3292                 dev_err(&adapter->pdev->dev,
3293                         "Allocation for Tx Queue %u failed\n", i);
3294                 break;
3295         }
3296
3297         return err;
3298 }
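
/* Per the note above, a failed setup can leave earlier rings allocated. A
 * hypothetical caller discharging the cleanup duty; iavf_free_all_tx_resources()
 * is safe to call here because it skips rings whose descriptors were never
 * allocated. The Rx path below follows the same contract.
 */
static int __maybe_unused iavf_example_setup_tx(struct iavf_adapter *adapter)
{
	int err = iavf_setup_all_tx_resources(adapter);

	if (err)
		iavf_free_all_tx_resources(adapter);	/* reap orphaned rings */

	return err;
}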
3299
3300 /**
3301  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3302  * @adapter: board private structure
3303  *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
3307  *
3308  * Return 0 on success, negative on failure
3309  **/
3310 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3311 {
3312         int i, err = 0;
3313
3314         for (i = 0; i < adapter->num_active_queues; i++) {
3315                 adapter->rx_rings[i].count = adapter->rx_desc_count;
3316                 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3317                 if (!err)
3318                         continue;
3319                 dev_err(&adapter->pdev->dev,
3320                         "Allocation for Rx Queue %u failed\n", i);
3321                 break;
3322         }
3323         return err;
3324 }
3325
3326 /**
3327  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3328  * @adapter: board private structure
3329  *
3330  * Free all receive software resources
3331  **/
3332 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3333 {
3334         int i;
3335
3336         if (!adapter->rx_rings)
3337                 return;
3338
3339         for (i = 0; i < adapter->num_active_queues; i++)
3340                 if (adapter->rx_rings[i].desc)
3341                         iavf_free_rx_resources(&adapter->rx_rings[i]);
3342 }
3343
3344 /**
3345  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3346  * @adapter: board private structure
3347  * @max_tx_rate: max Tx bw for a tc
3348  **/
3349 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3350                                       u64 max_tx_rate)
3351 {
3352         int speed = 0, ret = 0;
3353
3354         if (ADV_LINK_SUPPORT(adapter)) {
3355                 if (adapter->link_speed_mbps < U32_MAX) {
3356                         speed = adapter->link_speed_mbps;
3357                         goto validate_bw;
3358                 } else {
3359                         dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3360                         return -EINVAL;
3361                 }
3362         }
3363
3364         switch (adapter->link_speed) {
3365         case VIRTCHNL_LINK_SPEED_40GB:
3366                 speed = SPEED_40000;
3367                 break;
3368         case VIRTCHNL_LINK_SPEED_25GB:
3369                 speed = SPEED_25000;
3370                 break;
3371         case VIRTCHNL_LINK_SPEED_20GB:
3372                 speed = SPEED_20000;
3373                 break;
3374         case VIRTCHNL_LINK_SPEED_10GB:
3375                 speed = SPEED_10000;
3376                 break;
3377         case VIRTCHNL_LINK_SPEED_5GB:
3378                 speed = SPEED_5000;
3379                 break;
3380         case VIRTCHNL_LINK_SPEED_2_5GB:
3381                 speed = SPEED_2500;
3382                 break;
3383         case VIRTCHNL_LINK_SPEED_1GB:
3384                 speed = SPEED_1000;
3385                 break;
3386         case VIRTCHNL_LINK_SPEED_100MB:
3387                 speed = SPEED_100;
3388                 break;
3389         default:
3390                 break;
3391         }
3392
3393 validate_bw:
3394         if (max_tx_rate > speed) {
3395                 dev_err(&adapter->pdev->dev,
3396                         "Invalid tx rate specified\n");
3397                 ret = -EINVAL;
3398         }
3399
3400         return ret;
3401 }
3402
3403 /**
3404  * iavf_validate_ch_config - validate queue mapping info
3405  * @adapter: board private structure
3406  * @mqprio_qopt: queue parameters
3407  *
3408  * This function validates if the config provided by the user to
3409  * configure queue channels is valid or not. Returns 0 on a valid
3410  * config.
3411  **/
3412 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3413                                    struct tc_mqprio_qopt_offload *mqprio_qopt)
3414 {
3415         u64 total_max_rate = 0;
3416         u32 tx_rate_rem = 0;
3417         int i, num_qps = 0;
3418         u64 tx_rate = 0;
3419         int ret = 0;
3420
3421         if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3422             mqprio_qopt->qopt.num_tc < 1)
3423                 return -EINVAL;
3424
	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
3426                 if (!mqprio_qopt->qopt.count[i] ||
3427                     mqprio_qopt->qopt.offset[i] != num_qps)
3428                         return -EINVAL;
3429                 if (mqprio_qopt->min_rate[i]) {
3430                         dev_err(&adapter->pdev->dev,
3431                                 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3432                                 i);
3433                         return -EINVAL;
3434                 }
3435
3436                 /* convert to Mbps */
3437                 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3438                                   IAVF_MBPS_DIVISOR);
3439
3440                 if (mqprio_qopt->max_rate[i] &&
3441                     tx_rate < IAVF_MBPS_QUANTA) {
3442                         dev_err(&adapter->pdev->dev,
3443                                 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3444                                 i, IAVF_MBPS_QUANTA);
3445                         return -EINVAL;
3446                 }
3447
3448                 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3449
3450                 if (tx_rate_rem != 0) {
3451                         dev_err(&adapter->pdev->dev,
3452                                 "Invalid max tx rate for TC%d, not divisible by %d\n",
3453                                 i, IAVF_MBPS_QUANTA);
3454                         return -EINVAL;
3455                 }
3456
3457                 total_max_rate += tx_rate;
3458                 num_qps += mqprio_qopt->qopt.count[i];
3459         }
3460         if (num_qps > adapter->num_active_queues) {
3461                 dev_err(&adapter->pdev->dev,
3462                         "Cannot support requested number of queues\n");
3463                 return -EINVAL;
3464         }
3465
3466         ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3467         return ret;
3468 }
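
/* The mqprio ABI expresses per-TC rates in bytes per second, while the
 * driver and virtchnl work in Mbps, hence the div_u64() by IAVF_MBPS_DIVISOR
 * above. A hypothetical worked example of the conversion, assuming the
 * divisor's usual definition of 125000 bytes/s per Mbit/s:
 */
static u64 __maybe_unused iavf_example_rate_to_mbps(void)
{
	u64 bytes_per_sec = 1250000000ULL;	/* i.e. 10 Gbit/s from tc */
	u64 mbps = div_u64(bytes_per_sec, IAVF_MBPS_DIVISOR);

	/* mbps is 10000 here, which is also a multiple of IAVF_MBPS_QUANTA,
	 * so iavf_validate_ch_config() would accept this rate.
	 */
	return mbps;
}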
3469
3470 /**
3471  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3472  * @adapter: board private structure
3473  **/
3474 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3475 {
3476         struct iavf_cloud_filter *cf, *cftmp;
3477
3478         spin_lock_bh(&adapter->cloud_filter_list_lock);
3479         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3480                                  list) {
3481                 list_del(&cf->list);
3482                 kfree(cf);
3483                 adapter->num_cloud_filters--;
3484         }
3485         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3486 }
3487
3488 /**
3489  * __iavf_setup_tc - configure multiple traffic classes
3490  * @netdev: network interface device structure
3491  * @type_data: tc offload data
3492  *
3493  * This function processes the config information provided by the
3494  * user to configure traffic classes/queue channels and packages the
3495  * information to request the PF to setup traffic classes.
3496  *
3497  * Returns 0 on success.
3498  **/
3499 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3500 {
3501         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3502         struct iavf_adapter *adapter = netdev_priv(netdev);
3503         struct virtchnl_vf_resource *vfres = adapter->vf_res;
3504         u8 num_tc = 0, total_qps = 0;
3505         int ret = 0, netdev_tc = 0;
3506         u64 max_tx_rate;
3507         u16 mode;
3508         int i;
3509
3510         num_tc = mqprio_qopt->qopt.num_tc;
3511         mode = mqprio_qopt->mode;
3512
3513         /* delete queue_channel */
3514         if (!mqprio_qopt->qopt.hw) {
3515                 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3516                         /* reset the tc configuration */
3517                         netdev_reset_tc(netdev);
3518                         adapter->num_tc = 0;
3519                         netif_tx_stop_all_queues(netdev);
3520                         netif_tx_disable(netdev);
3521                         iavf_del_all_cloud_filters(adapter);
3522                         adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3523                         total_qps = adapter->orig_num_active_queues;
3524                         goto exit;
3525                 } else {
3526                         return -EINVAL;
3527                 }
3528         }
3529
3530         /* add queue channel */
3531         if (mode == TC_MQPRIO_MODE_CHANNEL) {
3532                 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3533                         dev_err(&adapter->pdev->dev, "ADq not supported\n");
3534                         return -EOPNOTSUPP;
3535                 }
3536                 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3537                         dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3538                         return -EINVAL;
3539                 }
3540
3541                 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3542                 if (ret)
3543                         return ret;
3544                 /* Return if same TC config is requested */
3545                 if (adapter->num_tc == num_tc)
3546                         return 0;
3547                 adapter->num_tc = num_tc;
3548
3549                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3550                         if (i < num_tc) {
3551                                 adapter->ch_config.ch_info[i].count =
3552                                         mqprio_qopt->qopt.count[i];
3553                                 adapter->ch_config.ch_info[i].offset =
3554                                         mqprio_qopt->qopt.offset[i];
3555                                 total_qps += mqprio_qopt->qopt.count[i];
3556                                 max_tx_rate = mqprio_qopt->max_rate[i];
3557                                 /* convert to Mbps */
3558                                 max_tx_rate = div_u64(max_tx_rate,
3559                                                       IAVF_MBPS_DIVISOR);
3560                                 adapter->ch_config.ch_info[i].max_tx_rate =
3561                                         max_tx_rate;
3562                         } else {
3563                                 adapter->ch_config.ch_info[i].count = 1;
3564                                 adapter->ch_config.ch_info[i].offset = 0;
3565                         }
3566                 }
3567
		/* Take a snapshot of the original config, e.g.
		 * num_active_queues. It is used later when the ADQ delete
		 * flow is exercised, so that once that flow completes the VF
		 * can go back to its original queue configuration.
		 */
		adapter->orig_num_active_queues = adapter->num_active_queues;

		/* Store the per-TC queue info so that the VF gets configured
		 * with the correct number of queues when it completes the
		 * ADQ configuration flow.
		 */
3580                 adapter->ch_config.total_qps = total_qps;
3581
3582                 netif_tx_stop_all_queues(netdev);
3583                 netif_tx_disable(netdev);
3584                 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3585                 netdev_reset_tc(netdev);
3586                 /* Report the tc mapping up the stack */
3587                 netdev_set_num_tc(adapter->netdev, num_tc);
3588                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3589                         u16 qcount = mqprio_qopt->qopt.count[i];
3590                         u16 qoffset = mqprio_qopt->qopt.offset[i];
3591
3592                         if (i < num_tc)
3593                                 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3594                                                     qoffset);
3595                 }
3596         }
3597 exit:
3598         if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3599                 return 0;
3600
3601         netif_set_real_num_rx_queues(netdev, total_qps);
3602         netif_set_real_num_tx_queues(netdev, total_qps);
3603
3604         return ret;
3605 }
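
/* A hypothetical caller-side view of the channel-mode request handled above:
 * two traffic classes, four queues each, with TC1 capped at 500 Mbps. Field
 * names follow struct tc_mqprio_qopt_offload as used in this function;
 * max_rate is in bytes per second as supplied by the stack.
 */
static int __maybe_unused iavf_example_setup_two_tcs(struct net_device *netdev)
{
	struct tc_mqprio_qopt_offload qopt = { };

	qopt.mode = TC_MQPRIO_MODE_CHANNEL;
	qopt.qopt.hw = 1;			/* offload rather than emulate */
	qopt.qopt.num_tc = 2;
	qopt.qopt.count[0] = 4;
	qopt.qopt.offset[0] = 0;
	qopt.qopt.count[1] = 4;
	qopt.qopt.offset[1] = 4;
	qopt.max_rate[1] = 62500000ULL;		/* 500 Mbit/s in bytes/s */

	return __iavf_setup_tc(netdev, &qopt);
}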
3606
3607 /**
3608  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3609  * @adapter: board private structure
3610  * @f: pointer to struct flow_cls_offload
3611  * @filter: pointer to cloud filter structure
3612  */
3613 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3614                                  struct flow_cls_offload *f,
3615                                  struct iavf_cloud_filter *filter)
3616 {
3617         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3618         struct flow_dissector *dissector = rule->match.dissector;
3619         u16 n_proto_mask = 0;
3620         u16 n_proto_key = 0;
3621         u8 field_flags = 0;
3622         u16 addr_type = 0;
3623         u16 n_proto = 0;
3624         int i = 0;
3625         struct virtchnl_filter *vf = &filter->f;
3626
3627         if (dissector->used_keys &
3628             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3629               BIT(FLOW_DISSECTOR_KEY_BASIC) |
3630               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3631               BIT(FLOW_DISSECTOR_KEY_VLAN) |
3632               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3633               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3634               BIT(FLOW_DISSECTOR_KEY_PORTS) |
3635               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3636                 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3637                         dissector->used_keys);
3638                 return -EOPNOTSUPP;
3639         }
3640
3641         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3642                 struct flow_match_enc_keyid match;
3643
3644                 flow_rule_match_enc_keyid(rule, &match);
3645                 if (match.mask->keyid != 0)
3646                         field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3647         }
3648
3649         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3650                 struct flow_match_basic match;
3651
3652                 flow_rule_match_basic(rule, &match);
3653                 n_proto_key = ntohs(match.key->n_proto);
3654                 n_proto_mask = ntohs(match.mask->n_proto);
3655
3656                 if (n_proto_key == ETH_P_ALL) {
3657                         n_proto_key = 0;
3658                         n_proto_mask = 0;
3659                 }
3660                 n_proto = n_proto_key & n_proto_mask;
3661                 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3662                         return -EINVAL;
3663                 if (n_proto == ETH_P_IPV6) {
3664                         /* specify flow type as TCP IPv6 */
3665                         vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3666                 }
3667
3668                 if (match.key->ip_proto != IPPROTO_TCP) {
3669                         dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3670                         return -EINVAL;
3671                 }
3672         }
3673
3674         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3675                 struct flow_match_eth_addrs match;
3676
3677                 flow_rule_match_eth_addrs(rule, &match);
3678
		/* use is_broadcast and is_zero to check for all 0xff or all 0 */
3680                 if (!is_zero_ether_addr(match.mask->dst)) {
3681                         if (is_broadcast_ether_addr(match.mask->dst)) {
3682                                 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3683                         } else {
3684                                 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3685                                         match.mask->dst);
3686                                 return -EINVAL;
3687                         }
3688                 }
3689
3690                 if (!is_zero_ether_addr(match.mask->src)) {
3691                         if (is_broadcast_ether_addr(match.mask->src)) {
3692                                 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3693                         } else {
3694                                 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3695                                         match.mask->src);
3696                                 return -EINVAL;
3697                         }
3698                 }
3699
3700                 if (!is_zero_ether_addr(match.key->dst))
3701                         if (is_valid_ether_addr(match.key->dst) ||
3702                             is_multicast_ether_addr(match.key->dst)) {
3703                                 /* set the mask if a valid dst_mac address */
3704                                 for (i = 0; i < ETH_ALEN; i++)
3705                                         vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3706                                 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3707                                                 match.key->dst);
3708                         }
3709
		if (!is_zero_ether_addr(match.key->src))
			if (is_valid_ether_addr(match.key->src) ||
			    is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						match.key->src);
			}
3719         }
3720
3721         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3722                 struct flow_match_vlan match;
3723
3724                 flow_rule_match_vlan(rule, &match);
3725                 if (match.mask->vlan_id) {
3726                         if (match.mask->vlan_id == VLAN_VID_MASK) {
3727                                 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3728                         } else {
3729                                 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3730                                         match.mask->vlan_id);
3731                                 return -EINVAL;
3732                         }
3733                 }
3734                 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3735                 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3736         }
3737
3738         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3739                 struct flow_match_control match;
3740
3741                 flow_rule_match_control(rule, &match);
3742                 addr_type = match.key->addr_type;
3743         }
3744
3745         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3746                 struct flow_match_ipv4_addrs match;
3747
3748                 flow_rule_match_ipv4_addrs(rule, &match);
3749                 if (match.mask->dst) {
3750                         if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3751                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3752                         } else {
3753                                 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3754                                         be32_to_cpu(match.mask->dst));
3755                                 return -EINVAL;
3756                         }
3757                 }
3758
3759                 if (match.mask->src) {
3760                         if (match.mask->src == cpu_to_be32(0xffffffff)) {
3761                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3762                         } else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
3765                                 return -EINVAL;
3766                         }
3767                 }
3768
3769                 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3770                         dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3771                         return -EINVAL;
3772                 }
3773                 if (match.key->dst) {
3774                         vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3775                         vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3776                 }
3777                 if (match.key->src) {
3778                         vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3779                         vf->data.tcp_spec.src_ip[0] = match.key->src;
3780                 }
3781         }
3782
3783         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3784                 struct flow_match_ipv6_addrs match;
3785
3786                 flow_rule_match_ipv6_addrs(rule, &match);
3787
3788                 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3789                 if (ipv6_addr_any(&match.mask->dst)) {
3790                         dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3791                                 IPV6_ADDR_ANY);
3792                         return -EINVAL;
3793                 }
3794
3795                 /* src and dest IPv6 address should not be LOOPBACK
3796                  * (0:0:0:0:0:0:0:1) which can be represented as ::1
3797                  */
3798                 if (ipv6_addr_loopback(&match.key->dst) ||
3799                     ipv6_addr_loopback(&match.key->src)) {
3800                         dev_err(&adapter->pdev->dev,
3801                                 "ipv6 addr should not be loopback\n");
3802                         return -EINVAL;
3803                 }
3804                 if (!ipv6_addr_any(&match.mask->dst) ||
3805                     !ipv6_addr_any(&match.mask->src))
3806                         field_flags |= IAVF_CLOUD_FIELD_IIP;
3807
3808                 for (i = 0; i < 4; i++)
3809                         vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3810                 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3811                        sizeof(vf->data.tcp_spec.dst_ip));
3812                 for (i = 0; i < 4; i++)
3813                         vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3814                 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3815                        sizeof(vf->data.tcp_spec.src_ip));
3816         }
3817         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3818                 struct flow_match_ports match;
3819
3820                 flow_rule_match_ports(rule, &match);
3821                 if (match.mask->src) {
3822                         if (match.mask->src == cpu_to_be16(0xffff)) {
3823                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3824                         } else {
3825                                 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3826                                         be16_to_cpu(match.mask->src));
3827                                 return -EINVAL;
3828                         }
3829                 }
3830
3831                 if (match.mask->dst) {
3832                         if (match.mask->dst == cpu_to_be16(0xffff)) {
3833                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3834                         } else {
3835                                 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3836                                         be16_to_cpu(match.mask->dst));
3837                                 return -EINVAL;
3838                         }
3839                 }
3840                 if (match.key->dst) {
3841                         vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3842                         vf->data.tcp_spec.dst_port = match.key->dst;
3843                 }
3844
3845                 if (match.key->src) {
3846                         vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3847                         vf->data.tcp_spec.src_port = match.key->src;
3848                 }
3849         }
3850         vf->field_flags = field_flags;
3851
3852         return 0;
3853 }
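
/* The parser above accepts only all-ones ("match exactly") or all-zero
 * ("don't care") masks for each key; anything in between is rejected with
 * -EINVAL. A hypothetical predicate capturing that rule for Ethernet
 * address masks:
 */
static bool __maybe_unused iavf_example_mac_mask_ok(const u8 *mask)
{
	/* either ignore the field entirely or match all 48 bits */
	return is_zero_ether_addr(mask) || is_broadcast_ether_addr(mask);
}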
3854
3855 /**
3856  * iavf_handle_tclass - Forward to a traffic class on the device
3857  * @adapter: board private structure
3858  * @tc: traffic class index on the device
3859  * @filter: pointer to cloud filter structure
3860  */
3861 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3862                               struct iavf_cloud_filter *filter)
3863 {
3864         if (tc == 0)
3865                 return 0;
3866         if (tc < adapter->num_tc) {
3867                 if (!filter->f.data.tcp_spec.dst_port) {
3868                         dev_err(&adapter->pdev->dev,
3869                                 "Specify destination port to redirect to traffic class other than TC0\n");
3870                         return -EINVAL;
3871                 }
3872         }
3873         /* redirect to a traffic class on the same device */
3874         filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3875         filter->f.action_meta = tc;
3876         return 0;
3877 }
3878
3879 /**
3880  * iavf_find_cf - Find the cloud filter in the list
3881  * @adapter: Board private structure
3882  * @cookie: filter specific cookie
3883  *
3884  * Returns ptr to the filter object or NULL. Must be called while holding the
3885  * cloud_filter_list_lock.
3886  */
3887 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3888                                               unsigned long *cookie)
3889 {
3890         struct iavf_cloud_filter *filter = NULL;
3891
3892         if (!cookie)
3893                 return NULL;
3894
3895         list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3896                 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3897                         return filter;
3898         }
3899         return NULL;
3900 }
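
/* iavf_find_cf() deliberately takes no lock itself so that callers can
 * combine the lookup with list mutation in one critical section (see
 * iavf_configure_clsflower() below). A minimal hypothetical caller honoring
 * the documented locking contract:
 */
static bool __maybe_unused iavf_example_cf_exists(struct iavf_adapter *adapter,
						  unsigned long *cookie)
{
	bool found;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	found = !!iavf_find_cf(adapter, cookie);
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return found;
}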
3901
3902 /**
3903  * iavf_configure_clsflower - Add tc flower filters
3904  * @adapter: board private structure
3905  * @cls_flower: Pointer to struct flow_cls_offload
3906  */
3907 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3908                                     struct flow_cls_offload *cls_flower)
3909 {
3910         int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3911         struct iavf_cloud_filter *filter = NULL;
3912         int err = -EINVAL, count = 50;
3913
3914         if (tc < 0) {
3915                 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3916                 return -EINVAL;
3917         }
3918
3919         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3920         if (!filter)
3921                 return -ENOMEM;
3922
3923         while (!mutex_trylock(&adapter->crit_lock)) {
3924                 if (--count == 0) {
3925                         kfree(filter);
3926                         return err;
3927                 }
3928                 udelay(1);
3929         }
3930
3931         filter->cookie = cls_flower->cookie;
3932
3933         /* bail out here if filter already exists */
3934         spin_lock_bh(&adapter->cloud_filter_list_lock);
3935         if (iavf_find_cf(adapter, &cls_flower->cookie)) {
3936                 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
3937                 err = -EEXIST;
3938                 goto spin_unlock;
3939         }
3940         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3941
3942         /* set the mask to all zeroes to begin with */
3943         memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 */
3945         filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3946         err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3947         if (err)
3948                 goto err;
3949
3950         err = iavf_handle_tclass(adapter, tc, filter);
3951         if (err)
3952                 goto err;
3953
3954         /* add filter to the list */
3955         spin_lock_bh(&adapter->cloud_filter_list_lock);
3956         list_add_tail(&filter->list, &adapter->cloud_filter_list);
3957         adapter->num_cloud_filters++;
3958         filter->add = true;
3959         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3960 spin_unlock:
3961         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3962 err:
3963         if (err)
3964                 kfree(filter);
3965
3966         mutex_unlock(&adapter->crit_lock);
3967         return err;
3968 }
3969
3970 /**
3971  * iavf_delete_clsflower - Remove tc flower filters
3972  * @adapter: board private structure
3973  * @cls_flower: Pointer to struct flow_cls_offload
3974  */
3975 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3976                                  struct flow_cls_offload *cls_flower)
3977 {
3978         struct iavf_cloud_filter *filter = NULL;
3979         int err = 0;
3980
3981         spin_lock_bh(&adapter->cloud_filter_list_lock);
3982         filter = iavf_find_cf(adapter, &cls_flower->cookie);
3983         if (filter) {
3984                 filter->del = true;
3985                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3986         } else {
3987                 err = -EINVAL;
3988         }
3989         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3990
3991         return err;
3992 }
3993
3994 /**
3995  * iavf_setup_tc_cls_flower - flower classifier offloads
3996  * @adapter: board private structure
3997  * @cls_flower: pointer to flow_cls_offload struct with flow info
3998  */
3999 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4000                                     struct flow_cls_offload *cls_flower)
4001 {
4002         switch (cls_flower->command) {
4003         case FLOW_CLS_REPLACE:
4004                 return iavf_configure_clsflower(adapter, cls_flower);
4005         case FLOW_CLS_DESTROY:
4006                 return iavf_delete_clsflower(adapter, cls_flower);
4007         case FLOW_CLS_STATS:
4008                 return -EOPNOTSUPP;
4009         default:
4010                 return -EOPNOTSUPP;
4011         }
4012 }
4013
4014 /**
4015  * iavf_setup_tc_block_cb - block callback for tc
4016  * @type: type of offload
4017  * @type_data: offload data
 * @cb_priv: adapter private data, registered with the block callback
4019  *
4020  * This function is the block callback for traffic classes
4021  **/
4022 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4023                                   void *cb_priv)
4024 {
4025         struct iavf_adapter *adapter = cb_priv;
4026
4027         if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4028                 return -EOPNOTSUPP;
4029
4030         switch (type) {
4031         case TC_SETUP_CLSFLOWER:
4032                 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4033         default:
4034                 return -EOPNOTSUPP;
4035         }
4036 }
4037
4038 static LIST_HEAD(iavf_block_cb_list);
4039
4040 /**
4041  * iavf_setup_tc - configure multiple traffic classes
4042  * @netdev: network interface device structure
4043  * @type: type of offload
4044  * @type_data: tc offload data
4045  *
4046  * This function is the callback to ndo_setup_tc in the
4047  * netdev_ops.
4048  *
4049  * Returns 0 on success
4050  **/
4051 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4052                          void *type_data)
4053 {
4054         struct iavf_adapter *adapter = netdev_priv(netdev);
4055
4056         switch (type) {
4057         case TC_SETUP_QDISC_MQPRIO:
4058                 return __iavf_setup_tc(netdev, type_data);
4059         case TC_SETUP_BLOCK:
4060                 return flow_block_cb_setup_simple(type_data,
4061                                                   &iavf_block_cb_list,
4062                                                   iavf_setup_tc_block_cb,
4063                                                   adapter, adapter, true);
4064         default:
4065                 return -EOPNOTSUPP;
4066         }
4067 }
4068
4069 /**
4070  * iavf_open - Called when a network interface is made active
4071  * @netdev: network interface device structure
4072  *
4073  * Returns 0 on success, negative value on failure
4074  *
4075  * The open entry point is called when a network interface is made
4076  * active by the system (IFF_UP).  At this point all resources needed
4077  * for transmit and receive operations are allocated, the interrupt
4078  * handler is registered with the OS, the watchdog is started,
4079  * and the stack is notified that the interface is ready.
4080  **/
4081 static int iavf_open(struct net_device *netdev)
4082 {
4083         struct iavf_adapter *adapter = netdev_priv(netdev);
4084         int err;
4085
4086         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4087                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4088                 return -EIO;
4089         }
4090
4091         while (!mutex_trylock(&adapter->crit_lock)) {
4092                 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4093                  * is already taken and iavf_open is called from an upper
4094                  * device's notifier reacting on NETDEV_REGISTER event.
4095                  * We have to leave here to avoid dead lock.
4096                  */
4097                 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4098                         return -EBUSY;
4099
4100                 usleep_range(500, 1000);
4101         }
4102
	if (adapter->state == __IAVF_RUNNING &&
	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
		err = 0;
		goto err_unlock;
	}

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}
4114
4115         /* allocate transmit descriptors */
4116         err = iavf_setup_all_tx_resources(adapter);
4117         if (err)
4118                 goto err_setup_tx;
4119
4120         /* allocate receive descriptors */
4121         err = iavf_setup_all_rx_resources(adapter);
4122         if (err)
4123                 goto err_setup_rx;
4124
4125         /* clear any pending interrupts, may auto mask */
4126         err = iavf_request_traffic_irqs(adapter, netdev->name);
4127         if (err)
4128                 goto err_req_irq;
4129
4130         spin_lock_bh(&adapter->mac_vlan_list_lock);
4131
4132         iavf_add_filter(adapter, adapter->hw.mac.addr);
4133
4134         spin_unlock_bh(&adapter->mac_vlan_list_lock);
4135
4136         /* Restore VLAN filters that were removed with IFF_DOWN */
4137         iavf_restore_filters(adapter);
4138
4139         iavf_configure(adapter);
4140
4141         iavf_up_complete(adapter);
4142
4143         iavf_irq_enable(adapter, true);
4144
4145         mutex_unlock(&adapter->crit_lock);
4146
4147         return 0;
4148
4149 err_req_irq:
4150         iavf_down(adapter);
4151         iavf_free_traffic_irqs(adapter);
4152 err_setup_rx:
4153         iavf_free_all_rx_resources(adapter);
4154 err_setup_tx:
4155         iavf_free_all_tx_resources(adapter);
4156 err_unlock:
4157         mutex_unlock(&adapter->crit_lock);
4158
4159         return err;
4160 }
4161
4162 /**
4163  * iavf_close - Disables a network interface
4164  * @netdev: network interface device structure
4165  *
4166  * Returns 0, this is not allowed to fail
4167  *
4168  * The close entry point is called when an interface is de-activated
4169  * by the OS.  The hardware is still under the drivers control, but
4170  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4171  * are freed, along with all transmit and receive resources.
4172  **/
4173 static int iavf_close(struct net_device *netdev)
4174 {
4175         struct iavf_adapter *adapter = netdev_priv(netdev);
4176         int status;
4177
4178         mutex_lock(&adapter->crit_lock);
4179
4180         if (adapter->state <= __IAVF_DOWN_PENDING) {
4181                 mutex_unlock(&adapter->crit_lock);
4182                 return 0;
4183         }
4184
4185         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4186         if (CLIENT_ENABLED(adapter))
4187                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4188
4189         iavf_down(adapter);
4190         iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4191         iavf_free_traffic_irqs(adapter);
4192
4193         mutex_unlock(&adapter->crit_lock);
4194
4195         /* We explicitly don't free resources here because the hardware is
4196          * still active and can DMA into memory. Resources are cleared in
4197          * iavf_virtchnl_completion() after we get confirmation from the PF
4198          * driver that the rings have been stopped.
4199          *
4200          * Also, we wait for state to transition to __IAVF_DOWN before
4201          * returning. State change occurs in iavf_virtchnl_completion() after
4202          * VF resources are released (which occurs after PF driver processes and
4203          * responds to admin queue commands).
4204          */
4205
4206         status = wait_event_timeout(adapter->down_waitqueue,
4207                                     adapter->state == __IAVF_DOWN,
4208                                     msecs_to_jiffies(500));
4209         if (!status)
4210                 netdev_warn(netdev, "Device resources not yet released\n");
4211         return 0;
4212 }
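
/* The wait at the end of iavf_close() pairs with a wake_up() on
 * down_waitqueue, issued by iavf_disable_vf() and the reset task when they
 * move the adapter to __IAVF_DOWN. A condensed sketch of the producer side
 * of that handshake, assuming crit_lock is held as in those callers:
 */
static void __maybe_unused iavf_example_signal_down(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_DOWN);
	wake_up(&adapter->down_waitqueue);	/* releases the iavf_close() waiter */
}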
4213
4214 /**
4215  * iavf_change_mtu - Change the Maximum Transfer Unit
4216  * @netdev: network interface device structure
4217  * @new_mtu: new value for maximum frame size
4218  *
4219  * Returns 0 on success, negative on failure
4220  **/
4221 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4222 {
4223         struct iavf_adapter *adapter = netdev_priv(netdev);
4224
4225         netdev_dbg(netdev, "changing MTU from %d to %d\n",
4226                    netdev->mtu, new_mtu);
4227         netdev->mtu = new_mtu;
4228         if (CLIENT_ENABLED(adapter)) {
4229                 iavf_notify_client_l2_params(&adapter->vsi);
4230                 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4231         }
4232
4233         if (netif_running(netdev)) {
4234                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4235                 queue_work(iavf_wq, &adapter->reset_task);
4236         }
4237
4238         return 0;
4239 }
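
/* Note (illustrative): MTU range validation is done by the core in
 * dev_set_mtu() against netdev->min_mtu/max_mtu before this ndo is called,
 * so no bounds check is needed here. The bounds themselves are set when
 * the netdev is configured, along the lines of:
 *
 *	netdev->min_mtu = ETH_MIN_MTU;
 *	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
 *
 * (the exact macro names above are illustrative, not quoted from this
 * driver).
 */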
4240
4241 #define NETIF_VLAN_OFFLOAD_FEATURES     (NETIF_F_HW_VLAN_CTAG_RX | \
4242                                          NETIF_F_HW_VLAN_CTAG_TX | \
4243                                          NETIF_F_HW_VLAN_STAG_RX | \
4244                                          NETIF_F_HW_VLAN_STAG_TX)
4245
4246 /**
4247  * iavf_set_features - set the netdev feature flags
4248  * @netdev: ptr to the netdev being adjusted
4249  * @features: the feature set that the stack is suggesting
4250  * Note: expects to be called while under rtnl_lock()
4251  **/
4252 static int iavf_set_features(struct net_device *netdev,
4253                              netdev_features_t features)
4254 {
4255         struct iavf_adapter *adapter = netdev_priv(netdev);
4256
4257         /* trigger update on any VLAN feature change */
4258         if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4259             (features & NETIF_VLAN_OFFLOAD_FEATURES))
4260                 iavf_set_vlan_offload_features(adapter, netdev->features,
4261                                                features);
4262
4263         return 0;
4264 }
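
/* Worked example of the trigger above: clearing only the CTAG-RX bit
 * leaves a non-zero XOR inside NETIF_VLAN_OFFLOAD_FEATURES, so a virtchnl
 * renegotiation is kicked off; toggling an unrelated bit does not:
 *
 *	old = netdev->features;                    (has NETIF_F_HW_VLAN_CTAG_RX)
 *	new = old & ~NETIF_F_HW_VLAN_CTAG_RX;
 *	(old ^ new) & NETIF_VLAN_OFFLOAD_FEATURES  -> non-zero, update sent
 *
 *	new = old ^ NETIF_F_RXCSUM;
 *	(old ^ new) & NETIF_VLAN_OFFLOAD_FEATURES  -> 0, no update
 */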
4265
4266 /**
4267  * iavf_features_check - Validate encapsulated packet conforms to limits
4268  * @skb: skb buff
4269  * @dev: This physical port's netdev
4270  * @features: Offload features that the stack believes apply
4271  **/
4272 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4273                                              struct net_device *dev,
4274                                              netdev_features_t features)
4275 {
4276         size_t len;
4277
4278         /* No point in doing any of this if neither checksum nor GSO are
4279          * being requested for this frame.  We can rule out both by just
4280          * checking for CHECKSUM_PARTIAL
4281          */
4282         if (skb->ip_summed != CHECKSUM_PARTIAL)
4283                 return features;
4284
4285         /* We cannot support GSO if the MSS is going to be less than
4286          * 64 bytes.  If it is then we need to drop support for GSO.
4287          */
4288         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4289                 features &= ~NETIF_F_GSO_MASK;
4290
4291         /* MACLEN can support at most 63 words */
4292         len = skb_network_header(skb) - skb->data;
4293         if (len & ~(63 * 2))
4294                 goto out_err;
4295
4296         /* IPLEN and EIPLEN can support at most 127 dwords */
4297         len = skb_transport_header(skb) - skb_network_header(skb);
4298         if (len & ~(127 * 4))
4299                 goto out_err;
4300
4301         if (skb->encapsulation) {
4302                 /* L4TUNLEN can support 127 words */
4303                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4304                 if (len & ~(127 * 2))
4305                         goto out_err;
4306
4307                 /* IPLEN can support at most 127 dwords */
4308                 len = skb_inner_transport_header(skb) -
4309                       skb_inner_network_header(skb);
4310                 if (len & ~(127 * 4))
4311                         goto out_err;
4312         }
4313
4314         /* No need to validate L4LEN as TCP is the only protocol with a
4315          * flexible value and we support all possible values supported
4316          * by TCP, which is at most 15 dwords
4317          */
4318
4319         return features;
4320 out_err:
4321         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4322 }
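
/* Worked example of the header-length masks above: "63 words" are 16-bit
 * words, so MACLEN may be at most 126 bytes and must be word-aligned;
 * ~(63 * 2) keeps bit 0 and bits 7 and up, catching both violations:
 *
 *	len = 14  (plain Ethernet)  -> 14 & ~0x7e == 0     OK
 *	len = 15  (odd length)      -> 15 & ~0x7e != 0     offloads dropped
 *	len = 130 (oversized L2)    -> 130 & ~0x7e != 0    offloads dropped
 *
 * The (127 * 4) checks are the same idea with 32-bit dwords for the IP
 * header lengths.
 */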
4323
4324 /**
4325  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
4326  * @adapter: board private structure
4327  *
4328  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4329  * was negotiated, determine the VLAN features that can be toggled on and off.
4330  **/
4331 static netdev_features_t
4332 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4333 {
4334         netdev_features_t hw_features = 0;
4335
4336         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4337                 return hw_features;
4338
4339         /* Enable VLAN features if supported */
4340         if (VLAN_ALLOWED(adapter)) {
4341                 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4342                                 NETIF_F_HW_VLAN_CTAG_RX);
4343         } else if (VLAN_V2_ALLOWED(adapter)) {
4344                 struct virtchnl_vlan_caps *vlan_v2_caps =
4345                         &adapter->vlan_v2_caps;
4346                 struct virtchnl_vlan_supported_caps *stripping_support =
4347                         &vlan_v2_caps->offloads.stripping_support;
4348                 struct virtchnl_vlan_supported_caps *insertion_support =
4349                         &vlan_v2_caps->offloads.insertion_support;
4350
4351                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4352                     stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4353                         if (stripping_support->outer &
4354                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4355                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4356                         if (stripping_support->outer &
4357                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4358                                 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4359                 } else if (stripping_support->inner !=
4360                            VIRTCHNL_VLAN_UNSUPPORTED &&
4361                            stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4362                         if (stripping_support->inner &
4363                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4364                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4365                 }
4366
4367                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4368                     insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4369                         if (insertion_support->outer &
4370                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4371                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4372                         if (insertion_support->outer &
4373                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4374                                 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4375                 } else if (insertion_support->inner != VIRTCHNL_VLAN_UNSUPPORTED &&
4376                            insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4377                         if (insertion_support->inner &
4378                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4379                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4380                 }
4381         }
4382
4383         return hw_features;
4384 }
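
/* Illustrative example: with VLAN_V2 negotiated and the PF reporting outer
 * stripping support of (VIRTCHNL_VLAN_TOGGLE | VIRTCHNL_VLAN_ETHERTYPE_8100 |
 * VIRTCHNL_VLAN_ETHERTYPE_88A8), the function above returns
 * NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX, making both stripping
 * offloads user-toggleable (e.g. via ethtool -K).
 */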
4385
4386 /**
4387  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4388  * @adapter: board private structure
4389  *
4390  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4391  * was negotiated, determine the VLAN features that are enabled by default.
4392  **/
4393 static netdev_features_t
4394 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4395 {
4396         netdev_features_t features = 0;
4397
4398         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4399                 return features;
4400
4401         if (VLAN_ALLOWED(adapter)) {
4402                 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4403                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4404         } else if (VLAN_V2_ALLOWED(adapter)) {
4405                 struct virtchnl_vlan_caps *vlan_v2_caps =
4406                         &adapter->vlan_v2_caps;
4407                 struct virtchnl_vlan_supported_caps *filtering_support =
4408                         &vlan_v2_caps->filtering.filtering_support;
4409                 struct virtchnl_vlan_supported_caps *stripping_support =
4410                         &vlan_v2_caps->offloads.stripping_support;
4411                 struct virtchnl_vlan_supported_caps *insertion_support =
4412                         &vlan_v2_caps->offloads.insertion_support;
4413                 u32 ethertype_init;
4414
4415                 /* give priority to outer stripping and don't support both outer
4416                  * and inner stripping
4417                  */
4418                 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4419                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4420                         if (stripping_support->outer &
4421                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4422                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4423                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4424                         else if (stripping_support->outer &
4425                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4426                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4427                                 features |= NETIF_F_HW_VLAN_STAG_RX;
4428                 } else if (stripping_support->inner !=
4429                            VIRTCHNL_VLAN_UNSUPPORTED) {
4430                         if (stripping_support->inner &
4431                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4432                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4433                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4434                 }
4435
4436                 /* give priority to outer insertion and don't support both outer
4437                  * and inner insertion
4438                  */
4439                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4440                         if (insertion_support->outer &
4441                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4442                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4443                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4444                         else if (insertion_support->outer &
4445                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4446                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4447                                 features |= NETIF_F_HW_VLAN_STAG_TX;
4448                 } else if (insertion_support->inner !=
4449                            VIRTCHNL_VLAN_UNSUPPORTED) {
4450                         if (insertion_support->inner &
4451                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4452                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4453                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4454                 }
4455
4456                 /* give priority to outer filtering and don't bother if both
4457                  * outer and inner filtering are enabled
4458                  */
4459                 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4460                 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4461                         if (filtering_support->outer &
4462                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4463                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4464                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4465                         if (filtering_support->outer &
4466                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4467                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4468                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4469                 } else if (filtering_support->inner !=
4470                            VIRTCHNL_VLAN_UNSUPPORTED) {
4471                         if (filtering_support->inner &
4472                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4473                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4474                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4475                         if (filtering_support->inner &
4476                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4477                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4478                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4479                 }
4480         }
4481
4482         return features;
4483 }
4484
4485 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4486         (!(((requested) & (feature_bit)) && \
4487            !((allowed) & (feature_bit))))
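
/* The macro reads as "allowed unless the request asks for a bit the
 * allowed mask lacks". With feature_bit = NETIF_F_HW_VLAN_CTAG_RX:
 *
 *	requested has it, allowed has it    -> 1, bit kept
 *	requested has it, allowed lacks it  -> 0, caller clears the bit
 *	requested lacks it                  -> 1, nothing to enforce
 */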
4488
4489 /**
4490  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4491  * @adapter: board private structure
4492  * @requested_features: stack requested NETDEV features
4493  **/
4494 static netdev_features_t
4495 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4496                               netdev_features_t requested_features)
4497 {
4498         netdev_features_t allowed_features;
4499
4500         allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4501                 iavf_get_netdev_vlan_features(adapter);
4502
4503         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4504                                               allowed_features,
4505                                               NETIF_F_HW_VLAN_CTAG_TX))
4506                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4507
4508         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4509                                               allowed_features,
4510                                               NETIF_F_HW_VLAN_CTAG_RX))
4511                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4512
4513         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4514                                               allowed_features,
4515                                               NETIF_F_HW_VLAN_STAG_TX))
4516                 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4517         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4518                                               allowed_features,
4519                                               NETIF_F_HW_VLAN_STAG_RX))
4520                 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4521
4522         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4523                                               allowed_features,
4524                                               NETIF_F_HW_VLAN_CTAG_FILTER))
4525                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4526
4527         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4528                                               allowed_features,
4529                                               NETIF_F_HW_VLAN_STAG_FILTER))
4530                 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4531
4532         if ((requested_features &
4533              (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4534             (requested_features &
4535              (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4536             adapter->vlan_v2_caps.offloads.ethertype_match ==
4537             VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4538                 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4539                 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4540                                         NETIF_F_HW_VLAN_STAG_TX);
4541         }
4542
4543         return requested_features;
4544 }
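
/* Illustrative example (assuming both offload families were otherwise
 * allowed): requesting CTAG and STAG stripping together on a PF that
 * reports VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION triggers the
 * warning above and drops the STAG bits:
 *
 *	requested = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
 *	fixed = iavf_fix_netdev_vlan_features(adapter, requested);
 *	fixed == NETIF_F_HW_VLAN_CTAG_RX
 */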
4545
4546 /**
4547  * iavf_fix_features - fix up the netdev feature bits
4548  * @netdev: our net device
4549  * @features: desired feature bits
4550  *
4551  * Returns fixed-up features bits
4552  **/
4553 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4554                                            netdev_features_t features)
4555 {
4556         struct iavf_adapter *adapter = netdev_priv(netdev);
4557
4558         return iavf_fix_netdev_vlan_features(adapter, features);
4559 }
4560
4561 static const struct net_device_ops iavf_netdev_ops = {
4562         .ndo_open               = iavf_open,
4563         .ndo_stop               = iavf_close,
4564         .ndo_start_xmit         = iavf_xmit_frame,
4565         .ndo_set_rx_mode        = iavf_set_rx_mode,
4566         .ndo_validate_addr      = eth_validate_addr,
4567         .ndo_set_mac_address    = iavf_set_mac,
4568         .ndo_change_mtu         = iavf_change_mtu,
4569         .ndo_tx_timeout         = iavf_tx_timeout,
4570         .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
4571         .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
4572         .ndo_features_check     = iavf_features_check,
4573         .ndo_fix_features       = iavf_fix_features,
4574         .ndo_set_features       = iavf_set_features,
4575         .ndo_setup_tc           = iavf_setup_tc,
4576 };
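
/* Note: the core invokes .ndo_fix_features before .ndo_set_features (both
 * under rtnl_lock) whenever features are updated, so by the time
 * iavf_set_features() runs, the mask has already been clamped by
 * iavf_fix_netdev_vlan_features() above.
 */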
4577
4578 /**
4579  * iavf_check_reset_complete - check that VF reset is complete
4580  * @hw: pointer to hw struct
4581  *
4582  * Returns 0 if the device is ready to use, or -EBUSY if it is still in reset.
4583  **/
4584 static int iavf_check_reset_complete(struct iavf_hw *hw)
4585 {
4586         u32 rstat;
4587         int i;
4588
4589         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4590                 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4591                              IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4592                 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4593                     (rstat == VIRTCHNL_VFR_COMPLETED))
4594                         return 0;
4595                 usleep_range(10, 20);
4596         }
4597         return -EBUSY;
4598 }
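
/* Illustrative usage (hedged sketch, not a quote of this driver's init
 * path): callers treat -EBUSY as "VF still in reset" and retry later:
 *
 *	err = iavf_check_reset_complete(hw);
 *	if (err) {
 *		dev_info(&pdev->dev,
 *			 "Device is still in reset (%d), retrying\n", err);
 *		return err;
 *	}
 *
 * The total poll time is bounded by IAVF_RESET_WAIT_COMPLETE_COUNT
 * iterations of the 10-20 us sleep above.
 */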
4599
4600 /**
4601  * iavf_process_config - Process the config information we got from the PF
4602  * @adapter: board private structure
4603  *
4604  * Verify that we have a valid config struct, and set up our netdev features
4605  * and our VSI struct.
4606  **/
4607 int iavf_process_config(struct iavf_adapter *adapter)
4608 {
4609         struct virtchnl_vf_resource *vfres = adapter->vf_res;
4610         netdev_features_t hw_vlan_features, vlan_features;
4611         struct net_device *netdev = adapter->netdev;
4612         netdev_features_t hw_enc_features;
4613         netdev_features_t hw_features;
4614
4615         hw_enc_features = NETIF_F_SG                    |
4616                           NETIF_F_IP_CSUM               |
4617                           NETIF_F_IPV6_CSUM             |
4618                           NETIF_F_HIGHDMA               |
4619                           NETIF_F_SOFT_FEATURES |
4620                           NETIF_F_TSO                   |
4621                           NETIF_F_TSO_ECN               |
4622                           NETIF_F_TSO6                  |
4623                           NETIF_F_SCTP_CRC              |
4624                           NETIF_F_RXHASH                |
4625                           NETIF_F_RXCSUM                |
4626                           0;
4627
4628         /* advertise to the stack only if offloads for encapsulated packets
4629          * are supported
4630          */
4631         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4632                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
4633                                    NETIF_F_GSO_GRE              |
4634                                    NETIF_F_GSO_GRE_CSUM         |
4635                                    NETIF_F_GSO_IPXIP4           |
4636                                    NETIF_F_GSO_IPXIP6           |
4637                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
4638                                    NETIF_F_GSO_PARTIAL          |
4639                                    0;
4640
4641                 if (!(vfres->vf_cap_flags &
4642                       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4643                         netdev->gso_partial_features |=
4644                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4645
4646                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4647                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4648                 netdev->hw_enc_features |= hw_enc_features;
4649         }
4650         /* record features VLANs can make use of */
4651         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4652
4653         /* Write features and hw_features separately to avoid polluting
4654          * with, or dropping, features that are set when we registered.
4655          */
4656         hw_features = hw_enc_features;
4657
4658         /* get HW VLAN features that can be toggled */
4659         hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4660
4661         /* Enable cloud filter if ADQ is supported */
4662         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4663                 hw_features |= NETIF_F_HW_TC;
4664         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4665                 hw_features |= NETIF_F_GSO_UDP_L4;
4666
4667         netdev->hw_features |= hw_features | hw_vlan_features;
4668         vlan_features = iavf_get_netdev_vlan_features(adapter);
4669
4670         netdev->features |= hw_features | vlan_features;
4671
4672         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4673                 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4674
4675         netdev->priv_flags |= IFF_UNICAST_FLT;
4676
4677         /* Do not turn on offloads when they are requested to be turned off.
4678          * TSO requires an MTU of at least 576 bytes to work correctly.
4679          */
4680         if (netdev->wanted_features) {
4681                 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4682                     netdev->mtu < 576)
4683                         netdev->features &= ~NETIF_F_TSO;
4684                 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4685                     netdev->mtu < 576)
4686                         netdev->features &= ~NETIF_F_TSO6;
4687                 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4688                         netdev->features &= ~NETIF_F_TSO_ECN;
4689                 if (!(netdev->wanted_features & NETIF_F_GRO))
4690                         netdev->features &= ~NETIF_F_GRO;
4691                 if (!(netdev->wanted_features & NETIF_F_GSO))
4692                         netdev->features &= ~NETIF_F_GSO;
4693         }
4694
4695         return 0;
4696 }
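
/* Note (illustrative): the 576-byte TSO floor above matches the minimum
 * datagram size every IPv4 host must accept (RFC 791); below that,
 * segmentation offload provides no benefit. For example, after
 * "ip link set dev <iface> mtu 500" and the reset that follows, this
 * function leaves NETIF_F_TSO cleared.
 */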
4697
4698 /**
4699  * iavf_shutdown - Shutdown the device in preparation for a reboot
4700  * @pdev: pci device structure
4701  **/
4702 static void iavf_shutdown(struct pci_dev *pdev)
4703 {
4704         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4705         struct net_device *netdev = adapter->netdev;
4706
4707         netif_device_detach(netdev);
4708
4709         if (netif_running(netdev))
4710                 iavf_close(netdev);
4711
4712         if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4713                 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4714         /* Prevent the watchdog from running. */
4715         iavf_change_state(adapter, __IAVF_REMOVE);
4716         adapter->aq_required = 0;
4717         mutex_unlock(&adapter->crit_lock);
4718
4719 #ifdef CONFIG_PM
4720         pci_save_state(pdev);
4721
4722 #endif
4723         pci_disable_device(pdev);
4724 }
4725
4726 /**
4727  * iavf_probe - Device Initialization Routine
4728  * @pdev: PCI device information struct
4729  * @ent: entry in iavf_pci_tbl
4730  *
4731  * Returns 0 on success, negative on failure
4732  *
4733  * iavf_probe initializes an adapter identified by a pci_dev structure.
4734  * The OS initialization, configuring of the adapter private structure,
4735  * and a hardware reset occur.
4736  **/
4737 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4738 {
4739         struct net_device *netdev;
4740         struct iavf_adapter *adapter = NULL;
4741         struct iavf_hw *hw = NULL;
4742         int err;
4743
4744         err = pci_enable_device(pdev);
4745         if (err)
4746                 return err;
4747
4748         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4749         if (err) {
4750                 dev_err(&pdev->dev,
4751                         "DMA configuration failed: 0x%x\n", err);
4752                 goto err_dma;
4753         }
4754
4755         err = pci_request_regions(pdev, iavf_driver_name);
4756         if (err) {
4757                 dev_err(&pdev->dev,
4758                         "pci_request_regions failed 0x%x\n", err);
4759                 goto err_pci_reg;
4760         }
4761
4762         pci_enable_pcie_error_reporting(pdev);
4763
4764         pci_set_master(pdev);
4765
4766         netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4767                                    IAVF_MAX_REQ_QUEUES);
4768         if (!netdev) {
4769                 err = -ENOMEM;
4770                 goto err_alloc_etherdev;
4771         }
4772
4773         SET_NETDEV_DEV(netdev, &pdev->dev);
4774
4775         pci_set_drvdata(pdev, netdev);
4776         adapter = netdev_priv(netdev);
4777
4778         adapter->netdev = netdev;
4779         adapter->pdev = pdev;
4780
4781         hw = &adapter->hw;
4782         hw->back = adapter;
4783
4784         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4785         iavf_change_state(adapter, __IAVF_STARTUP);
4786
4787         /* Call save state here because it relies on the adapter struct. */
4788         pci_save_state(pdev);
4789
4790         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4791                               pci_resource_len(pdev, 0));
4792         if (!hw->hw_addr) {
4793                 err = -EIO;
4794                 goto err_ioremap;
4795         }
4796         hw->vendor_id = pdev->vendor;
4797         hw->device_id = pdev->device;
4798         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4799         hw->subsystem_vendor_id = pdev->subsystem_vendor;
4800         hw->subsystem_device_id = pdev->subsystem_device;
4801         hw->bus.device = PCI_SLOT(pdev->devfn);
4802         hw->bus.func = PCI_FUNC(pdev->devfn);
4803         hw->bus.bus_id = pdev->bus->number;
4804
4805         /* set up the locks for the AQ, do this only once in probe
4806          * and destroy them only once in remove
4807          */
4808         mutex_init(&adapter->crit_lock);
4809         mutex_init(&adapter->client_lock);
4810         mutex_init(&hw->aq.asq_mutex);
4811         mutex_init(&hw->aq.arq_mutex);
4812
4813         spin_lock_init(&adapter->mac_vlan_list_lock);
4814         spin_lock_init(&adapter->cloud_filter_list_lock);
4815         spin_lock_init(&adapter->fdir_fltr_lock);
4816         spin_lock_init(&adapter->adv_rss_lock);
4817
4818         INIT_LIST_HEAD(&adapter->mac_filter_list);
4819         INIT_LIST_HEAD(&adapter->vlan_filter_list);
4820         INIT_LIST_HEAD(&adapter->cloud_filter_list);
4821         INIT_LIST_HEAD(&adapter->fdir_list_head);
4822         INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4823
4824         INIT_WORK(&adapter->reset_task, iavf_reset_task);
4825         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4826         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4827         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
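        /* Stagger the first watchdog run by 5 ms per PCI function number
         * (devfn & 0x07, i.e. 0-7): function 0 starts immediately and
         * function 7 after 35 ms, so VFs probed together do not all hit
         * the PF's admin queue at the same instant.
         */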
4828         queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4829                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4830
4831         /* Setup the wait queue for indicating transition to down status */
4832         init_waitqueue_head(&adapter->down_waitqueue);
4833
4834         /* Setup the wait queue for indicating virtchannel events */
4835         init_waitqueue_head(&adapter->vc_waitqueue);
4836
4837         return 0;
4838
4839 err_ioremap:
4840         free_netdev(netdev);
4841 err_alloc_etherdev:
4842         pci_disable_pcie_error_reporting(pdev);
4843         pci_release_regions(pdev);
4844 err_pci_reg:
4845 err_dma:
4846         pci_disable_device(pdev);
4847         return err;
4848 }
4849
4850 /**
4851  * iavf_suspend - Power management suspend routine
4852  * @dev_d: device info pointer
4853  *
4854  * Called when the system (VM) is entering sleep/suspend.
4855  **/
4856 static int __maybe_unused iavf_suspend(struct device *dev_d)
4857 {
4858         struct net_device *netdev = dev_get_drvdata(dev_d);
4859         struct iavf_adapter *adapter = netdev_priv(netdev);
4860
4861         netif_device_detach(netdev);
4862
4863         while (!mutex_trylock(&adapter->crit_lock))
4864                 usleep_range(500, 1000);
4865
4866         if (netif_running(netdev)) {
4867                 rtnl_lock();
4868                 iavf_down(adapter);
4869                 rtnl_unlock();
4870         }
4871         iavf_free_misc_irq(adapter);
4872         iavf_reset_interrupt_capability(adapter);
4873
4874         mutex_unlock(&adapter->crit_lock);
4875
4876         return 0;
4877 }
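
/* Note: dev_get_drvdata() above returns the netdev stored via
 * pci_set_drvdata() in iavf_probe(); iavf_resume() reaches the same
 * adapter through iavf_pdev_to_adapter() on the pci_dev.
 */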
4878
4879 /**
4880  * iavf_resume - Power management resume routine
4881  * @dev_d: device info pointer
4882  *
4883  * Called when the system (VM) is resumed from sleep/suspend.
4884  **/
4885 static int __maybe_unused iavf_resume(struct device *dev_d)
4886 {
4887         struct pci_dev *pdev = to_pci_dev(dev_d);
4888         struct iavf_adapter *adapter;
4889         int err;
4890
4891         adapter = iavf_pdev_to_adapter(pdev);
4892
4893         pci_set_master(pdev);
4894
4895         rtnl_lock();
4896         err = iavf_set_interrupt_capability(adapter);
4897         if (err) {
4898                 rtnl_unlock();
4899                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
4900                 return err;
4901         }
4902         err = iavf_request_misc_irq(adapter);
4903         rtnl_unlock();
4904         if (err) {
4905                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
4906                 return err;
4907         }
4908
4909         queue_work(iavf_wq, &adapter->reset_task);
4910
4911         netif_device_attach(adapter->netdev);
4912
4913         return err;
4914 }
4915
4916 /**
4917  * iavf_remove - Device Removal Routine
4918  * @pdev: PCI device information struct
4919  *
4920  * iavf_remove is called by the PCI subsystem to alert the driver
4921  * that it should release a PCI device.  This could be caused by a
4922  * Hot-Plug event, or because the driver is going to be removed from
4923  * memory.
4924  **/
4925 static void iavf_remove(struct pci_dev *pdev)
4926 {
4927         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4928         struct net_device *netdev = adapter->netdev;
4929         struct iavf_fdir_fltr *fdir, *fdirtmp;
4930         struct iavf_vlan_filter *vlf, *vlftmp;
4931         struct iavf_adv_rss *rss, *rsstmp;
4932         struct iavf_mac_filter *f, *ftmp;
4933         struct iavf_cloud_filter *cf, *cftmp;
4934         struct iavf_hw *hw = &adapter->hw;
4935         int err;
4936
4937         /* When a reboot/shutdown is in progress there is no need to do
4938          * anything, as the adapter is already in the __IAVF_REMOVE state
4939          * set during the iavf_shutdown() callback.
4940          */
4941         if (adapter->state == __IAVF_REMOVE)
4942                 return;
4943
4944         set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
4945         /* Wait until port initialization is complete.
4946          * There are flows where register/unregister netdev may race.
4947          */
4948         while (1) {
4949                 mutex_lock(&adapter->crit_lock);
4950                 if (adapter->state == __IAVF_RUNNING ||
4951                     adapter->state == __IAVF_DOWN ||
4952                     adapter->state == __IAVF_INIT_FAILED) {
4953                         mutex_unlock(&adapter->crit_lock);
4954                         break;
4955                 }
4956
4957                 mutex_unlock(&adapter->crit_lock);
4958                 usleep_range(500, 1000);
4959         }
4960         cancel_delayed_work_sync(&adapter->watchdog_task);
4961
4962         if (adapter->netdev_registered) {
4963                 rtnl_lock();
4964                 unregister_netdevice(netdev);
4965                 adapter->netdev_registered = false;
4966                 rtnl_unlock();
4967         }
4968         if (CLIENT_ALLOWED(adapter)) {
4969                 err = iavf_lan_del_device(adapter);
4970                 if (err)
4971                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
4972                                  err);
4973         }
4974
4975         mutex_lock(&adapter->crit_lock);
4976         dev_info(&adapter->pdev->dev, "Remove device\n");
4977         iavf_change_state(adapter, __IAVF_REMOVE);
4978
4979         iavf_request_reset(adapter);
4980         msleep(50);
4981         /* If the FW isn't responding, kick it once, but only once. */
4982         if (!iavf_asq_done(hw)) {
4983                 iavf_request_reset(adapter);
4984                 msleep(50);
4985         }
4986
4987         iavf_misc_irq_disable(adapter);
4988         /* Shut down all the garbage mashers on the detention level */
4989         cancel_work_sync(&adapter->reset_task);
4990         cancel_delayed_work_sync(&adapter->watchdog_task);
4991         cancel_work_sync(&adapter->adminq_task);
4992         cancel_delayed_work_sync(&adapter->client_task);
4993
4994         adapter->aq_required = 0;
4995         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
4996
4997         iavf_free_all_tx_resources(adapter);
4998         iavf_free_all_rx_resources(adapter);
4999         iavf_free_misc_irq(adapter);
5000
5001         iavf_reset_interrupt_capability(adapter);
5002         iavf_free_q_vectors(adapter);
5003
5004         iavf_free_rss(adapter);
5005
5006         if (hw->aq.asq.count)
5007                 iavf_shutdown_adminq(hw);
5008
5009         /* destroy the locks only once, here */
5010         mutex_destroy(&hw->aq.arq_mutex);
5011         mutex_destroy(&hw->aq.asq_mutex);
5012         mutex_destroy(&adapter->client_lock);
5013         mutex_unlock(&adapter->crit_lock);
5014         mutex_destroy(&adapter->crit_lock);
5015
5016         iounmap(hw->hw_addr);
5017         pci_release_regions(pdev);
5018         iavf_free_queues(adapter);
5019         kfree(adapter->vf_res);
5020         spin_lock_bh(&adapter->mac_vlan_list_lock);
5021         /* If we got removed before an up/down sequence, we've got a filter
5022          * hanging out there that we need to get rid of.
5023          */
5024         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5025                 list_del(&f->list);
5026                 kfree(f);
5027         }
5028         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5029                                  list) {
5030                 list_del(&vlf->list);
5031                 kfree(vlf);
5032         }
5033
5034         spin_unlock_bh(&adapter->mac_vlan_list_lock);
5035
5036         spin_lock_bh(&adapter->cloud_filter_list_lock);
5037         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5038                 list_del(&cf->list);
5039                 kfree(cf);
5040         }
5041         spin_unlock_bh(&adapter->cloud_filter_list_lock);
5042
5043         spin_lock_bh(&adapter->fdir_fltr_lock);
5044         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5045                 list_del(&fdir->list);
5046                 kfree(fdir);
5047         }
5048         spin_unlock_bh(&adapter->fdir_fltr_lock);
5049
5050         spin_lock_bh(&adapter->adv_rss_lock);
5051         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5052                                  list) {
5053                 list_del(&rss->list);
5054                 kfree(rss);
5055         }
5056         spin_unlock_bh(&adapter->adv_rss_lock);
5057
5058         free_netdev(netdev);
5059
5060         pci_disable_pcie_error_reporting(pdev);
5061
5062         pci_disable_device(pdev);
5063 }
5064
5065 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5066
5067 static struct pci_driver iavf_driver = {
5068         .name      = iavf_driver_name,
5069         .id_table  = iavf_pci_tbl,
5070         .probe     = iavf_probe,
5071         .remove    = iavf_remove,
5072         .driver.pm = &iavf_pm_ops,
5073         .shutdown  = iavf_shutdown,
5074 };
5075
5076 /**
5077  * iavf_init_module - Driver Registration Routine
5078  *
5079  * iavf_init_module is the first routine called when the driver is
5080  * loaded. All it does is register with the PCI subsystem.
5081  **/
5082 static int __init iavf_init_module(void)
5083 {
5084         pr_info("iavf: %s\n", iavf_driver_string);
5085
5086         pr_info("%s\n", iavf_copyright);
5087
5088         iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
5089                                   iavf_driver_name);
5090         if (!iavf_wq) {
5091                 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
5092                 return -ENOMEM;
5093         }
5094         return pci_register_driver(&iavf_driver);
5095 }
5096
5097 module_init(iavf_init_module);
5098
5099 /**
5100  * iavf_exit_module - Driver Exit Cleanup Routine
5101  *
5102  * iavf_exit_module is called just before the driver is removed
5103  * from memory.
5104  **/
5105 static void __exit iavf_exit_module(void)
5106 {
5107         pci_unregister_driver(&iavf_driver);
5108         destroy_workqueue(iavf_wq);
5109 }
5110
5111 module_exit(iavf_exit_module);
5112
5113 /* iavf_main.c */