2 * Linux driver for VMware's vmxnet3 ethernet NIC.
4 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
23 * Maintained by: pv-drivers@vmware.com
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
30 #include "vmxnet3_int.h"
32 char vmxnet3_driver_name[] = "vmxnet3";
33 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
37 * Last entry must be all 0s
39 static const struct pci_device_id vmxnet3_pciid_table[] = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
44 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46 static int enable_mq = 1;
49 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
52 * Enable/Disable the given intr
55 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
57 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
62 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
64 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
69 * Enable/Disable all intrs used by the device
72 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
76 for (i = 0; i < adapter->intr.num_intrs; i++)
77 vmxnet3_enable_intr(adapter, i);
78 adapter->shared->devRead.intrConf.intrCtrl &=
79 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
84 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
88 adapter->shared->devRead.intrConf.intrCtrl |=
89 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
90 for (i = 0; i < adapter->intr.num_intrs; i++)
91 vmxnet3_disable_intr(adapter, i);
96 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
98 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
133 /* Check whether the capability is supported by the UPT device, or
134  * whether UPT was even requested
137 vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
139 if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
140 cap_supported & (1UL << cap)) {
149 * Check the link state. This may start or stop the tx queue.
152 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
158 spin_lock_irqsave(&adapter->cmd_lock, flags);
159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
160 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
161 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
163 adapter->link_speed = ret >> 16;
164 if (ret & 1) { /* Link is up. */
165 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
166 adapter->link_speed);
167 netif_carrier_on(adapter->netdev);
170 for (i = 0; i < adapter->num_tx_queues; i++)
171 vmxnet3_tq_start(&adapter->tx_queue[i],
175 netdev_info(adapter->netdev, "NIC Link is Down\n");
176 netif_carrier_off(adapter->netdev);
179 for (i = 0; i < adapter->num_tx_queues; i++)
180 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
186 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
190 u32 events = le32_to_cpu(adapter->shared->ecr);
194 vmxnet3_ack_events(adapter, events);
196 /* Check if link state has changed */
197 if (events & VMXNET3_ECR_LINK)
198 vmxnet3_check_link(adapter, true);
200 /* Check if there is an error on xmit/recv queues */
201 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
202 spin_lock_irqsave(&adapter->cmd_lock, flags);
203 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
204 VMXNET3_CMD_GET_QUEUE_STATUS);
205 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
207 for (i = 0; i < adapter->num_tx_queues; i++)
208 if (adapter->tqd_start[i].status.stopped)
209 dev_err(&adapter->netdev->dev,
210 "%s: tq[%d] error 0x%x\n",
211 adapter->netdev->name, i, le32_to_cpu(
212 adapter->tqd_start[i].status.error));
213 for (i = 0; i < adapter->num_rx_queues; i++)
214 if (adapter->rqd_start[i].status.stopped)
215 dev_err(&adapter->netdev->dev,
216 "%s: rq[%d] error 0x%x\n",
217 adapter->netdev->name, i,
218 adapter->rqd_start[i].status.error);
220 schedule_work(&adapter->work);
224 #ifdef __BIG_ENDIAN_BITFIELD
226 * The device expects the bitfields in shared structures to be written in
227 * little endian. When the CPU is big endian, the following routines are used
228 * to read from and write to the device ABI correctly.
229 * The general technique used here is: double word bitfields are defined in
230 * the opposite order for big endian architectures. Then, before reading them
231 * in the driver, the complete double word is translated using le32_to_cpu.
232 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
233 * to translate the double words into the required format.
234 * In order to avoid touching bits in the shared structure more than once,
235 * temporary descriptors are used. These are passed as srcDesc to the functions below.
237 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
238 struct Vmxnet3_RxDesc *dstDesc)
240 u32 *src = (u32 *)srcDesc + 2;
241 u32 *dst = (u32 *)dstDesc + 2;
242 dstDesc->addr = le64_to_cpu(srcDesc->addr);
243 *dst = le32_to_cpu(*src);
244 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
247 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
248 struct Vmxnet3_TxDesc *dstDesc)
251 u32 *src = (u32 *)(srcDesc + 1);
252 u32 *dst = (u32 *)(dstDesc + 1);
254 /* Working backwards so that the gen bit is set at the end. */
255 for (i = 2; i > 0; i--) {
258 *dst = cpu_to_le32(*src);
263 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
264 struct Vmxnet3_RxCompDesc *dstDesc)
267 u32 *src = (u32 *)srcDesc;
268 u32 *dst = (u32 *)dstDesc;
269 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
270 *dst = le32_to_cpu(*src);
277 /* Used to read bitfield values from double words. */
278 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
280 u32 temp = le32_to_cpu(*bitfield);
281 u32 mask = ((1 << size) - 1) << pos;
289 #endif /* __BIG_ENDIAN_BITFIELD */
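/* For illustration only (values chosen for the example, not taken from the
 * headers): get_bitfield32(dw, 14, 1) computes mask = ((1 << 1) - 1) << 14 =
 * 0x4000 and extracts bit 14 of the CPU-order dword, i.e. a typical
 * single-bit gen field.
 */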
291 #ifdef __BIG_ENDIAN_BITFIELD
293 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
294 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
295 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
296 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
297 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
298 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
299 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
300 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
301 VMXNET3_TCD_GEN_SIZE)
302 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
303 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
304 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
306 vmxnet3_RxCompToCPU((rcd), (tmp)); \
308 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
310 vmxnet3_RxDescToCPU((rxd), (tmp)); \
315 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
316 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
317 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
318 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
319 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
320 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
322 #endif /* __BIG_ENDIAN_BITFIELD */
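/* Sketch of the typical call pattern (it mirrors the rx completion path
 * further below; names here are simplified for the example):
 *
 *	struct Vmxnet3_RxCompDesc rxComp;	(scratch copy, only read on big endian)
 *	vmxnet3_getRxComp(rcd, &ring->base[idx].rcd, &rxComp);
 *
 * On little endian this is a plain pointer assignment; on big endian the
 * descriptor is first byte-swapped into the scratch copy via
 * vmxnet3_RxCompToCPU() and rcd then points at that copy.
 */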
326 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
327 struct pci_dev *pdev)
329 if (tbi->map_type == VMXNET3_MAP_SINGLE)
330 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
332 else if (tbi->map_type == VMXNET3_MAP_PAGE)
333 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
336 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
338 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
343 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
344 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
349 /* no out of order completion */
350 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
351 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
353 skb = tq->buf_info[eop_idx].skb;
355 tq->buf_info[eop_idx].skb = NULL;
357 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
359 while (tq->tx_ring.next2comp != eop_idx) {
360 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
363 /* update next2comp w/o tx_lock. Since we are marking more,
364 * not fewer, tx ring entries as available, the worst case is
365 * that the tx routine incorrectly re-queues a pkt due to
366 * insufficient tx ring entries.
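 * (Worst case the tx path briefly stops the queue; the next completion
 * run re-checks the ring and wakes it again via vmxnet3_tq_wake().)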
368 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
372 dev_kfree_skb_any(skb);
378 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
379 struct vmxnet3_adapter *adapter)
382 union Vmxnet3_GenericDesc *gdesc;
384 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
385 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
386 /* Prevent any &gdesc->tcd field from being (speculatively)
387 * read before (&gdesc->tcd)->gen is read.
391 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
392 &gdesc->tcd), tq, adapter->pdev,
395 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
396 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
400 spin_lock(&tq->tx_lock);
401 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
402 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
403 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
404 netif_carrier_ok(adapter->netdev))) {
405 vmxnet3_tq_wake(tq, adapter);
407 spin_unlock(&tq->tx_lock);
414 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
415 struct vmxnet3_adapter *adapter)
419 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
420 struct vmxnet3_tx_buf_info *tbi;
422 tbi = tq->buf_info + tq->tx_ring.next2comp;
424 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
426 dev_kfree_skb_any(tbi->skb);
429 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
432 /* sanity check, verify all buffers are indeed unmapped and freed */
433 for (i = 0; i < tq->tx_ring.size; i++) {
434 BUG_ON(tq->buf_info[i].skb != NULL ||
435 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
438 tq->tx_ring.gen = VMXNET3_INIT_GEN;
439 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
441 tq->comp_ring.gen = VMXNET3_INIT_GEN;
442 tq->comp_ring.next2proc = 0;
447 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
448 struct vmxnet3_adapter *adapter)
450 if (tq->tx_ring.base) {
451 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
452 sizeof(struct Vmxnet3_TxDesc),
453 tq->tx_ring.base, tq->tx_ring.basePA);
454 tq->tx_ring.base = NULL;
456 if (tq->data_ring.base) {
457 dma_free_coherent(&adapter->pdev->dev,
458 tq->data_ring.size * tq->txdata_desc_size,
459 tq->data_ring.base, tq->data_ring.basePA);
460 tq->data_ring.base = NULL;
462 if (tq->comp_ring.base) {
463 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
464 sizeof(struct Vmxnet3_TxCompDesc),
465 tq->comp_ring.base, tq->comp_ring.basePA);
466 tq->comp_ring.base = NULL;
473 /* Destroy all tx queues */
475 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
479 for (i = 0; i < adapter->num_tx_queues; i++)
480 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
485 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
486 struct vmxnet3_adapter *adapter)
490 /* reset the tx ring contents to 0 and reset the tx ring states */
491 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
492 sizeof(struct Vmxnet3_TxDesc));
493 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
494 tq->tx_ring.gen = VMXNET3_INIT_GEN;
496 memset(tq->data_ring.base, 0,
497 tq->data_ring.size * tq->txdata_desc_size);
499 /* reset the tx comp ring contents to 0 and reset comp ring states */
500 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
501 sizeof(struct Vmxnet3_TxCompDesc));
502 tq->comp_ring.next2proc = 0;
503 tq->comp_ring.gen = VMXNET3_INIT_GEN;
505 /* reset the bookkeeping data */
506 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
507 for (i = 0; i < tq->tx_ring.size; i++)
508 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
510 /* stats are not reset */
515 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
516 struct vmxnet3_adapter *adapter)
518 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
519 tq->comp_ring.base || tq->buf_info);
521 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
522 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
523 &tq->tx_ring.basePA, GFP_KERNEL);
524 if (!tq->tx_ring.base) {
525 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
529 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
530 tq->data_ring.size * tq->txdata_desc_size,
531 &tq->data_ring.basePA, GFP_KERNEL);
532 if (!tq->data_ring.base) {
533 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
537 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
538 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
539 &tq->comp_ring.basePA, GFP_KERNEL);
540 if (!tq->comp_ring.base) {
541 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
545 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
547 dev_to_node(&adapter->pdev->dev));
554 vmxnet3_tq_destroy(tq, adapter);
559 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
563 for (i = 0; i < adapter->num_tx_queues; i++)
564 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
568 * Starting from ring->next2fill, allocate rx buffers for the given ring
569 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
570 * are allocated or allocation fails.
574 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
575 int num_to_alloc, struct vmxnet3_adapter *adapter)
577 int num_allocated = 0;
578 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
579 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
582 while (num_allocated <= num_to_alloc) {
583 struct vmxnet3_rx_buf_info *rbi;
584 union Vmxnet3_GenericDesc *gd;
586 rbi = rbi_base + ring->next2fill;
587 gd = ring->base + ring->next2fill;
588 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
590 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
591 if (rbi->skb == NULL) {
592 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
595 if (unlikely(rbi->skb == NULL)) {
596 rq->stats.rx_buf_alloc_failure++;
600 rbi->dma_addr = dma_map_single(
602 rbi->skb->data, rbi->len,
604 if (dma_mapping_error(&adapter->pdev->dev,
606 dev_kfree_skb_any(rbi->skb);
608 rq->stats.rx_buf_alloc_failure++;
612 /* rx buffer skipped by the device */
614 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
616 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
617 rbi->len != PAGE_SIZE);
619 if (rbi->page == NULL) {
620 rbi->page = alloc_page(GFP_ATOMIC);
621 if (unlikely(rbi->page == NULL)) {
622 rq->stats.rx_buf_alloc_failure++;
625 rbi->dma_addr = dma_map_page(
627 rbi->page, 0, PAGE_SIZE,
629 if (dma_mapping_error(&adapter->pdev->dev,
633 rq->stats.rx_buf_alloc_failure++;
637 /* rx buffers skipped by the device */
639 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
642 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
643 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
646 /* Fill the last buffer but don't mark it ready, or else the
647 * device will think that the queue is full */
648 if (num_allocated == num_to_alloc) {
649 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
653 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
655 vmxnet3_cmd_ring_adv_next2fill(ring);
658 netdev_dbg(adapter->netdev,
659 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
660 num_allocated, ring->next2fill, ring->next2comp);
662 /* so that the device can distinguish a full ring from an empty ring */
663 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
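/* Illustrative: vmxnet3_rq_init() asks for size - 1 buffers (e.g. 511 of a
 * 512-entry ring), and the last buffer filled above is left with the old gen
 * bit, so next2fill can never wrap around onto next2comp while buffers are
 * outstanding.
 */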
665 return num_allocated;
670 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
671 struct vmxnet3_rx_buf_info *rbi)
673 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
675 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
677 __skb_frag_set_page(frag, rbi->page);
678 skb_frag_off_set(frag, 0);
679 skb_frag_size_set(frag, rcd->len);
680 skb->data_len += rcd->len;
681 skb->truesize += PAGE_SIZE;
682 skb_shinfo(skb)->nr_frags++;
687 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
688 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
689 struct vmxnet3_adapter *adapter)
692 unsigned long buf_offset;
694 union Vmxnet3_GenericDesc *gdesc;
695 struct vmxnet3_tx_buf_info *tbi = NULL;
697 BUG_ON(ctx->copy_size > skb_headlen(skb));
699 /* use the previous gen bit for the SOP desc */
700 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
702 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
703 gdesc = ctx->sop_txd; /* both loops below can be skipped */
705 /* no need to map the buffer if headers are copied */
706 if (ctx->copy_size) {
707 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
708 tq->tx_ring.next2fill *
709 tq->txdata_desc_size);
710 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
711 ctx->sop_txd->dword[3] = 0;
713 tbi = tq->buf_info + tq->tx_ring.next2fill;
714 tbi->map_type = VMXNET3_MAP_NONE;
716 netdev_dbg(adapter->netdev,
717 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
718 tq->tx_ring.next2fill,
719 le64_to_cpu(ctx->sop_txd->txd.addr),
720 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
721 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
723 /* use the right gen for non-SOP desc */
724 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
727 /* linear part can use multiple tx desc if it's big */
728 len = skb_headlen(skb) - ctx->copy_size;
729 buf_offset = ctx->copy_size;
733 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
737 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
738 /* spec says that for TxDesc.len, 0 == 2^14 */
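/* (i.e. 2^14 = 16384 bytes, the full VMXNET3_MAX_TX_BUF_SIZE chunk) */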
741 tbi = tq->buf_info + tq->tx_ring.next2fill;
742 tbi->map_type = VMXNET3_MAP_SINGLE;
743 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
744 skb->data + buf_offset, buf_size,
746 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
751 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
752 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
754 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
755 gdesc->dword[2] = cpu_to_le32(dw2);
758 netdev_dbg(adapter->netdev,
759 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
760 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
761 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
762 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
763 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
766 buf_offset += buf_size;
769 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
770 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
774 len = skb_frag_size(frag);
776 tbi = tq->buf_info + tq->tx_ring.next2fill;
777 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
781 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
782 /* spec says that for TxDesc.len, 0 == 2^14 */
784 tbi->map_type = VMXNET3_MAP_PAGE;
785 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
786 buf_offset, buf_size,
788 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
793 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
794 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
796 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
797 gdesc->dword[2] = cpu_to_le32(dw2);
800 netdev_dbg(adapter->netdev,
801 "txd[%u]: 0x%llx %u %u\n",
802 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
803 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
804 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
805 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
808 buf_offset += buf_size;
812 ctx->eop_txd = gdesc;
814 /* set the last buf_info for the pkt */
816 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
822 /* Init all tx queues */
824 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
828 for (i = 0; i < adapter->num_tx_queues; i++)
829 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
834 * parse relevant protocol headers:
835 * For a tso pkt, relevant headers are L2/3/4 including options
836 * For a pkt requesting csum offloading, they are L2/3 and may include L4
837 * if it's a TCP/UDP pkt
840 * -1: an error occurred during parsing
841 * 0: protocol headers parsed, but too big to be copied
842 * 1: protocol headers parsed and copied
845 * 1. related *ctx fields are updated.
846 * 2. ctx->copy_size is # of bytes copied
847 * 3. the portion to be copied is guaranteed to be in the linear part
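 *
 * Illustrative example (not from the device spec): for a TSO TCP/IPv4 pkt
 * with a 14-byte Ethernet header, a 20-byte IP header and a 20-byte TCP
 * header, l4_offset = 34, l4_hdr_size = 20 and ctx->copy_size = 54, i.e. the
 * whole L2/3/4 header stack is copied into the tx data ring.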
851 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
852 struct vmxnet3_tx_ctx *ctx,
853 struct vmxnet3_adapter *adapter)
857 if (ctx->mss) { /* TSO */
858 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
859 ctx->l4_offset = skb_inner_transport_offset(skb);
860 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
861 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
863 ctx->l4_offset = skb_transport_offset(skb);
864 ctx->l4_hdr_size = tcp_hdrlen(skb);
865 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
868 if (skb->ip_summed == CHECKSUM_PARTIAL) {
869 /* For encap packets, skb_checksum_start_offset refers
870 * to the inner L4 offset. Thus, the code below works for
871 * the encap as well as the non-encap case
873 ctx->l4_offset = skb_checksum_start_offset(skb);
875 if (VMXNET3_VERSION_GE_4(adapter) &&
876 skb->encapsulation) {
877 struct iphdr *iph = inner_ip_hdr(skb);
879 if (iph->version == 4) {
880 protocol = iph->protocol;
882 const struct ipv6hdr *ipv6h;
884 ipv6h = inner_ipv6_hdr(skb);
885 protocol = ipv6h->nexthdr;
889 const struct iphdr *iph = ip_hdr(skb);
891 protocol = iph->protocol;
892 } else if (ctx->ipv6) {
893 const struct ipv6hdr *ipv6h;
895 ipv6h = ipv6_hdr(skb);
896 protocol = ipv6h->nexthdr;
902 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
906 ctx->l4_hdr_size = sizeof(struct udphdr);
909 ctx->l4_hdr_size = 0;
913 ctx->copy_size = min(ctx->l4_offset +
914 ctx->l4_hdr_size, skb->len);
917 ctx->l4_hdr_size = 0;
918 /* copy as much as allowed */
919 ctx->copy_size = min_t(unsigned int,
920 tq->txdata_desc_size,
924 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
925 ctx->copy_size = skb->len;
927 /* make sure headers are accessible directly */
928 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
932 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
933 tq->stats.oversized_hdr++;
944 * copy relevant protocol headers to the transmit ring:
945 * For a tso pkt, relevant headers are L2/3/4 including options
946 * For a pkt requesting csum offloading, they are L2/3 and may include L4
947 * if it's a TCP/UDP pkt
950 * Note that this requires that vmxnet3_parse_hdr be called first to set the
951 * appropriate bits in ctx
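 *
 * The copied bytes land in the tx data ring slot indexed by
 * tq->tx_ring.next2fill (the tdd computation below), which is the same slot
 * the SOP descriptor is pointed at in vmxnet3_map_pkt().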
954 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
955 struct vmxnet3_tx_ctx *ctx,
956 struct vmxnet3_adapter *adapter)
958 struct Vmxnet3_TxDataDesc *tdd;
960 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
961 tq->tx_ring.next2fill *
962 tq->txdata_desc_size);
964 memcpy(tdd->data, skb->data, ctx->copy_size);
965 netdev_dbg(adapter->netdev,
966 "copy %u bytes to dataRing[%u]\n",
967 ctx->copy_size, tq->tx_ring.next2fill);
972 vmxnet3_prepare_inner_tso(struct sk_buff *skb,
973 struct vmxnet3_tx_ctx *ctx)
975 struct tcphdr *tcph = inner_tcp_hdr(skb);
976 struct iphdr *iph = inner_ip_hdr(skb);
978 if (iph->version == 4) {
980 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
983 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
985 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
991 vmxnet3_prepare_tso(struct sk_buff *skb,
992 struct vmxnet3_tx_ctx *ctx)
994 struct tcphdr *tcph = tcp_hdr(skb);
997 struct iphdr *iph = ip_hdr(skb);
1000 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1002 } else if (ctx->ipv6) {
1003 tcp_v6_gso_csum_prep(skb);
1007 static int txd_estimate(const struct sk_buff *skb)
1009 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1012 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1013 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1015 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1021 * Transmits a pkt through a given tq
1023 * NETDEV_TX_OK: descriptors are setup successfully
1024 * NETDEV_TX_OK: error occurred, the pkt is dropped
1025 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
1028 * 1. tx ring may be changed
1029 * 2. tq stats may be updated accordingly
1030 * 3. shared->txNumDeferred may be updated
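 *
 * Illustrative descriptor accounting (see txd_estimate() above): a pkt with
 * 1000 bytes of linear data and two sub-16KB page frags is estimated at
 * 1 + 1 + 2 = 4 tx descriptors, assuming VMXNET3_TXD_NEEDED() resolves to
 * one descriptor per 16 KB chunk.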
1034 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1035 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1040 int tx_num_deferred;
1041 unsigned long flags;
1042 struct vmxnet3_tx_ctx ctx;
1043 union Vmxnet3_GenericDesc *gdesc;
1044 #ifdef __BIG_ENDIAN_BITFIELD
1045 /* Use temporary descriptor to avoid touching bits multiple times */
1046 union Vmxnet3_GenericDesc tempTxDesc;
1049 count = txd_estimate(skb);
1051 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1052 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1054 ctx.mss = skb_shinfo(skb)->gso_size;
1056 if (skb_header_cloned(skb)) {
1057 if (unlikely(pskb_expand_head(skb, 0, 0,
1058 GFP_ATOMIC) != 0)) {
1059 tq->stats.drop_tso++;
1062 tq->stats.copy_skb_header++;
1064 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1065 /* tso pkts must not use more than
1066 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1068 if (skb_linearize(skb) != 0) {
1069 tq->stats.drop_too_many_frags++;
1072 tq->stats.linearized++;
1074 /* recalculate the # of descriptors to use */
1075 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1076 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1077 tq->stats.drop_too_many_frags++;
1081 if (skb->encapsulation) {
1082 vmxnet3_prepare_inner_tso(skb, &ctx);
1084 vmxnet3_prepare_tso(skb, &ctx);
1087 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1089 /* non-tso pkts must not use more than
1090 * VMXNET3_MAX_TXD_PER_PKT entries
1092 if (skb_linearize(skb) != 0) {
1093 tq->stats.drop_too_many_frags++;
1096 tq->stats.linearized++;
1098 /* recalculate the # of descriptors to use */
1099 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1103 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1105 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1106 /* hdrs parsed, check against other limits */
1108 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1109 VMXNET3_MAX_TX_BUF_SIZE)) {
1110 tq->stats.drop_oversized_hdr++;
1114 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1115 if (unlikely(ctx.l4_offset +
1117 VMXNET3_MAX_CSUM_OFFSET)) {
1118 tq->stats.drop_oversized_hdr++;
1124 tq->stats.drop_hdr_inspect_err++;
1128 spin_lock_irqsave(&tq->tx_lock, flags);
1130 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1131 tq->stats.tx_ring_full++;
1132 netdev_dbg(adapter->netdev,
1133 "tx queue stopped on %s, next2comp %u"
1134 " next2fill %u\n", adapter->netdev->name,
1135 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1137 vmxnet3_tq_stop(tq, adapter);
1138 spin_unlock_irqrestore(&tq->tx_lock, flags);
1139 return NETDEV_TX_BUSY;
1143 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1145 /* fill tx descs related to addr & len */
1146 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1147 goto unlock_drop_pkt;
1149 /* setup the EOP desc */
1150 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1152 /* setup the SOP desc */
1153 #ifdef __BIG_ENDIAN_BITFIELD
1154 gdesc = &tempTxDesc;
1155 gdesc->dword[2] = ctx.sop_txd->dword[2];
1156 gdesc->dword[3] = ctx.sop_txd->dword[3];
1158 gdesc = ctx.sop_txd;
1160 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1162 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1163 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1164 if (VMXNET3_VERSION_GE_7(adapter)) {
1165 gdesc->txd.om = VMXNET3_OM_TSO;
1166 gdesc->txd.ext1 = 1;
1168 gdesc->txd.om = VMXNET3_OM_ENCAP;
1170 gdesc->txd.msscof = ctx.mss;
1172 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1175 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1176 gdesc->txd.om = VMXNET3_OM_TSO;
1177 gdesc->txd.msscof = ctx.mss;
1179 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
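/* Illustrative arithmetic: a 9014-byte TSO skb with 66 bytes of
 * headers and an mss of 1460 gives (9014 - 66 + 1459) / 1460 = 7 pkts.
 */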
1181 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1182 if (VMXNET3_VERSION_GE_4(adapter) &&
1183 skb->encapsulation) {
1184 gdesc->txd.hlen = ctx.l4_offset +
1186 if (VMXNET3_VERSION_GE_7(adapter)) {
1187 gdesc->txd.om = VMXNET3_OM_CSUM;
1188 gdesc->txd.msscof = ctx.l4_offset +
1190 gdesc->txd.ext1 = 1;
1192 gdesc->txd.om = VMXNET3_OM_ENCAP;
1193 gdesc->txd.msscof = 0; /* Reserved */
1196 gdesc->txd.hlen = ctx.l4_offset;
1197 gdesc->txd.om = VMXNET3_OM_CSUM;
1198 gdesc->txd.msscof = ctx.l4_offset +
1203 gdesc->txd.msscof = 0;
1207 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1208 tx_num_deferred += num_pkts;
1210 if (skb_vlan_tag_present(skb)) {
1212 gdesc->txd.tci = skb_vlan_tag_get(skb);
1215 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1216 * all other writes to &gdesc->txd.
1220 /* finally flips the GEN bit of the SOP desc. */
1221 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1223 #ifdef __BIG_ENDIAN_BITFIELD
1224 /* Finished updating in bitfields of Tx Desc, so write them in original
1227 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1228 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1229 gdesc = ctx.sop_txd;
1231 netdev_dbg(adapter->netdev,
1232 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1234 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1235 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1237 spin_unlock_irqrestore(&tq->tx_lock, flags);
1239 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1240 tq->shared->txNumDeferred = 0;
1241 VMXNET3_WRITE_BAR0_REG(adapter,
1242 adapter->tx_prod_offset + tq->qid * 8,
1243 tq->tx_ring.next2fill);
1246 return NETDEV_TX_OK;
1249 spin_unlock_irqrestore(&tq->tx_lock, flags);
1251 tq->stats.drop_total++;
1252 dev_kfree_skb_any(skb);
1253 return NETDEV_TX_OK;
1258 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1260 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1262 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1263 return vmxnet3_tq_xmit(skb,
1264 &adapter->tx_queue[skb->queue_mapping],
1270 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1271 struct sk_buff *skb,
1272 union Vmxnet3_GenericDesc *gdesc)
1274 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1275 if (gdesc->rcd.v4 &&
1276 (le32_to_cpu(gdesc->dword[3]) &
1277 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1278 skb->ip_summed = CHECKSUM_UNNECESSARY;
1279 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1280 !(le32_to_cpu(gdesc->dword[0]) &
1281 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1282 WARN_ON_ONCE(gdesc->rcd.frg &&
1283 !(le32_to_cpu(gdesc->dword[0]) &
1284 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1285 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1286 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1287 skb->ip_summed = CHECKSUM_UNNECESSARY;
1288 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1289 !(le32_to_cpu(gdesc->dword[0]) &
1290 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1291 WARN_ON_ONCE(gdesc->rcd.frg &&
1292 !(le32_to_cpu(gdesc->dword[0]) &
1293 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1295 if (gdesc->rcd.csum) {
1296 skb->csum = htons(gdesc->rcd.csum);
1297 skb->ip_summed = CHECKSUM_PARTIAL;
1299 skb_checksum_none_assert(skb);
1303 skb_checksum_none_assert(skb);
1309 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1310 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1312 rq->stats.drop_err++;
1314 rq->stats.drop_fcs++;
1316 rq->stats.drop_total++;
1319 * We do not unmap and chain the rx buffer to the skb.
1320 * We basically pretend this buffer is not used and will be recycled
1321 * by vmxnet3_rq_alloc_rx_buf()
1325 * ctx->skb may be NULL if this is the first and the only one
1329 dev_kfree_skb_irq(ctx->skb);
1336 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1337 union Vmxnet3_GenericDesc *gdesc)
1343 struct vlan_ethhdr *veth;
1345 struct ipv6hdr *ipv6;
1348 BUG_ON(gdesc->rcd.tcp == 0);
1350 maplen = skb_headlen(skb);
1351 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1354 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1355 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1356 hlen = sizeof(struct vlan_ethhdr);
1358 hlen = sizeof(struct ethhdr);
1360 hdr.eth = eth_hdr(skb);
1361 if (gdesc->rcd.v4) {
1362 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1363 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1365 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1366 hlen = hdr.ipv4->ihl << 2;
1367 hdr.ptr += hdr.ipv4->ihl << 2;
1368 } else if (gdesc->rcd.v6) {
1369 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1370 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1372 /* Use an estimated value, since we also need to handle
1375 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1376 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1377 hlen = sizeof(struct ipv6hdr);
1378 hdr.ptr += sizeof(struct ipv6hdr);
1380 /* Non-IP pkt, don't estimate header length */
1384 if (hlen + sizeof(struct tcphdr) > maplen)
1387 return (hlen + (hdr.tcp->doff << 2));
1391 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1392 struct vmxnet3_adapter *adapter, int quota)
1394 u32 rxprod_reg[2] = {
1395 adapter->rx_prod_offset, adapter->rx_prod2_offset
1398 bool skip_page_frags = false;
1399 bool encap_lro = false;
1400 struct Vmxnet3_RxCompDesc *rcd;
1401 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1402 u16 segCnt = 0, mss = 0;
1403 int comp_offset, fill_offset;
1404 #ifdef __BIG_ENDIAN_BITFIELD
1405 struct Vmxnet3_RxDesc rxCmdDesc;
1406 struct Vmxnet3_RxCompDesc rxComp;
1408 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1410 while (rcd->gen == rq->comp_ring.gen) {
1411 struct vmxnet3_rx_buf_info *rbi;
1412 struct sk_buff *skb, *new_skb = NULL;
1413 struct page *new_page = NULL;
1414 dma_addr_t new_dma_addr;
1416 struct Vmxnet3_RxDesc *rxd;
1418 struct vmxnet3_cmd_ring *ring = NULL;
1419 if (num_pkts >= quota) {
1420 /* we may stop even before we see the EOP desc of
1426 /* Prevent any rcd field from being (speculatively) read before
1431 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1432 rcd->rqID != rq->dataRingQid);
1434 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1435 ring = rq->rx_ring + ring_idx;
1436 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1438 rbi = rq->buf_info[ring_idx] + idx;
1440 BUG_ON(rxd->addr != rbi->dma_addr ||
1441 rxd->len != rbi->len);
1443 if (unlikely(rcd->eop && rcd->err)) {
1444 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1448 if (rcd->sop) { /* first buf of the pkt */
1449 bool rxDataRingUsed;
1452 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1453 (rcd->rqID != rq->qid &&
1454 rcd->rqID != rq->dataRingQid));
1456 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1457 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1459 if (unlikely(rcd->len == 0)) {
1460 /* Pretend the rx buffer is skipped. */
1461 BUG_ON(!(rcd->sop && rcd->eop));
1462 netdev_dbg(adapter->netdev,
1463 "rxRing[%u][%u] 0 length\n",
1468 skip_page_frags = false;
1469 ctx->skb = rbi->skb;
1472 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1473 len = rxDataRingUsed ? rcd->len : rbi->len;
1474 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1476 if (new_skb == NULL) {
1477 /* Skb allocation failed, do not hand over this
1478 * skb to the stack. Reuse it. Drop the existing pkt
1480 rq->stats.rx_buf_alloc_failure++;
1482 rq->stats.drop_total++;
1483 skip_page_frags = true;
1487 if (rxDataRingUsed) {
1490 BUG_ON(rcd->len > rq->data_ring.desc_size);
1493 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1494 memcpy(new_skb->data,
1495 &rq->data_ring.base[sz], rcd->len);
1497 ctx->skb = rbi->skb;
1500 dma_map_single(&adapter->pdev->dev,
1501 new_skb->data, rbi->len,
1503 if (dma_mapping_error(&adapter->pdev->dev,
1505 dev_kfree_skb(new_skb);
1506 /* Skb allocation failed, do not
1507 * hand over this skb to the stack. Reuse
1508 * it. Drop the existing pkt.
1510 rq->stats.rx_buf_alloc_failure++;
1512 rq->stats.drop_total++;
1513 skip_page_frags = true;
1517 dma_unmap_single(&adapter->pdev->dev,
1522 /* Immediate refill */
1524 rbi->dma_addr = new_dma_addr;
1525 rxd->addr = cpu_to_le64(rbi->dma_addr);
1526 rxd->len = rbi->len;
1530 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1531 (adapter->netdev->features & NETIF_F_RXHASH)) {
1532 enum pkt_hash_types hash_type;
1534 switch (rcd->rssType) {
1535 case VMXNET3_RCD_RSS_TYPE_IPV4:
1536 case VMXNET3_RCD_RSS_TYPE_IPV6:
1537 hash_type = PKT_HASH_TYPE_L3;
1539 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1540 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1541 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1542 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1543 hash_type = PKT_HASH_TYPE_L4;
1546 hash_type = PKT_HASH_TYPE_L3;
1549 skb_set_hash(ctx->skb,
1550 le32_to_cpu(rcd->rssHash),
1554 skb_record_rx_queue(ctx->skb, rq->qid);
1555 skb_put(ctx->skb, rcd->len);
1557 if (VMXNET3_VERSION_GE_2(adapter) &&
1558 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1559 struct Vmxnet3_RxCompDescExt *rcdlro;
1560 union Vmxnet3_GenericDesc *gdesc;
1562 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1563 gdesc = (union Vmxnet3_GenericDesc *)rcd;
1565 segCnt = rcdlro->segCnt;
1566 WARN_ON_ONCE(segCnt == 0);
1568 if (unlikely(segCnt <= 1))
1570 encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1571 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1576 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1578 /* a non-SOP buffer must be type 1 in most cases */
1579 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1580 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1582 /* If an sop buffer was dropped, skip all
1583 * following non-sop fragments. They will be reused.
1585 if (skip_page_frags)
1589 new_page = alloc_page(GFP_ATOMIC);
1590 /* Replacement page frag could not be allocated.
1591 * Reuse this page. Drop the pkt and free the
1592 * skb which contained this page as a frag. Skip
1593 * processing all the following non-sop frags.
1595 if (unlikely(!new_page)) {
1596 rq->stats.rx_buf_alloc_failure++;
1597 dev_kfree_skb(ctx->skb);
1599 skip_page_frags = true;
1602 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1606 if (dma_mapping_error(&adapter->pdev->dev,
1609 rq->stats.rx_buf_alloc_failure++;
1610 dev_kfree_skb(ctx->skb);
1612 skip_page_frags = true;
1616 dma_unmap_page(&adapter->pdev->dev,
1617 rbi->dma_addr, rbi->len,
1620 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1622 /* Immediate refill */
1623 rbi->page = new_page;
1624 rbi->dma_addr = new_dma_addr;
1625 rxd->addr = cpu_to_le64(rbi->dma_addr);
1626 rxd->len = rbi->len;
1633 u32 mtu = adapter->netdev->mtu;
1634 skb->len += skb->data_len;
1636 vmxnet3_rx_csum(adapter, skb,
1637 (union Vmxnet3_GenericDesc *)rcd);
1638 skb->protocol = eth_type_trans(skb, adapter->netdev);
1639 if ((!rcd->tcp && !encap_lro) ||
1640 !(adapter->netdev->features & NETIF_F_LRO))
1643 if (segCnt != 0 && mss != 0) {
1644 skb_shinfo(skb)->gso_type = rcd->v4 ?
1645 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1646 skb_shinfo(skb)->gso_size = mss;
1647 skb_shinfo(skb)->gso_segs = segCnt;
1648 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1651 hlen = vmxnet3_get_hdr_len(adapter, skb,
1652 (union Vmxnet3_GenericDesc *)rcd);
1656 skb_shinfo(skb)->gso_type =
1657 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1659 skb_shinfo(skb)->gso_segs = segCnt;
1660 skb_shinfo(skb)->gso_size =
1661 DIV_ROUND_UP(skb->len -
1664 skb_shinfo(skb)->gso_size = mtu - hlen;
1668 if (unlikely(rcd->ts))
1669 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1671 if (adapter->netdev->features & NETIF_F_LRO)
1672 netif_receive_skb(skb);
1674 napi_gro_receive(&rq->napi, skb);
1682 /* device may have skipped some rx descs */
1683 ring = rq->rx_ring + ring_idx;
1684 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1686 comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1687 fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1688 idx - ring->next2fill - 1;
1689 if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1690 ring->next2comp = idx;
1691 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1693 /* Ensure that the writes to rxd->gen bits will be observed
1694 * after all other writes to rxd objects.
1698 while (num_to_alloc) {
1699 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1700 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1702 if (ring_idx == 0) {
1703 /* ring0 Type1 buffers can get skipped; re-fill them */
1704 if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1707 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1709 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1711 WARN_ON(!rxd->addr);
1713 /* Recv desc is ready to be used by the device */
1714 rxd->gen = ring->gen;
1715 vmxnet3_cmd_ring_adv_next2fill(ring);
1716 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1719 /* rx completion hasn't occurred */
1720 ring->isOutOfOrder = 1;
1725 if (num_to_alloc == 0) {
1726 ring->isOutOfOrder = 0;
1729 /* if needed, update the register */
1730 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1731 VMXNET3_WRITE_BAR0_REG(adapter,
1732 rxprod_reg[ring_idx] + rq->qid * 8,
1736 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1737 vmxnet3_getRxComp(rcd,
1738 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1746 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1747 struct vmxnet3_adapter *adapter)
1750 struct Vmxnet3_RxDesc *rxd;
1752 /* ring has already been cleaned up */
1753 if (!rq->rx_ring[0].base)
1756 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1757 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1758 #ifdef __BIG_ENDIAN_BITFIELD
1759 struct Vmxnet3_RxDesc rxDesc;
1761 vmxnet3_getRxDesc(rxd,
1762 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1764 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1765 rq->buf_info[ring_idx][i].skb) {
1766 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1767 rxd->len, DMA_FROM_DEVICE);
1768 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1769 rq->buf_info[ring_idx][i].skb = NULL;
1770 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1771 rq->buf_info[ring_idx][i].page) {
1772 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1773 rxd->len, DMA_FROM_DEVICE);
1774 put_page(rq->buf_info[ring_idx][i].page);
1775 rq->buf_info[ring_idx][i].page = NULL;
1779 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1780 rq->rx_ring[ring_idx].next2fill =
1781 rq->rx_ring[ring_idx].next2comp = 0;
1784 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1785 rq->comp_ring.next2proc = 0;
1790 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1794 for (i = 0; i < adapter->num_rx_queues; i++)
1795 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1799 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1800 struct vmxnet3_adapter *adapter)
1805 /* all rx buffers must have already been freed */
1806 for (i = 0; i < 2; i++) {
1807 if (rq->buf_info[i]) {
1808 for (j = 0; j < rq->rx_ring[i].size; j++)
1809 BUG_ON(rq->buf_info[i][j].page != NULL);
1814 for (i = 0; i < 2; i++) {
1815 if (rq->rx_ring[i].base) {
1816 dma_free_coherent(&adapter->pdev->dev,
1818 * sizeof(struct Vmxnet3_RxDesc),
1819 rq->rx_ring[i].base,
1820 rq->rx_ring[i].basePA);
1821 rq->rx_ring[i].base = NULL;
1825 if (rq->data_ring.base) {
1826 dma_free_coherent(&adapter->pdev->dev,
1827 rq->rx_ring[0].size * rq->data_ring.desc_size,
1828 rq->data_ring.base, rq->data_ring.basePA);
1829 rq->data_ring.base = NULL;
1832 if (rq->comp_ring.base) {
1833 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1834 * sizeof(struct Vmxnet3_RxCompDesc),
1835 rq->comp_ring.base, rq->comp_ring.basePA);
1836 rq->comp_ring.base = NULL;
1839 kfree(rq->buf_info[0]);
1840 rq->buf_info[0] = NULL;
1841 rq->buf_info[1] = NULL;
1845 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1849 for (i = 0; i < adapter->num_rx_queues; i++) {
1850 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1852 if (rq->data_ring.base) {
1853 dma_free_coherent(&adapter->pdev->dev,
1854 (rq->rx_ring[0].size *
1855 rq->data_ring.desc_size),
1857 rq->data_ring.basePA);
1858 rq->data_ring.base = NULL;
1859 rq->data_ring.desc_size = 0;
1865 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1866 struct vmxnet3_adapter *adapter)
1870 /* initialize buf_info */
1871 for (i = 0; i < rq->rx_ring[0].size; i++) {
1873 /* 1st buf for a pkt is skbuff */
1874 if (i % adapter->rx_buf_per_pkt == 0) {
1875 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1876 rq->buf_info[0][i].len = adapter->skb_buf_size;
1877 } else { /* subsequent bufs for a pkt are frags */
1878 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1879 rq->buf_info[0][i].len = PAGE_SIZE;
1882 for (i = 0; i < rq->rx_ring[1].size; i++) {
1883 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1884 rq->buf_info[1][i].len = PAGE_SIZE;
1887 /* reset internal state and allocate buffers for both rings */
1888 for (i = 0; i < 2; i++) {
1889 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1891 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1892 sizeof(struct Vmxnet3_RxDesc));
1893 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1894 rq->rx_ring[i].isOutOfOrder = 0;
1896 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1898 /* need at least 1 rx buffer for the 1st ring */
1901 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1903 /* reset the comp ring */
1904 rq->comp_ring.next2proc = 0;
1905 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1906 sizeof(struct Vmxnet3_RxCompDesc));
1907 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1910 rq->rx_ctx.skb = NULL;
1912 /* stats are not reset */
1918 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1922 for (i = 0; i < adapter->num_rx_queues; i++) {
1923 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1924 if (unlikely(err)) {
1925 dev_err(&adapter->netdev->dev, "%s: failed to "
1926 "initialize rx queue%i\n",
1927 adapter->netdev->name, i);
1937 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1941 struct vmxnet3_rx_buf_info *bi;
1943 for (i = 0; i < 2; i++) {
1945 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1946 rq->rx_ring[i].base = dma_alloc_coherent(
1947 &adapter->pdev->dev, sz,
1948 &rq->rx_ring[i].basePA,
1950 if (!rq->rx_ring[i].base) {
1951 netdev_err(adapter->netdev,
1952 "failed to allocate rx ring %d\n", i);
1957 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1958 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1959 rq->data_ring.base =
1960 dma_alloc_coherent(&adapter->pdev->dev, sz,
1961 &rq->data_ring.basePA,
1963 if (!rq->data_ring.base) {
1964 netdev_err(adapter->netdev,
1965 "rx data ring will be disabled\n");
1966 adapter->rxdataring_enabled = false;
1969 rq->data_ring.base = NULL;
1970 rq->data_ring.desc_size = 0;
1973 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1974 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1975 &rq->comp_ring.basePA,
1977 if (!rq->comp_ring.base) {
1978 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1982 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
1983 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
1984 dev_to_node(&adapter->pdev->dev));
1988 rq->buf_info[0] = bi;
1989 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1994 vmxnet3_rq_destroy(rq, adapter);
2000 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2004 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2006 for (i = 0; i < adapter->num_rx_queues; i++) {
2007 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2008 if (unlikely(err)) {
2009 dev_err(&adapter->netdev->dev,
2010 "%s: failed to create rx queue%i\n",
2011 adapter->netdev->name, i);
2016 if (!adapter->rxdataring_enabled)
2017 vmxnet3_rq_destroy_all_rxdataring(adapter);
2021 vmxnet3_rq_destroy_all(adapter);
2026 /* Multiple queue aware polling function for tx and rx */
2029 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2031 int rcd_done = 0, i;
2032 if (unlikely(adapter->shared->ecr))
2033 vmxnet3_process_events(adapter);
2034 for (i = 0; i < adapter->num_tx_queues; i++)
2035 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2037 for (i = 0; i < adapter->num_rx_queues; i++)
2038 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2045 vmxnet3_poll(struct napi_struct *napi, int budget)
2047 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2048 struct vmxnet3_rx_queue, napi);
2051 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2053 if (rxd_done < budget) {
2054 napi_complete_done(napi, rxd_done);
2055 vmxnet3_enable_all_intrs(rx_queue->adapter);
2061 * NAPI polling function for MSI-X mode with multiple Rx queues
2062 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
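 * (budget is the NAPI quota passed in below; when fewer descriptors than the
 * budget are processed, the poll completes and the queue's interrupt is
 * re-armed via vmxnet3_enable_intr().)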
2066 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2068 struct vmxnet3_rx_queue *rq = container_of(napi,
2069 struct vmxnet3_rx_queue, napi);
2070 struct vmxnet3_adapter *adapter = rq->adapter;
2073 /* When sharing interrupt with corresponding tx queue, process
2074 * tx completions in that queue as well
2076 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2077 struct vmxnet3_tx_queue *tq =
2078 &adapter->tx_queue[rq - adapter->rx_queue];
2079 vmxnet3_tq_tx_complete(tq, adapter);
2082 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2084 if (rxd_done < budget) {
2085 napi_complete_done(napi, rxd_done);
2086 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2092 #ifdef CONFIG_PCI_MSI
2095 * Handle completion interrupts on tx queues
2096 * Returns whether or not the intr is handled
2100 vmxnet3_msix_tx(int irq, void *data)
2102 struct vmxnet3_tx_queue *tq = data;
2103 struct vmxnet3_adapter *adapter = tq->adapter;
2105 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2106 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2108 /* Handle the case where only one irq is allocated for all tx queues */
2109 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2111 for (i = 0; i < adapter->num_tx_queues; i++) {
2112 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2113 vmxnet3_tq_tx_complete(txq, adapter);
2116 vmxnet3_tq_tx_complete(tq, adapter);
2118 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2125 * Handle completion interrupts on rx queues. Returns whether or not the
2130 vmxnet3_msix_rx(int irq, void *data)
2132 struct vmxnet3_rx_queue *rq = data;
2133 struct vmxnet3_adapter *adapter = rq->adapter;
2135 /* disable intr if needed */
2136 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2137 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2138 napi_schedule(&rq->napi);
2144 *----------------------------------------------------------------------------
2146 * vmxnet3_msix_event --
2148 * vmxnet3 msix event intr handler
2151 * whether or not the intr is handled
2153 *----------------------------------------------------------------------------
2157 vmxnet3_msix_event(int irq, void *data)
2159 struct net_device *dev = data;
2160 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2162 /* disable intr if needed */
2163 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2164 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2166 if (adapter->shared->ecr)
2167 vmxnet3_process_events(adapter);
2169 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2174 #endif /* CONFIG_PCI_MSI */
2177 /* Interrupt handler for vmxnet3 */
2179 vmxnet3_intr(int irq, void *dev_id)
2181 struct net_device *dev = dev_id;
2182 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2184 if (adapter->intr.type == VMXNET3_IT_INTX) {
2185 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2186 if (unlikely(icr == 0))
2192 /* disable intr if needed */
2193 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2194 vmxnet3_disable_all_intrs(adapter);
2196 napi_schedule(&adapter->rx_queue[0].napi);
2201 #ifdef CONFIG_NET_POLL_CONTROLLER
2203 /* netpoll callback. */
2205 vmxnet3_netpoll(struct net_device *netdev)
2207 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2209 switch (adapter->intr.type) {
2210 #ifdef CONFIG_PCI_MSI
2211 case VMXNET3_IT_MSIX: {
2213 for (i = 0; i < adapter->num_rx_queues; i++)
2214 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2218 case VMXNET3_IT_MSI:
2220 vmxnet3_intr(0, adapter->netdev);
2225 #endif /* CONFIG_NET_POLL_CONTROLLER */
2228 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2230 struct vmxnet3_intr *intr = &adapter->intr;
2234 #ifdef CONFIG_PCI_MSI
2235 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2236 for (i = 0; i < adapter->num_tx_queues; i++) {
2237 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2238 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2239 adapter->netdev->name, vector);
2241 intr->msix_entries[vector].vector,
2243 adapter->tx_queue[i].name,
2244 &adapter->tx_queue[i]);
2246 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2247 adapter->netdev->name, vector);
2250 dev_err(&adapter->netdev->dev,
2251 "Failed to request irq for MSIX, %s, "
2253 adapter->tx_queue[i].name, err);
2257 /* Handle the case where only 1 MSIx was allocated for
2259 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2260 for (; i < adapter->num_tx_queues; i++)
2261 adapter->tx_queue[i].comp_ring.intr_idx
2266 adapter->tx_queue[i].comp_ring.intr_idx
2270 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2273 for (i = 0; i < adapter->num_rx_queues; i++) {
2274 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2275 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2276 adapter->netdev->name, vector);
2278 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2279 adapter->netdev->name, vector);
2280 err = request_irq(intr->msix_entries[vector].vector,
2282 adapter->rx_queue[i].name,
2283 &(adapter->rx_queue[i]));
2285 netdev_err(adapter->netdev,
2286 "Failed to request irq for MSIX, "
2288 adapter->rx_queue[i].name, err);
2292 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2295 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2296 adapter->netdev->name, vector);
2297 err = request_irq(intr->msix_entries[vector].vector,
2298 vmxnet3_msix_event, 0,
2299 intr->event_msi_vector_name, adapter->netdev);
2300 intr->event_intr_idx = vector;
2302 } else if (intr->type == VMXNET3_IT_MSI) {
2303 adapter->num_rx_queues = 1;
2304 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2305 adapter->netdev->name, adapter->netdev);
2308 adapter->num_rx_queues = 1;
2309 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2310 IRQF_SHARED, adapter->netdev->name,
2312 #ifdef CONFIG_PCI_MSI
2315 intr->num_intrs = vector + 1;
2317 netdev_err(adapter->netdev,
2318 "Failed to request irq (intr type:%d), error %d\n",
2321 /* Number of rx queues will not change after this */
2322 for (i = 0; i < adapter->num_rx_queues; i++) {
2323 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2325 rq->qid2 = i + adapter->num_rx_queues;
2326 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2329 /* init our intr settings */
2330 for (i = 0; i < intr->num_intrs; i++)
2331 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2332 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2333 adapter->intr.event_intr_idx = 0;
2334 for (i = 0; i < adapter->num_tx_queues; i++)
2335 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2336 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2339 netdev_info(adapter->netdev,
2340 "intr type %u, mode %u, %u vectors allocated\n",
2341 intr->type, intr->mask_mode, intr->num_intrs);
2349 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2351 struct vmxnet3_intr *intr = &adapter->intr;
2352 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2354 switch (intr->type) {
2355 #ifdef CONFIG_PCI_MSI
2356 case VMXNET3_IT_MSIX:
2360 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2361 for (i = 0; i < adapter->num_tx_queues; i++) {
2362 free_irq(intr->msix_entries[vector++].vector,
2363 &(adapter->tx_queue[i]));
2364 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2369 for (i = 0; i < adapter->num_rx_queues; i++) {
2370 free_irq(intr->msix_entries[vector++].vector,
2371 &(adapter->rx_queue[i]));
2374 free_irq(intr->msix_entries[vector].vector,
2376 BUG_ON(vector >= intr->num_intrs);
2380 case VMXNET3_IT_MSI:
2381 free_irq(adapter->pdev->irq, adapter->netdev);
2383 case VMXNET3_IT_INTX:
2384 free_irq(adapter->pdev->irq, adapter->netdev);
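/* Rebuild the device VLAN filter table from the driver's active_vlans
 * bitmap, always allowing untagged packets (vid 0).
 */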
2393 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2395 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2398 /* allow untagged pkts */
2399 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2401 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2402 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2407 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2409 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2411 if (!(netdev->flags & IFF_PROMISC)) {
2412 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2413 unsigned long flags;
2415 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2416 spin_lock_irqsave(&adapter->cmd_lock, flags);
2417 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2418 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2419 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2422 set_bit(vid, adapter->active_vlans);
2429 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2431 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2433 if (!(netdev->flags & IFF_PROMISC)) {
2434 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2435 unsigned long flags;
2437 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2438 spin_lock_irqsave(&adapter->cmd_lock, flags);
2439 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2440 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2441 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2444 clear_bit(vid, adapter->active_vlans);
2451 vmxnet3_copy_mc(struct net_device *netdev)
2454 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2456 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2458 /* We may be called with BH disabled */
2459 buf = kmalloc(sz, GFP_ATOMIC);
2461 struct netdev_hw_addr *ha;
2464 netdev_for_each_mc_addr(ha, netdev)
2465 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2474 vmxnet3_set_mc(struct net_device *netdev)
2476 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2477 unsigned long flags;
2478 struct Vmxnet3_RxFilterConf *rxConf =
2479 &adapter->shared->devRead.rxFilterConf;
2480 u8 *new_table = NULL;
2481 dma_addr_t new_table_pa = 0;
2482 bool new_table_pa_valid = false;
2483 u32 new_mode = VMXNET3_RXM_UCAST;
2485 if (netdev->flags & IFF_PROMISC) {
2486 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2487 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2489 new_mode |= VMXNET3_RXM_PROMISC;
2491 vmxnet3_restore_vlan(adapter);
2494 if (netdev->flags & IFF_BROADCAST)
2495 new_mode |= VMXNET3_RXM_BCAST;
2497 if (netdev->flags & IFF_ALLMULTI)
2498 new_mode |= VMXNET3_RXM_ALL_MULTI;
2500 if (!netdev_mc_empty(netdev)) {
2501 new_table = vmxnet3_copy_mc(netdev);
2503 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2505 rxConf->mfTableLen = cpu_to_le16(sz);
2506 new_table_pa = dma_map_single(
2507 &adapter->pdev->dev,
2511 if (!dma_mapping_error(&adapter->pdev->dev,
2513 new_mode |= VMXNET3_RXM_MCAST;
2514 new_table_pa_valid = true;
2515 rxConf->mfTablePA = cpu_to_le64(
2519 if (!new_table_pa_valid) {
2521 "failed to copy mcast list, setting ALL_MULTI\n");
2522 new_mode |= VMXNET3_RXM_ALL_MULTI;
2526 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2527 rxConf->mfTableLen = 0;
2528 rxConf->mfTablePA = 0;
2531 spin_lock_irqsave(&adapter->cmd_lock, flags);
2532 if (new_mode != rxConf->rxMode) {
2533 rxConf->rxMode = cpu_to_le32(new_mode);
2534 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2535 VMXNET3_CMD_UPDATE_RX_MODE);
2536 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2537 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2540 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2541 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2542 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2544 if (new_table_pa_valid)
2545 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2546 rxConf->mfTableLen, DMA_TO_DEVICE);
2551 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2555 for (i = 0; i < adapter->num_rx_queues; i++)
2556 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2561 * Set up driver_shared based on settings in adapter.
2565 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2567 struct Vmxnet3_DriverShared *shared = adapter->shared;
2568 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2569 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2570 struct Vmxnet3_TxQueueConf *tqc;
2571 struct Vmxnet3_RxQueueConf *rqc;
2574 memset(shared, 0, sizeof(*shared));
2576 /* driver settings */
2577 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2578 devRead->misc.driverInfo.version = cpu_to_le32(
2579 VMXNET3_DRIVER_VERSION_NUM);
2580 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2581 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2582 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2583 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2584 *((u32 *)&devRead->misc.driverInfo.gos));
2585 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2586 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2588 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2589 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2591 /* set up feature flags */
2592 if (adapter->netdev->features & NETIF_F_RXCSUM)
2593 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2595 if (adapter->netdev->features & NETIF_F_LRO) {
2596 devRead->misc.uptFeatures |= UPT1_F_LRO;
2597 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2599 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2600 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2602 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2603 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2604 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2606 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2607 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2608 devRead->misc.queueDescLen = cpu_to_le32(
2609 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2610 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2612 /* tx queue settings */
2613 devRead->misc.numTxQueues = adapter->num_tx_queues;
2614 for (i = 0; i < adapter->num_tx_queues; i++) {
2615 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2616 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2617 tqc = &adapter->tqd_start[i].conf;
2618 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2619 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2620 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2621 tqc->ddPA = cpu_to_le64(~0ULL);
2622 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2623 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2624 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2625 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2626 tqc->ddLen = cpu_to_le32(0);
2627 tqc->intrIdx = tq->comp_ring.intr_idx;
2630 /* rx queue settings */
2631 devRead->misc.numRxQueues = adapter->num_rx_queues;
2632 for (i = 0; i < adapter->num_rx_queues; i++) {
2633 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2634 rqc = &adapter->rqd_start[i].conf;
2635 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2636 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2637 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2638 rqc->ddPA = cpu_to_le64(~0ULL);
2639 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2640 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2641 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2642 rqc->ddLen = cpu_to_le32(0);
2643 rqc->intrIdx = rq->comp_ring.intr_idx;
2644 if (VMXNET3_VERSION_GE_3(adapter)) {
2645 rqc->rxDataRingBasePA =
2646 cpu_to_le64(rq->data_ring.basePA);
2647 rqc->rxDataRingDescSize =
2648 cpu_to_le16(rq->data_ring.desc_size);
2653 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2656 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2658 devRead->misc.uptFeatures |= UPT1_F_RSS;
2659 devRead->misc.numRxQueues = adapter->num_rx_queues;
2660 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2661 UPT1_RSS_HASH_TYPE_IPV4 |
2662 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2663 UPT1_RSS_HASH_TYPE_IPV6;
2664 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2665 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2666 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2667 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2669 for (i = 0; i < rssConf->indTableSize; i++)
2670 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2671 i, adapter->num_rx_queues);
2673 devRead->rssConfDesc.confVer = 1;
2674 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2675 devRead->rssConfDesc.confPA =
2676 cpu_to_le64(adapter->rss_conf_pa);
2679 #endif /* VMXNET3_RSS */
2682 if (!VMXNET3_VERSION_GE_6(adapter) ||
2683 !adapter->queuesExtEnabled) {
2684 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2686 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2687 for (i = 0; i < adapter->intr.num_intrs; i++)
2688 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2690 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2691 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2693 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2695 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2696 for (i = 0; i < adapter->intr.num_intrs; i++)
2697 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2699 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2700 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2703 /* rx filter settings */
2704 devRead->rxFilterConf.rxMode = 0;
2705 vmxnet3_restore_vlan(adapter);
2706 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2708 /* the rest are already zeroed */
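/* On version 7 and newer devices, report the rx ring buffer sizes chosen by
 * the driver via the SET_RING_BUFFER_SIZE command.
 */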
2712 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2714 struct Vmxnet3_DriverShared *shared = adapter->shared;
2715 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2716 unsigned long flags;
2718 if (!VMXNET3_VERSION_GE_7(adapter))
2721 cmdInfo->ringBufSize = adapter->ringBufSize;
2722 spin_lock_irqsave(&adapter->cmd_lock, flags);
2723 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2724 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
2725 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
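/* On version 3 and newer devices, either read back the default interrupt
 * coalescing scheme or push the driver-configured one to the device.
 */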
2729 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2731 struct Vmxnet3_DriverShared *shared = adapter->shared;
2732 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2733 unsigned long flags;
2735 if (!VMXNET3_VERSION_GE_3(adapter))
2738 spin_lock_irqsave(&adapter->cmd_lock, flags);
2739 cmdInfo->varConf.confVer = 1;
2740 cmdInfo->varConf.confLen =
2741 cpu_to_le32(sizeof(*adapter->coal_conf));
2742 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2744 if (adapter->default_coal_mode) {
2745 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2746 VMXNET3_CMD_GET_COALESCE);
2748 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2749 VMXNET3_CMD_SET_COALESCE);
2752 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
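/* On version 4 and newer devices, configure which packet fields are used for
 * RSS hashing; version 7+ devices first negotiate UDP/ESP RSS capabilities
 * before the selection is applied.
 */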
2756 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2758 struct Vmxnet3_DriverShared *shared = adapter->shared;
2759 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2760 unsigned long flags;
2762 if (!VMXNET3_VERSION_GE_4(adapter))
2765 spin_lock_irqsave(&adapter->cmd_lock, flags);
2767 if (adapter->default_rss_fields) {
2768 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2769 VMXNET3_CMD_GET_RSS_FIELDS);
2770 adapter->rss_fields =
2771 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2773 if (VMXNET3_VERSION_GE_7(adapter)) {
2774 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
2775 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
2776 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2777 VMXNET3_CAP_UDP_RSS)) {
2778 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
2780 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
2783 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
2784 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2785 VMXNET3_CAP_ESP_RSS_IPV4)) {
2786 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
2788 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
2791 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
2792 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2793 VMXNET3_CAP_ESP_RSS_IPV6)) {
2794 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
2796 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
2799 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
2800 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
2801 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2803 cmdInfo->setRssFields = adapter->rss_fields;
2804 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2805 VMXNET3_CMD_SET_RSS_FIELDS);
2806 /* Not all requested RSS may get applied, so get and
2807 * cache what was actually applied.
2809 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2810 VMXNET3_CMD_GET_RSS_FIELDS);
2811 adapter->rss_fields =
2812 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2815 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
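/* Bring the device into the active state: initialize the tx/rx queues,
 * request IRQs, publish the shared area and issue ACTIVATE_DEV, then prime
 * the rx producer registers, enable NAPI and unmask interrupts.
 */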
2819 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2823 unsigned long flags;
2825 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2826 " ring sizes %u %u %u\n", adapter->netdev->name,
2827 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2828 adapter->tx_queue[0].tx_ring.size,
2829 adapter->rx_queue[0].rx_ring[0].size,
2830 adapter->rx_queue[0].rx_ring[1].size);
2832 vmxnet3_tq_init_all(adapter);
2833 err = vmxnet3_rq_init_all(adapter);
2835 netdev_err(adapter->netdev,
2836 "Failed to init rx queue error %d\n", err);
2840 err = vmxnet3_request_irqs(adapter);
2842 netdev_err(adapter->netdev,
2843 "Failed to setup irq for error %d\n", err);
2847 vmxnet3_setup_driver_shared(adapter);
2849 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2850 adapter->shared_pa));
2851 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2852 adapter->shared_pa));
2853 spin_lock_irqsave(&adapter->cmd_lock, flags);
2854 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2855 VMXNET3_CMD_ACTIVATE_DEV);
2856 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2857 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2860 netdev_err(adapter->netdev,
2861 "Failed to activate dev: error %u\n", ret);
2866 vmxnet3_init_bufsize(adapter);
2867 vmxnet3_init_coalesce(adapter);
2868 vmxnet3_init_rssfields(adapter);
2870 for (i = 0; i < adapter->num_rx_queues; i++) {
2871 VMXNET3_WRITE_BAR0_REG(adapter,
2872 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
2873 adapter->rx_queue[i].rx_ring[0].next2fill);
2874 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
2875 (i * VMXNET3_REG_ALIGN)),
2876 adapter->rx_queue[i].rx_ring[1].next2fill);
2879 /* Apply the rx filter settings last. */
2880 vmxnet3_set_mc(adapter->netdev);
2883 * Check link state when first activating device. It will start the
2884 * tx queue if the link is up.
2886 vmxnet3_check_link(adapter, true);
2887 netif_tx_wake_all_queues(adapter->netdev);
2888 for (i = 0; i < adapter->num_rx_queues; i++)
2889 napi_enable(&adapter->rx_queue[i].napi);
2890 vmxnet3_enable_all_intrs(adapter);
2891 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2895 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2896 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2897 vmxnet3_free_irqs(adapter);
2900 /* free up buffers we allocated */
2901 vmxnet3_rq_cleanup_all(adapter);
2907 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2909 unsigned long flags;
2910 spin_lock_irqsave(&adapter->cmd_lock, flags);
2911 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2912 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
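/* Quiesce the device unless it is already quiesced: issue QUIESCE_DEV,
 * disable interrupts and NAPI, stop the tx queues and release the queue
 * buffers and IRQs.
 */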
2917 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2920 unsigned long flags;
2921 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2925 spin_lock_irqsave(&adapter->cmd_lock, flags);
2926 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2927 VMXNET3_CMD_QUIESCE_DEV);
2928 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2929 vmxnet3_disable_all_intrs(adapter);
2931 for (i = 0; i < adapter->num_rx_queues; i++)
2932 napi_disable(&adapter->rx_queue[i].napi);
2933 netif_tx_disable(adapter->netdev);
2934 adapter->link_speed = 0;
2935 netif_carrier_off(adapter->netdev);
2937 vmxnet3_tq_cleanup_all(adapter);
2938 vmxnet3_rq_cleanup_all(adapter);
2939 vmxnet3_free_irqs(adapter);
2945 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
2950 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2952 tmp = (mac[5] << 8) | mac[4];
2953 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2958 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2960 struct sockaddr *addr = p;
2961 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2963 dev_addr_set(netdev, addr->sa_data);
2964 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2970 /* ==================== initialization and cleanup routines ============ */
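/* Enable the PCI device, claim its regions and map BAR0 and BAR1. */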
2973 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2976 unsigned long mmio_start, mmio_len;
2977 struct pci_dev *pdev = adapter->pdev;
2979 err = pci_enable_device(pdev);
2981 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2985 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2986 vmxnet3_driver_name);
2989 "Failed to request region for adapter: error %d\n", err);
2990 goto err_enable_device;
2993 pci_set_master(pdev);
2995 mmio_start = pci_resource_start(pdev, 0);
2996 mmio_len = pci_resource_len(pdev, 0);
2997 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2998 if (!adapter->hw_addr0) {
2999 dev_err(&pdev->dev, "Failed to map bar0\n");
3004 mmio_start = pci_resource_start(pdev, 1);
3005 mmio_len = pci_resource_len(pdev, 1);
3006 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3007 if (!adapter->hw_addr1) {
3008 dev_err(&pdev->dev, "Failed to map bar1\n");
3015 iounmap(adapter->hw_addr0);
3017 pci_release_selected_regions(pdev, (1 << 2) - 1);
3019 pci_disable_device(pdev);
3025 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3027 BUG_ON(!adapter->pdev);
3029 iounmap(adapter->hw_addr0);
3030 iounmap(adapter->hw_addr1);
3031 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3032 pci_disable_device(adapter->pdev);
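/* Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
 * the rx ring sizes to the required alignment (a power of two on version 7
 * and newer devices).
 */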
3037 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3039 size_t sz, i, ring0_size, ring1_size, comp_size;
3040 /* With version 7, ring1 will have only T0 buffers */
3041 if (!VMXNET3_VERSION_GE_7(adapter)) {
3042 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3043 VMXNET3_MAX_ETH_HDR_SIZE) {
3044 adapter->skb_buf_size = adapter->netdev->mtu +
3045 VMXNET3_MAX_ETH_HDR_SIZE;
3046 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3047 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3049 adapter->rx_buf_per_pkt = 1;
3051 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3052 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3053 VMXNET3_MAX_ETH_HDR_SIZE;
3054 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3057 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3058 VMXNET3_MAX_SKB_BUF_SIZE);
3059 adapter->rx_buf_per_pkt = 1;
3060 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3061 adapter->ringBufSize.ring1BufSizeType1 = 0;
3062 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3066 * for simplicity, force the ring0 size to be a multiple of
3067 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3069 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3070 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3071 ring0_size = (ring0_size + sz - 1) / sz * sz;
3072 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3074 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3075 ring1_size = (ring1_size + sz - 1) / sz * sz;
3076 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3078 /* For v7 and later, keep ring size power of 2 for UPT */
3079 if (VMXNET3_VERSION_GE_7(adapter)) {
3080 ring0_size = rounddown_pow_of_two(ring0_size);
3081 ring1_size = rounddown_pow_of_two(ring1_size);
3083 comp_size = ring0_size + ring1_size;
3085 for (i = 0; i < adapter->num_rx_queues; i++) {
3086 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3088 rq->rx_ring[0].size = ring0_size;
3089 rq->rx_ring[1].size = ring1_size;
3090 rq->comp_ring.size = comp_size;
3096 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3097 u32 rx_ring_size, u32 rx_ring2_size,
3098 u16 txdata_desc_size, u16 rxdata_desc_size)
3102 for (i = 0; i < adapter->num_tx_queues; i++) {
3103 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3104 tq->tx_ring.size = tx_ring_size;
3105 tq->data_ring.size = tx_ring_size;
3106 tq->comp_ring.size = tx_ring_size;
3107 tq->txdata_desc_size = txdata_desc_size;
3108 tq->shared = &adapter->tqd_start[i].ctrl;
3110 tq->adapter = adapter;
3112 err = vmxnet3_tq_create(tq, adapter);
3114 * Too late to change num_tx_queues. We cannot make do with
3115 * fewer queues than we asked for
3121 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3122 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3123 vmxnet3_adjust_rx_ring_size(adapter);
3125 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3126 for (i = 0; i < adapter->num_rx_queues; i++) {
3127 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3128 /* qid and qid2 for rx queues will be assigned later when num
3129 * of rx queues is finalized after allocating intrs */
3130 rq->shared = &adapter->rqd_start[i].ctrl;
3131 rq->adapter = adapter;
3132 rq->data_ring.desc_size = rxdata_desc_size;
3133 err = vmxnet3_rq_create(rq, adapter);
3136 netdev_err(adapter->netdev,
3137 "Could not allocate any rx queues. "
3141 netdev_info(adapter->netdev,
3142 "Number of rx queues changed "
3144 adapter->num_rx_queues = i;
3151 if (!adapter->rxdataring_enabled)
3152 vmxnet3_rq_destroy_all_rxdataring(adapter);
3156 vmxnet3_tq_destroy_all(adapter);
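/* ndo_open handler: pick the tx data-ring descriptor size (version 3+
 * devices may suggest one), create the queues and activate the device.
 */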
3161 vmxnet3_open(struct net_device *netdev)
3163 struct vmxnet3_adapter *adapter;
3166 adapter = netdev_priv(netdev);
3168 for (i = 0; i < adapter->num_tx_queues; i++)
3169 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3171 if (VMXNET3_VERSION_GE_3(adapter)) {
3172 unsigned long flags;
3173 u16 txdata_desc_size;
3175 spin_lock_irqsave(&adapter->cmd_lock, flags);
3176 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3177 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3178 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3180 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3182 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3183 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3184 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3185 adapter->txdata_desc_size =
3186 sizeof(struct Vmxnet3_TxDataDesc);
3188 adapter->txdata_desc_size = txdata_desc_size;
3191 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3194 err = vmxnet3_create_queues(adapter,
3195 adapter->tx_ring_size,
3196 adapter->rx_ring_size,
3197 adapter->rx_ring2_size,
3198 adapter->txdata_desc_size,
3199 adapter->rxdata_desc_size);
3203 err = vmxnet3_activate_dev(adapter);
3210 vmxnet3_rq_destroy_all(adapter);
3211 vmxnet3_tq_destroy_all(adapter);
3218 vmxnet3_close(struct net_device *netdev)
3220 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3223 * Reset_work may be in the middle of resetting the device, wait for its completion.
3226 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3227 usleep_range(1000, 2000);
3229 vmxnet3_quiesce_dev(adapter);
3231 vmxnet3_rq_destroy_all(adapter);
3232 vmxnet3_tq_destroy_all(adapter);
3234 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3242 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3247 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3248 * vmxnet3_close() will deadlock.
3250 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3252 /* we need to enable NAPI, otherwise dev_close will deadlock */
3253 for (i = 0; i < adapter->num_rx_queues; i++)
3254 napi_enable(&adapter->rx_queue[i].napi);
3256 * Need to clear the quiesce bit to ensure that vmxnet3_close
3257 * can quiesce the device properly
3259 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3260 dev_close(adapter->netdev);
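/* ndo_change_mtu handler: when the interface is running, quiesce and reset
 * the device, re-create the rx queues for the new MTU and re-activate it;
 * on failure the device is force-closed.
 */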
3265 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3267 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3270 netdev->mtu = new_mtu;
3273 * Reset_work may be in the middle of resetting the device, wait for its completion.
3276 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3277 usleep_range(1000, 2000);
3279 if (netif_running(netdev)) {
3280 vmxnet3_quiesce_dev(adapter);
3281 vmxnet3_reset_dev(adapter);
3283 /* we need to re-create the rx queue based on the new mtu */
3284 vmxnet3_rq_destroy_all(adapter);
3285 vmxnet3_adjust_rx_ring_size(adapter);
3286 err = vmxnet3_rq_create_all(adapter);
3289 "failed to re-create rx queues, "
3290 " error %d. Closing it.\n", err);
3294 err = vmxnet3_activate_dev(adapter);
3297 "failed to re-activate, error %d. "
3298 "Closing it\n", err);
3304 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3306 vmxnet3_force_close(adapter);
3313 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3315 struct net_device *netdev = adapter->netdev;
3317 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3318 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3319 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3320 NETIF_F_LRO | NETIF_F_HIGHDMA;
3322 if (VMXNET3_VERSION_GE_4(adapter)) {
3323 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3324 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3326 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3327 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3328 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3329 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3330 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3333 if (VMXNET3_VERSION_GE_7(adapter)) {
3334 unsigned long flags;
3336 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3337 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3338 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3340 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3341 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3342 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3344 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3345 VMXNET3_CAP_GENEVE_TSO)) {
3346 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3348 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3349 VMXNET3_CAP_VXLAN_TSO)) {
3350 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3352 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3353 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3354 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3356 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3357 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3358 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3361 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3362 spin_lock_irqsave(&adapter->cmd_lock, flags);
3363 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3364 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3365 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3367 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3368 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3369 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3370 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3371 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3372 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3374 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3375 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3376 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3377 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3381 netdev->vlan_features = netdev->hw_features &
3382 ~(NETIF_F_HW_VLAN_CTAG_TX |
3383 NETIF_F_HW_VLAN_CTAG_RX);
3384 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3389 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3393 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3396 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3397 mac[4] = tmp & 0xff;
3398 mac[5] = (tmp >> 8) & 0xff;
3401 #ifdef CONFIG_PCI_MSI
3404 * Enable MSIx vectors.
3406 * Returns VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
3408 * required were enabled, or the number of vectors which were enabled otherwise
3409 * (this number is greater than VMXNET3_LINUX_MIN_MSIX_VECT).
3413 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3415 int ret = pci_enable_msix_range(adapter->pdev,
3416 adapter->intr.msix_entries, nvec, nvec);
3418 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3419 dev_err(&adapter->netdev->dev,
3420 "Failed to enable %d MSI-X, trying %d\n",
3421 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3423 ret = pci_enable_msix_range(adapter->pdev,
3424 adapter->intr.msix_entries,
3425 VMXNET3_LINUX_MIN_MSIX_VECT,
3426 VMXNET3_LINUX_MIN_MSIX_VECT);
3430 dev_err(&adapter->netdev->dev,
3431 "Failed to enable MSI-X, error: %d\n", ret);
3438 #endif /* CONFIG_PCI_MSI */
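/* Query the device for its preferred interrupt configuration and try MSI-X
 * first, falling back to MSI and then INTx; the number of rx queues is
 * reduced to 1 when not enough vectors are available.
 */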
3441 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3444 unsigned long flags;
3447 spin_lock_irqsave(&adapter->cmd_lock, flags);
3448 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3449 VMXNET3_CMD_GET_CONF_INTR);
3450 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3451 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3452 adapter->intr.type = cfg & 0x3;
3453 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3455 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3456 adapter->intr.type = VMXNET3_IT_MSIX;
3459 #ifdef CONFIG_PCI_MSI
3460 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3461 int i, nvec, nvec_allocated;
3463 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3464 1 : adapter->num_tx_queues;
3465 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3466 0 : adapter->num_rx_queues;
3467 nvec += 1; /* for link event */
3468 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3469 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3471 for (i = 0; i < nvec; i++)
3472 adapter->intr.msix_entries[i].entry = i;
3474 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3475 if (nvec_allocated < 0)
3478 /* If we cannot allocate one MSIx vector per queue
3479 * then limit the number of rx queues to 1
3481 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3482 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3483 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3484 || adapter->num_rx_queues != 1) {
3485 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3486 netdev_err(adapter->netdev,
3487 "Number of rx queues : 1\n");
3488 adapter->num_rx_queues = 1;
3492 adapter->intr.num_intrs = nvec_allocated;
3496 /* If we cannot allocate MSIx vectors use only one rx queue */
3497 dev_info(&adapter->pdev->dev,
3498 "Failed to enable MSI-X, error %d. "
3499 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3501 adapter->intr.type = VMXNET3_IT_MSI;
3504 if (adapter->intr.type == VMXNET3_IT_MSI) {
3505 if (!pci_enable_msi(adapter->pdev)) {
3506 adapter->num_rx_queues = 1;
3507 adapter->intr.num_intrs = 1;
3511 #endif /* CONFIG_PCI_MSI */
3513 adapter->num_rx_queues = 1;
3514 dev_info(&adapter->netdev->dev,
3515 "Using INTx interrupt, #Rx queues: 1.\n");
3516 adapter->intr.type = VMXNET3_IT_INTX;
3518 /* INT-X related setting */
3519 adapter->intr.num_intrs = 1;
3524 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3526 if (adapter->intr.type == VMXNET3_IT_MSIX)
3527 pci_disable_msix(adapter->pdev);
3528 else if (adapter->intr.type == VMXNET3_IT_MSI)
3529 pci_disable_msi(adapter->pdev);
3531 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3536 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3538 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3539 adapter->tx_timeout_count++;
3541 netdev_err(adapter->netdev, "tx hang\n");
3542 schedule_work(&adapter->work);
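/* Work item scheduled from the tx watchdog: quiesce, reset and re-activate
 * the device, unless another thread is already resetting it or the interface
 * has been closed in the meantime.
 */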
3547 vmxnet3_reset_work(struct work_struct *data)
3549 struct vmxnet3_adapter *adapter;
3551 adapter = container_of(data, struct vmxnet3_adapter, work);
3553 /* if another thread is resetting the device, no need to proceed */
3554 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3557 /* if the device is closed, we must leave it alone */
3559 if (netif_running(adapter->netdev)) {
3560 netdev_notice(adapter->netdev, "resetting\n");
3561 vmxnet3_quiesce_dev(adapter);
3562 vmxnet3_reset_dev(adapter);
3563 vmxnet3_activate_dev(adapter);
3565 netdev_info(adapter->netdev, "already closed\n");
3569 netif_wake_queue(adapter->netdev);
3570 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
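/* PCI probe: allocate the netdev and adapter, map the device, negotiate the
 * hardware/UPT revision, size the tx/rx queues, allocate the shared DMA
 * areas, set up interrupts and register the net device.
 */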
3575 vmxnet3_probe_device(struct pci_dev *pdev,
3576 const struct pci_device_id *id)
3578 static const struct net_device_ops vmxnet3_netdev_ops = {
3579 .ndo_open = vmxnet3_open,
3580 .ndo_stop = vmxnet3_close,
3581 .ndo_start_xmit = vmxnet3_xmit_frame,
3582 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3583 .ndo_change_mtu = vmxnet3_change_mtu,
3584 .ndo_fix_features = vmxnet3_fix_features,
3585 .ndo_set_features = vmxnet3_set_features,
3586 .ndo_features_check = vmxnet3_features_check,
3587 .ndo_get_stats64 = vmxnet3_get_stats64,
3588 .ndo_tx_timeout = vmxnet3_tx_timeout,
3589 .ndo_set_rx_mode = vmxnet3_set_mc,
3590 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3591 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3592 #ifdef CONFIG_NET_POLL_CONTROLLER
3593 .ndo_poll_controller = vmxnet3_netpoll,
3598 struct net_device *netdev;
3599 struct vmxnet3_adapter *adapter;
3605 unsigned long flags;
3607 if (!pci_msi_enabled())
3612 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3613 (int)num_online_cpus());
3619 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3620 (int)num_online_cpus());
3624 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3625 max(num_tx_queues, num_rx_queues));
3629 pci_set_drvdata(pdev, netdev);
3630 adapter = netdev_priv(netdev);
3631 adapter->netdev = netdev;
3632 adapter->pdev = pdev;
3634 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3635 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3636 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3638 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3640 dev_err(&pdev->dev, "dma_set_mask failed\n");
3644 spin_lock_init(&adapter->cmd_lock);
3645 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3646 sizeof(struct vmxnet3_adapter),
3648 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3649 dev_err(&pdev->dev, "Failed to map dma\n");
3653 adapter->shared = dma_alloc_coherent(
3654 &adapter->pdev->dev,
3655 sizeof(struct Vmxnet3_DriverShared),
3656 &adapter->shared_pa, GFP_KERNEL);
3657 if (!adapter->shared) {
3658 dev_err(&pdev->dev, "Failed to allocate memory\n");
3660 goto err_alloc_shared;
3663 err = vmxnet3_alloc_pci_resources(adapter);
3667 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3668 if (ver & (1 << VMXNET3_REV_7)) {
3669 VMXNET3_WRITE_BAR1_REG(adapter,
3671 1 << VMXNET3_REV_7);
3672 adapter->version = VMXNET3_REV_7 + 1;
3673 } else if (ver & (1 << VMXNET3_REV_6)) {
3674 VMXNET3_WRITE_BAR1_REG(adapter,
3676 1 << VMXNET3_REV_6);
3677 adapter->version = VMXNET3_REV_6 + 1;
3678 } else if (ver & (1 << VMXNET3_REV_5)) {
3679 VMXNET3_WRITE_BAR1_REG(adapter,
3681 1 << VMXNET3_REV_5);
3682 adapter->version = VMXNET3_REV_5 + 1;
3683 } else if (ver & (1 << VMXNET3_REV_4)) {
3684 VMXNET3_WRITE_BAR1_REG(adapter,
3686 1 << VMXNET3_REV_4);
3687 adapter->version = VMXNET3_REV_4 + 1;
3688 } else if (ver & (1 << VMXNET3_REV_3)) {
3689 VMXNET3_WRITE_BAR1_REG(adapter,
3691 1 << VMXNET3_REV_3);
3692 adapter->version = VMXNET3_REV_3 + 1;
3693 } else if (ver & (1 << VMXNET3_REV_2)) {
3694 VMXNET3_WRITE_BAR1_REG(adapter,
3696 1 << VMXNET3_REV_2);
3697 adapter->version = VMXNET3_REV_2 + 1;
3698 } else if (ver & (1 << VMXNET3_REV_1)) {
3699 VMXNET3_WRITE_BAR1_REG(adapter,
3701 1 << VMXNET3_REV_1);
3702 adapter->version = VMXNET3_REV_1 + 1;
3705 "Incompatible h/w version (0x%x) for adapter\n", ver);
3709 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3711 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3713 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3716 "Incompatible upt version (0x%x) for adapter\n", ver);
3721 if (VMXNET3_VERSION_GE_7(adapter)) {
3722 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
3723 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3724 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3725 adapter->dev_caps[0] = adapter->devcap_supported[0] &
3726 (1UL << VMXNET3_CAP_LARGE_BAR);
3728 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
3729 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
3730 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
3731 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
3732 (1UL << VMXNET3_CAP_OOORX_COMP);
3734 if (adapter->dev_caps[0])
3735 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3737 spin_lock_irqsave(&adapter->cmd_lock, flags);
3738 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3739 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3740 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3743 if (VMXNET3_VERSION_GE_7(adapter) &&
3744 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3745 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3746 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3747 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3749 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3750 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3751 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3754 if (VMXNET3_VERSION_GE_6(adapter)) {
3755 spin_lock_irqsave(&adapter->cmd_lock, flags);
3756 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3757 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3758 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3759 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3761 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3762 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3764 adapter->num_rx_queues = min(num_rx_queues,
3765 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3766 adapter->num_tx_queues = min(num_tx_queues,
3767 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3769 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3770 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3771 adapter->queuesExtEnabled = true;
3773 adapter->queuesExtEnabled = false;
3776 adapter->queuesExtEnabled = false;
3777 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3778 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3779 adapter->num_rx_queues = min(num_rx_queues,
3780 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3781 adapter->num_tx_queues = min(num_tx_queues,
3782 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3784 dev_info(&pdev->dev,
3785 "# of Tx queues : %d, # of Rx queues : %d\n",
3786 adapter->num_tx_queues, adapter->num_rx_queues);
3788 adapter->rx_buf_per_pkt = 1;
3790 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3791 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3792 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3793 &adapter->queue_desc_pa,
3796 if (!adapter->tqd_start) {
3797 dev_err(&pdev->dev, "Failed to allocate memory\n");
3801 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3802 adapter->num_tx_queues);
3804 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3805 sizeof(struct Vmxnet3_PMConf),
3806 &adapter->pm_conf_pa,
3808 if (adapter->pm_conf == NULL) {
3815 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3816 sizeof(struct UPT1_RSSConf),
3817 &adapter->rss_conf_pa,
3819 if (adapter->rss_conf == NULL) {
3823 #endif /* VMXNET3_RSS */
3825 if (VMXNET3_VERSION_GE_3(adapter)) {
3826 adapter->coal_conf =
3827 dma_alloc_coherent(&adapter->pdev->dev,
3828 sizeof(struct Vmxnet3_CoalesceScheme)
3830 &adapter->coal_conf_pa,
3832 if (!adapter->coal_conf) {
3836 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3837 adapter->default_coal_mode = true;
3840 if (VMXNET3_VERSION_GE_4(adapter)) {
3841 adapter->default_rss_fields = true;
3842 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3845 SET_NETDEV_DEV(netdev, &pdev->dev);
3846 vmxnet3_declare_features(adapter);
3848 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3849 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3851 if (adapter->num_tx_queues == adapter->num_rx_queues)
3852 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3854 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3856 vmxnet3_alloc_intr_resources(adapter);
3859 if (adapter->num_rx_queues > 1 &&
3860 adapter->intr.type == VMXNET3_IT_MSIX) {
3861 adapter->rss = true;
3862 netdev->hw_features |= NETIF_F_RXHASH;
3863 netdev->features |= NETIF_F_RXHASH;
3864 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3866 adapter->rss = false;
3870 vmxnet3_read_mac_addr(adapter, mac);
3871 dev_addr_set(netdev, mac);
3873 netdev->netdev_ops = &vmxnet3_netdev_ops;
3874 vmxnet3_set_ethtool_ops(netdev);
3875 netdev->watchdog_timeo = 5 * HZ;
3877 /* MTU range: 60 - 9190 */
3878 netdev->min_mtu = VMXNET3_MIN_MTU;
3879 if (VMXNET3_VERSION_GE_6(adapter))
3880 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
3882 netdev->max_mtu = VMXNET3_MAX_MTU;
3884 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3885 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3887 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3889 for (i = 0; i < adapter->num_rx_queues; i++) {
3890 netif_napi_add(adapter->netdev,
3891 &adapter->rx_queue[i].napi,
3892 vmxnet3_poll_rx_only);
3895 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3899 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3900 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3902 netif_carrier_off(netdev);
3903 err = register_netdev(netdev);
3906 dev_err(&pdev->dev, "Failed to register adapter\n");
3910 vmxnet3_check_link(adapter, false);
3914 if (VMXNET3_VERSION_GE_3(adapter)) {
3915 dma_free_coherent(&adapter->pdev->dev,
3916 sizeof(struct Vmxnet3_CoalesceScheme),
3917 adapter->coal_conf, adapter->coal_conf_pa);
3919 vmxnet3_free_intr_resources(adapter);
3922 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3923 adapter->rss_conf, adapter->rss_conf_pa);
3926 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3927 adapter->pm_conf, adapter->pm_conf_pa);
3929 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3930 adapter->queue_desc_pa);
3932 vmxnet3_free_pci_resources(adapter);
3934 dma_free_coherent(&adapter->pdev->dev,
3935 sizeof(struct Vmxnet3_DriverShared),
3936 adapter->shared, adapter->shared_pa);
3938 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3939 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
3941 free_netdev(netdev);
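/* PCI remove: unregister the netdev and release everything allocated during
 * probe (interrupt and PCI resources, queue descriptors, shared DMA areas).
 */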
3947 vmxnet3_remove_device(struct pci_dev *pdev)
3949 struct net_device *netdev = pci_get_drvdata(pdev);
3950 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3952 int num_rx_queues, rx_queues;
3953 unsigned long flags;
3957 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3958 (int)num_online_cpus());
3962 if (!VMXNET3_VERSION_GE_6(adapter)) {
3963 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3965 if (VMXNET3_VERSION_GE_6(adapter)) {
3966 spin_lock_irqsave(&adapter->cmd_lock, flags);
3967 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3968 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3969 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3970 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3972 rx_queues = (rx_queues >> 8) & 0xff;
3974 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3975 num_rx_queues = min(num_rx_queues, rx_queues);
3977 num_rx_queues = min(num_rx_queues,
3978 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3981 cancel_work_sync(&adapter->work);
3983 unregister_netdev(netdev);
3985 vmxnet3_free_intr_resources(adapter);
3986 vmxnet3_free_pci_resources(adapter);
3987 if (VMXNET3_VERSION_GE_3(adapter)) {
3988 dma_free_coherent(&adapter->pdev->dev,
3989 sizeof(struct Vmxnet3_CoalesceScheme),
3990 adapter->coal_conf, adapter->coal_conf_pa);
3993 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3994 adapter->rss_conf, adapter->rss_conf_pa);
3996 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3997 adapter->pm_conf, adapter->pm_conf_pa);
3999 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4000 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4001 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4002 adapter->queue_desc_pa);
4003 dma_free_coherent(&adapter->pdev->dev,
4004 sizeof(struct Vmxnet3_DriverShared),
4005 adapter->shared, adapter->shared_pa);
4006 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4007 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4008 free_netdev(netdev);
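/* Shutdown handler: quiesce the device and disable its interrupts so it is
 * left idle when the system shuts down or reboots.
 */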
4011 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4013 struct net_device *netdev = pci_get_drvdata(pdev);
4014 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4015 unsigned long flags;
4017 /* Reset_work may be in the middle of resetting the device, wait for its completion. */
4020 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4021 usleep_range(1000, 2000);
4023 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4025 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4028 spin_lock_irqsave(&adapter->cmd_lock, flags);
4029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4030 VMXNET3_CMD_QUIESCE_DEV);
4031 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4032 vmxnet3_disable_all_intrs(adapter);
4034 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
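/* PM suspend/freeze handler: tear down interrupts, program the wake-up
 * filters selected by the configured WoL options (unicast, ARP, magic
 * packet) and put the device into a low-power state.
 */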
4041 vmxnet3_suspend(struct device *device)
4043 struct pci_dev *pdev = to_pci_dev(device);
4044 struct net_device *netdev = pci_get_drvdata(pdev);
4045 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4046 struct Vmxnet3_PMConf *pmConf;
4047 struct ethhdr *ehdr;
4048 struct arphdr *ahdr;
4050 struct in_device *in_dev;
4051 struct in_ifaddr *ifa;
4052 unsigned long flags;
4055 if (!netif_running(netdev))
4058 for (i = 0; i < adapter->num_rx_queues; i++)
4059 napi_disable(&adapter->rx_queue[i].napi);
4061 vmxnet3_disable_all_intrs(adapter);
4062 vmxnet3_free_irqs(adapter);
4063 vmxnet3_free_intr_resources(adapter);
4065 netif_device_detach(netdev);
4067 /* Create wake-up filters. */
4068 pmConf = adapter->pm_conf;
4069 memset(pmConf, 0, sizeof(*pmConf));
4071 if (adapter->wol & WAKE_UCAST) {
4072 pmConf->filters[i].patternSize = ETH_ALEN;
4073 pmConf->filters[i].maskSize = 1;
4074 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4075 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4077 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4081 if (adapter->wol & WAKE_ARP) {
4084 in_dev = __in_dev_get_rcu(netdev);
4090 ifa = rcu_dereference(in_dev->ifa_list);
4096 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4097 sizeof(struct arphdr) + /* ARP header */
4098 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4099 2 * sizeof(u32); /*2 IPv4 addresses */
4100 pmConf->filters[i].maskSize =
4101 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4103 /* ETH_P_ARP in Ethernet header. */
4104 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4105 ehdr->h_proto = htons(ETH_P_ARP);
4107 /* ARPOP_REQUEST in ARP header. */
4108 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4109 ahdr->ar_op = htons(ARPOP_REQUEST);
4110 arpreq = (u8 *)(ahdr + 1);
4112 /* The Unicast IPv4 address in 'tip' field. */
4113 arpreq += 2 * ETH_ALEN + sizeof(u32);
4114 *(__be32 *)arpreq = ifa->ifa_address;
4118 /* The mask for the relevant bits. */
4119 pmConf->filters[i].mask[0] = 0x00;
4120 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4121 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4122 pmConf->filters[i].mask[3] = 0x00;
4123 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4124 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4126 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4131 if (adapter->wol & WAKE_MAGIC)
4132 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4134 pmConf->numFilters = i;
4136 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4137 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4139 adapter->shared->devRead.pmConfDesc.confPA =
4140 cpu_to_le64(adapter->pm_conf_pa);
4142 spin_lock_irqsave(&adapter->cmd_lock, flags);
4143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4144 VMXNET3_CMD_UPDATE_PMCFG);
4145 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4147 pci_save_state(pdev);
4148 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4150 pci_disable_device(pdev);
4151 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
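/* PM resume/restore handler: re-enable the PCI device, reallocate interrupt
 * resources and fully reinitialize the device, since its state may not have
 * been preserved across suspend.
 */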
4158 vmxnet3_resume(struct device *device)
4161 unsigned long flags;
4162 struct pci_dev *pdev = to_pci_dev(device);
4163 struct net_device *netdev = pci_get_drvdata(pdev);
4164 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4166 if (!netif_running(netdev))
4169 pci_set_power_state(pdev, PCI_D0);
4170 pci_restore_state(pdev);
4171 err = pci_enable_device_mem(pdev);
4175 pci_enable_wake(pdev, PCI_D0, 0);
4177 vmxnet3_alloc_intr_resources(adapter);
4179 /* During hibernate and suspend, the device has to be reinitialized
4180 * because its state may not have been preserved.
4183 /* Need not check adapter state as other reset tasks cannot run during resume. */
4186 spin_lock_irqsave(&adapter->cmd_lock, flags);
4187 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4188 VMXNET3_CMD_QUIESCE_DEV);
4189 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4190 vmxnet3_tq_cleanup_all(adapter);
4191 vmxnet3_rq_cleanup_all(adapter);
4193 vmxnet3_reset_dev(adapter);
4194 err = vmxnet3_activate_dev(adapter);
4197 "failed to re-activate on resume, error: %d", err);
4198 vmxnet3_force_close(adapter);
4201 netif_device_attach(netdev);
4206 static const struct dev_pm_ops vmxnet3_pm_ops = {
4207 .suspend = vmxnet3_suspend,
4208 .resume = vmxnet3_resume,
4209 .freeze = vmxnet3_suspend,
4210 .restore = vmxnet3_resume,
4214 static struct pci_driver vmxnet3_driver = {
4215 .name = vmxnet3_driver_name,
4216 .id_table = vmxnet3_pciid_table,
4217 .probe = vmxnet3_probe_device,
4218 .remove = vmxnet3_remove_device,
4219 .shutdown = vmxnet3_shutdown_device,
4221 .driver.pm = &vmxnet3_pm_ops,
4227 vmxnet3_init_module(void)
4229 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4230 VMXNET3_DRIVER_VERSION_REPORT);
4231 return pci_register_driver(&vmxnet3_driver);
4234 module_init(vmxnet3_init_module);
4238 vmxnet3_exit_module(void)
4240 pci_unregister_driver(&vmxnet3_driver);
4243 module_exit(vmxnet3_exit_module);
4245 MODULE_AUTHOR("VMware, Inc.");
4246 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4247 MODULE_LICENSE("GPL v2");
4248 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);