/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#include "e1000_82575.h"
#include "igb_internal.h"
/*********************************************************************
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static igb_vendor_info_t igb_vendor_info_array[] =
{
	{ 0x8086, E1000_DEV_ID_I210_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_I210_COPPER_IT,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_I210_COPPER_OEM1,
						PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_I210_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_I210_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_I210_SGMII,	PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	igb_read_mac_addr(struct e1000_hw *hw);
static int	igb_allocate_pci_resources(struct adapter *adapter);
static void	igb_free_pci_resources(struct adapter *adapter);
static void	igb_reset(struct adapter *adapter);
static int	igb_allocate_queues(struct adapter *adapter);
static void	igb_setup_transmit_structures(struct adapter *adapter);
static void	igb_setup_transmit_ring(struct tx_ring *txr);
static void	igb_initialize_transmit_units(struct adapter *adapter);
static void	igb_free_transmit_structures(struct adapter *adapter);
static void	igb_tx_ctx_setup(struct tx_ring *txr, struct igb_packet *packet);
int
igb_probe(device_t *dev)
{
	igb_vendor_info_t *ent;

	if (NULL == dev) return EINVAL;

	if (dev->pci_vendor_id != IGB_VENDOR_ID)
		return ENXIO;

	ent = igb_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((dev->pci_vendor_id == ent->vendor_id) &&
		    (dev->pci_device_id == ent->device_id)) {
			return 0;
		}
		ent++;
	}

	return ENXIO;
}
int
igb_attach(char *dev_path, device_t *pdev)
{
	struct adapter *adapter;
	struct igb_bind_cmd bind;
	int error = 0;
	if (NULL == pdev) return EINVAL;

	adapter = (struct adapter *)pdev->private_data;

	if (NULL != adapter) return EBUSY;

	/* allocate an adapter */
	pdev->private_data = malloc(sizeof(struct adapter));
	if (NULL == pdev->private_data) return ENXIO;

	memset(pdev->private_data, 0, sizeof(struct adapter));

	adapter = (struct adapter *)pdev->private_data;

	adapter->ldev = open("/dev/igb_avb", O_RDWR);
	if (adapter->ldev < 0) {
		free(pdev->private_data);
		pdev->private_data = NULL;
		return ENXIO;
	}
	/*
	 * dev_path should look something like "0000:01:00.0"
	 */
	strncpy(bind.iface, dev_path, IGB_BIND_NAMESZ - 1);
	bind.iface[IGB_BIND_NAMESZ - 1] = '\0';
	if (ioctl(adapter->ldev, IGB_BIND, &bind) < 0) {
		close(adapter->ldev);
		free(pdev->private_data);
		pdev->private_data = NULL;
		return ENXIO;
	}
	adapter->csr.paddr = 0;
	adapter->csr.mmap_size = bind.mmap_size;
	/* Determine hardware and mac info */
	adapter->hw.vendor_id = pdev->pci_vendor_id;
	adapter->hw.device_id = pdev->pci_device_id;
	adapter->hw.revision_id = 0;
	adapter->hw.subsystem_vendor_id = 0;
	adapter->hw.subsystem_device_id = 0;

	/* Set MAC type early for PCI setup */
	adapter->hw.mac.type = e1000_i210;
	/* Setup PCI resources */
	if ((error = igb_allocate_pci_resources(adapter))) {
		goto err_pci;
	}
	/*
	 * Set the frame limits assuming
	 * standard Ethernet sized frames.
	 */
	adapter->max_frame_size = 1518;
	adapter->min_frame_size = 64;

	adapter->num_queues = 2; /* XXX parameterize this */
	/*
	** Allocate and Setup Queues
	*/
	if ((error = igb_allocate_queues(adapter))) {
		goto err_pci;
	}
	/*
	** Start from a known state, which means
	** reset the transmit queues we own to a known state.
	*/
	igb_reset(adapter);
	/*
	** Copy the permanent MAC address out of the EEPROM
	*/
	if (igb_read_mac_addr(&adapter->hw) < 0) {
		error = EIO;
		goto err_late;
	}

	return 0;

err_late:
	igb_free_transmit_structures(adapter);
err_pci:
	igb_free_pci_resources(adapter);
	close(adapter->ldev);
	free(pdev->private_data);
	pdev->private_data = NULL;

	return error;
}
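/*
 * Illustrative lifecycle sketch (not part of the original driver): probe,
 * attach, init, then detach. The PCI path "0000:01:00.0" and the
 * IGB_AVB_EXAMPLE guard are assumptions for illustration only.
 */
#ifdef IGB_AVB_EXAMPLE
static int example_open_close(void)
{
	device_t dev;
	int err;

	memset(&dev, 0, sizeof(dev));
	dev.pci_vendor_id = IGB_VENDOR_ID;
	dev.pci_device_id = E1000_DEV_ID_I210_COPPER;

	err = igb_probe(&dev);
	if (err)
		return err;

	err = igb_attach("0000:01:00.0", &dev);
	if (err)
		return err;

	err = igb_init(&dev);	/* resets and sets up the Tx rings */

	/* ... igb_dma_malloc_page() / igb_xmit() / igb_clean() ... */

	igb_detach(&dev);
	return err;
}
#endif /* IGB_AVB_EXAMPLE */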
int
igb_detach(device_t *dev)
{
	struct adapter *adapter;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	igb_free_transmit_structures(adapter);
	igb_free_pci_resources(adapter);

	close(adapter->ldev);
	free(dev->private_data);
	dev->private_data = NULL;

	return 0;
}
int
igb_suspend(device_t *dev)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	struct e1000_hw *hw;
	u_int32_t txdctl;
	int i;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	txr = adapter->tx_rings;
	hw = &adapter->hw;

	/* stop but don't reset the Tx Descriptor Rings */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		txdctl = 0;
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
		txr->queue_status = IGB_QUEUE_IDLE;
	}

	return 0;
}
int
igb_resume(device_t *dev)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	struct e1000_hw *hw;
	u_int32_t txdctl;
	int i;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	txr = adapter->tx_rings;
	hw = &adapter->hw;

	/* resume but don't reset the Tx Descriptor Rings */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		txdctl = 0;
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_PRIORITY;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
		txr->queue_status = IGB_QUEUE_WORKING;
	}

	return 0;
}
int
igb_init(device_t *dev)
{
	struct adapter *adapter;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	igb_reset(adapter);

	/* Prepare transmit descriptors and buffers */
	igb_setup_transmit_structures(adapter);
	igb_initialize_transmit_units(adapter);

	return 0;
}
static void
igb_reset(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct e1000_hw *hw = &adapter->hw;
	u_int32_t txdctl;
	int i;

	/* Setup the Tx Descriptor Rings, leave queues idle */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 bus_addr = txr->txdma.paddr;

		txdctl = 0;
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);

		/* reset the descriptor head/tail */
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (u_int32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (u_int32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		txr->queue_status = IGB_QUEUE_IDLE;
	}
}
static int
igb_read_mac_addr(struct e1000_hw *hw)
{
	u_int32_t rar_high, rar_low;
	int i;

	rar_high = E1000_READ_REG(hw, E1000_RAH(0));
	rar_low = E1000_READ_REG(hw, E1000_RAL(0));

	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));

	for (i = 0; i < ETH_ADDR_LEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return E1000_SUCCESS;
}
static int
igb_allocate_pci_resources(struct adapter *adapter)
{
	int dev = adapter->ldev;

	adapter->hw.hw_addr = (u8 *)mmap(NULL,
	    adapter->csr.mmap_size,
	    PROT_READ | PROT_WRITE,
	    MAP_SHARED, dev, 0);

	if (MAP_FAILED == adapter->hw.hw_addr)
		return ENXIO;

	return 0;
}
static void
igb_free_pci_resources(struct adapter *adapter)
{
	munmap(adapter->hw.hw_addr, adapter->csr.mmap_size);
}
/*********************************************************************
 *
 *  Manage DMA'able memory.
 *
 **********************************************************************/
int
igb_dma_malloc_page(device_t *dev, struct igb_dma_alloc *dma)
{
	struct adapter *adapter;
	struct igb_buf_cmd ubuf;
	int error;

	if (NULL == dev) return EINVAL;
	if (NULL == dma) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	error = ioctl(adapter->ldev, IGB_MAPBUF, &ubuf);
	if (error < 0) { error = ENOMEM; goto err; }

	dma->dma_paddr = ubuf.physaddr;
	dma->mmap_size = ubuf.mmap_size;
	dma->dma_vaddr = (void *)mmap(NULL,
	    ubuf.mmap_size,
	    PROT_READ | PROT_WRITE,
	    MAP_SHARED, adapter->ldev, ubuf.physaddr);

	if (MAP_FAILED == dma->dma_vaddr) {
		error = ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
void
igb_dma_free_page(device_t *dev, struct igb_dma_alloc *dma)
{
	struct adapter *adapter;
	struct igb_buf_cmd ubuf;

	if (NULL == dev) return;
	if (NULL == dma) return;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return;

	munmap(dma->dma_vaddr, dma->mmap_size);

	ubuf.physaddr = dma->dma_paddr;
	ioctl(adapter->ldev, IGB_UNMAPBUF, &ubuf);

	dma->dma_vaddr = NULL;
}
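/*
 * Illustrative sketch (not part of the original driver): pairing
 * igb_dma_malloc_page() with igb_dma_free_page(). dma_vaddr is the
 * CPU-visible mapping; dma_paddr is the address the hardware DMAs from
 * (used for txd->read.buffer_addr in igb_xmit()).
 */
#ifdef IGB_AVB_EXAMPLE
static int example_dma_page(device_t *dev)
{
	struct igb_dma_alloc page;
	int err;

	err = igb_dma_malloc_page(dev, &page);
	if (err)
		return err;

	/* fill the page with frame data before queueing it for transmit */
	memset(page.dma_vaddr, 0, page.mmap_size);

	igb_dma_free_page(dev, &page);
	return 0;
}
#endif /* IGB_AVB_EXAMPLE */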
/*********************************************************************
 *
 *  Allocate memory for the transmit rings and the descriptors
 *  associated with each; called only once at attach.
 *
 **********************************************************************/
static int
igb_allocate_queues(struct adapter *adapter)
{
	int dev = adapter->ldev;
	struct igb_buf_cmd ubuf;
	int i, error = E1000_SUCCESS;

	/* allocate the TX ring struct memory */
	adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
	    adapter->num_queues);
	if (NULL == adapter->tx_rings) {
		return ENOMEM;
	}
	memset(adapter->tx_rings, 0, sizeof(struct tx_ring) * adapter->num_queues);

	for (i = 0; i < adapter->num_queues; i++) {
		error = ioctl(dev, IGB_MAPRING, &ubuf);
		if (error < 0) {
			error = ENOMEM;
			goto fail;
		}
		adapter->tx_rings[i].txdma.paddr = ubuf.physaddr;
		adapter->tx_rings[i].txdma.mmap_size = ubuf.mmap_size;
		adapter->tx_rings[i].tx_base = NULL;
		adapter->tx_rings[i].tx_base = (struct e1000_tx_desc *)mmap(NULL,
		    ubuf.mmap_size,
		    PROT_READ | PROT_WRITE,
		    MAP_SHARED, dev, ubuf.physaddr);
		if (MAP_FAILED == adapter->tx_rings[i].tx_base) {
			error = ENOMEM;
			goto fail;
		}
		adapter->tx_rings[i].adapter = adapter;
		adapter->tx_rings[i].me = i;
		/* XXX Initialize a TX lock ?? */
		adapter->num_tx_desc = ubuf.mmap_size / sizeof(union e1000_adv_tx_desc);
		memset((void *)adapter->tx_rings[i].tx_base, 0, ubuf.mmap_size);
		adapter->tx_rings[i].tx_buffers = (struct igb_tx_buffer *)
		    malloc(sizeof(struct igb_tx_buffer) * adapter->num_tx_desc);
		if (NULL == adapter->tx_rings[i].tx_buffers) {
			error = ENOMEM;
			goto fail;
		}
		memset(adapter->tx_rings[i].tx_buffers, 0,
		    sizeof(struct igb_tx_buffer) * adapter->num_tx_desc);
	}

	return 0;

fail:
	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->tx_rings[i].tx_base)
			munmap(adapter->tx_rings[i].tx_base,
			       adapter->tx_rings[i].txdma.mmap_size);
		ioctl(dev, IGB_UNMAPRING, &ubuf);
	}
	free(adapter->tx_rings);
	adapter->tx_rings = NULL;
	return error;
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
igb_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;

	/* Clear the old descriptor contents */
	memset((void *)txr->tx_base, 0,
	    sizeof(union e1000_adv_tx_desc) * adapter->num_tx_desc);
	memset(txr->tx_buffers, 0,
	    sizeof(struct igb_tx_buffer) * adapter->num_tx_desc);

	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static void
igb_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++)
		igb_setup_transmit_ring(txr);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
igb_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct e1000_hw *hw = &adapter->hw;
	u_int32_t txdctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		/* disable the queue while it is configured */
		txdctl = 0;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		txr->queue_status = IGB_QUEUE_IDLE;

		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_PRIORITY;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
igb_free_transmit_structures(struct adapter *adapter)
{
	int i;
	struct igb_buf_cmd ubuf;

	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->tx_rings[i].tx_base)
			munmap(adapter->tx_rings[i].tx_base,
			       adapter->tx_rings[i].txdma.mmap_size);
		ioctl(adapter->ldev, IGB_UNMAPRING, &ubuf);
		free(adapter->tx_rings[i].tx_buffers);
	}

	free(adapter->tx_rings);
	adapter->tx_rings = NULL;
}
/*********************************************************************
 *
 *  Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static void
igb_tx_ctx_setup(struct tx_ring *txr, struct igb_packet *packet)
{
	struct adapter *adapter = txr->adapter;
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buffer *tx_buffer;
	u_int32_t type_tucmd_mlhl;
	u_int64_t remapped_time;
	int ctxd;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	type_tucmd_mlhl = E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = 0;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->mss_l4len_idx = 0;

	/* remap the 64-bit nsec time to the value represented in the desc */
	remapped_time = packet->attime % 1000000000;
	remapped_time /= 32; /* scale to 32 nsec increments */

	TXD->seqnum_seed = remapped_time;
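	/*
	 * Worked example (illustrative numbers): for packet->attime =
	 * 1000000123456 ns, the time within the current second is
	 * 1000000123456 % 1000000000 = 123456 ns, and 123456 / 32 = 3858
	 * is the 32 nsec-granularity value written to seqnum_seed.
	 */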
	tx_buffer->packet = NULL;
	tx_buffer->next_eop = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
}
/*********************************************************************
 *
 *  This routine maps a single buffer to an Advanced TX descriptor.
 *  Returns ENOSPC if we run low on tx descriptors and the app needs
 *  to clean up descriptors.
 *
 *  This is a simplified routine that doesn't do LSO, checksum offloads,
 *  multiple fragments, etc. The provided buffers are assumed to have
 *  been previously mapped with the provided dma_malloc_page routines.
 *
 **********************************************************************/
int
igb_xmit(device_t *dev, unsigned int queue_index, struct igb_packet *packet)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *txd = NULL;
	u32 cmd_type_len, olinfo_status = 0;
	int i, first, last = 0;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	if (NULL == packet) return EINVAL;

	if (queue_index >= adapter->num_queues)
		return EINVAL;

	packet->next = NULL; /* used for cleanup */

	txr = &adapter->tx_rings[queue_index];

	/* Set basic descriptor constants */
	cmd_type_len = E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	/* cmd_type_len |= E1000_ADVTXD_DCMD_VLE; to enable VLAN insertion */

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[first];

	/*
	** Make sure we don't overrun the ring,
	** we need nsegs descriptors and one for
	** the context descriptor used for the
	** launch time.
	*/
	if (txr->tx_avail <= 2)
		return ENOSPC;

	/*
	 * Set up the context descriptor to specify
	 * launch times for the packet.
	 */
	igb_tx_ctx_setup(txr, packet);

	/*
	 * for performance monitoring, report the DMA time of the tx desc
	 * writeback, which is performed immediately after the tx buffer
	 * is read from memory.
	 */
	olinfo_status |= E1000_TXD_DMA_TXDWB;

	/* set payload length */
	olinfo_status |= packet->len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;

	/* we assume every packet is contiguous */

	tx_buffer = &txr->tx_buffers[i];
	txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];

	txd->read.buffer_addr = htole64(packet->map.paddr + packet->offset);
	txd->read.cmd_type_len = htole32(cmd_type_len | packet->len);
	txd->read.olinfo_status = htole32(olinfo_status);

	last = i;
	if (++i == adapter->num_tx_desc)
		i = 0;
	tx_buffer->packet = NULL;
	tx_buffer->next_eop = -1;

	txr->next_avail_desc = i;

	tx_buffer->packet = packet;

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	txd->read.cmd_type_len |=
	    htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS);

	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &txr->tx_buffers[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);

	return 0;
}
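/*
 * Illustrative transmit loop (not part of the original driver): queue a
 * packet on queue 0 and reclaim completed descriptors when the ring fills.
 * Assumes 'pkt' points into a page from igb_dma_malloc_page() with
 * map.paddr, offset, and len already set.
 */
#ifdef IGB_AVB_EXAMPLE
static void example_xmit(device_t *dev, struct igb_packet *pkt)
{
	struct igb_packet *done = NULL;

	while (igb_xmit(dev, 0, pkt) == ENOSPC) {
		/* ring is full: reclaim completed packets, then retry */
		igb_clean(dev, &done);
	}

	/* 'done' is a list linked through pkt->next, ready for reuse */
}
#endif /* IGB_AVB_EXAMPLE */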
void
igb_trigger(device_t *dev, u_int32_t data)
{
	struct adapter *adapter;

	if (NULL == dev) return;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return;

	E1000_WRITE_REG(&(adapter->hw), E1000_WUS, data);
}

void
igb_writereg(device_t *dev, u_int32_t reg, u_int32_t data)
{
	struct adapter *adapter;

	if (NULL == dev) return;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return;

	E1000_WRITE_REG(&(adapter->hw), reg, data);
}

void
igb_readreg(device_t *dev, u_int32_t reg, u_int32_t *data)
{
	struct adapter *adapter;

	if (NULL == dev) return;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return;

	if (NULL == data) return;

	*data = E1000_READ_REG(&(adapter->hw), reg);
}
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then return the linked list of associated
 *  resources.
 *
 **********************************************************************/
void
igb_clean(device_t *dev, struct igb_packet **cleaned_packets)
{
	struct adapter *adapter;
	struct tx_ring *txr;
	int first, last, done, processed;
	struct igb_tx_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct igb_packet *last_reclaimed = NULL;
	int i;

	if (NULL == dev) return;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return;

	if (NULL == cleaned_packets) return;

	*cleaned_packets = NULL; /* nothing reclaimed yet */

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];

		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->queue_status = IGB_QUEUE_IDLE;
			continue;
		}

		processed = 0;
		first = txr->next_to_clean;
		tx_desc = &txr->tx_base[first];
		tx_buffer = &txr->tx_buffers[first];
		last = tx_buffer->next_eop;
		eop_desc = &txr->tx_base[last];

		/*
		 * What this does is get the index of the
		 * first descriptor AFTER the EOP of the
		 * first packet, that way we can do the
		 * simple comparison on the inner while loop.
		 */
		if (++last == adapter->num_tx_desc)
			last = 0;
		done = last;

		while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
			/* We clean the range of the packet */
			while (first != done) {
				if (tx_buffer->packet) {
					tx_buffer->packet->dmatime =
					    (0xffffffff) & tx_desc->buffer_addr;
					/* tx_buffer->packet->dmatime += (tx_desc->buffer_addr >> 32) * 1000000000; */
					tx_buffer->packet->len;
					if (*cleaned_packets == NULL)
						*cleaned_packets = tx_buffer->packet;
					else
						last_reclaimed->next = tx_buffer->packet;
					last_reclaimed = tx_buffer->packet;

					tx_buffer->packet = NULL;
				}
				tx_buffer->next_eop = -1;
				tx_desc->upper.data = 0;
				tx_desc->lower.data = 0;
				tx_desc->buffer_addr = 0;
				++txr->tx_avail;
				++processed;

				if (++first == adapter->num_tx_desc)
					first = 0;

				tx_buffer = &txr->tx_buffers[first];
				tx_desc = &txr->tx_base[first];
			}
			/* See if we can continue to the next packet */
			last = tx_buffer->next_eop;
			if (last != -1) {
				eop_desc = &txr->tx_base[last];
				/* Get new done point */
				if (++last == adapter->num_tx_desc)
					last = 0;
				done = last;
			} else
				break;
		}

		txr->next_to_clean = first;

		if (txr->tx_avail >= IGB_QUEUE_THRESHOLD)
			txr->queue_status &= ~IGB_QUEUE_DEPLETED;
	}
}
#define rdtscll(val) __asm__ __volatile__("rdtsc" : "=A" (val))
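/*
 * Note: the "=A" constraint pairs edx:eax only on 32-bit x86; on x86-64
 * it does not return the full counter. A 64-bit-safe variant (a sketch,
 * assuming GCC-style inline asm) would be:
 */
#ifdef IGB_AVB_EXAMPLE
#define rdtscll64(val) do {						\
	u_int32_t lo_, hi_;						\
	__asm__ __volatile__("rdtsc" : "=a" (lo_), "=d" (hi_));		\
	(val) = ((u_int64_t)hi_ << 32) | lo_;				\
} while (0)
#endif /* IGB_AVB_EXAMPLE */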
int
igb_get_wallclock(device_t *dev, u_int64_t *curtime, u_int64_t *rdtsc)
{
	struct e1000_hw *hw;
	u_int64_t t0, t1;
	u_int32_t timh, timl;
	struct adapter *adapter;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;
	if (NULL == curtime) return EINVAL;
	if (NULL == rdtsc) return EINVAL;

	hw = &adapter->hw;

	/* sample the timestamp bracketed by the RDTSC */
	rdtscll(t0);
	E1000_WRITE_REG(hw, E1000_TSAUXC, E1000_TSAUXC_SAMP_AUTO);
	rdtscll(t1);

	timl = E1000_READ_REG(hw, E1000_AUXSTMPL0);
	timh = E1000_READ_REG(hw, E1000_AUXSTMPH0);

	*curtime = (u_int64_t)timh * 1000000000 + (u_int64_t)timl;
	*rdtsc = (t1 - t0) / 2 + t0; /* average */

	return 0;
}
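/*
 * Illustrative use (not part of the original driver): sampling the
 * (network time, TSC) pair twice to estimate the TSC rate against the
 * PTP clock. The sleep() interval and double arithmetic are assumptions
 * for illustration only.
 */
#ifdef IGB_AVB_EXAMPLE
static void example_clock_ratio(device_t *dev)
{
	u_int64_t net0, net1, tsc0, tsc1;
	double ratio;

	igb_get_wallclock(dev, &net0, &tsc0);
	sleep(1);
	igb_get_wallclock(dev, &net1, &tsc1);

	/* TSC ticks per nanosecond of network time (approximate) */
	ratio = (double)(tsc1 - tsc0) / (double)(net1 - net0);
	(void)ratio;
}
#endif /* IGB_AVB_EXAMPLE */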
int
igb_set_class_bandwidth(device_t *dev, u_int32_t class_a, u_int32_t class_b,
    u_int32_t tpktsz)
{
	u_int32_t tqavctrl;
	u_int32_t tqavcc0, tqavcc1;
	u_int32_t tqavhc0, tqavhc1;
	u_int32_t class_a_idle, class_b_idle;
	u_int32_t linkrate;
	struct adapter *adapter;
	struct e1000_hw *hw;
	struct igb_link_cmd link;
	int err;

	if (NULL == dev) return EINVAL;
	adapter = (struct adapter *)dev->private_data;
	if (NULL == adapter) return ENXIO;

	hw = &adapter->hw;

	/* get current link speed */
	err = ioctl(adapter->ldev, IGB_LINKSPEED, &link);
	if (err) return ENXIO;

	if (0 == link.up) return EINVAL;
	if (link.speed < 100) return EINVAL;
	if (link.duplex != FULL_DUPLEX) return EINVAL;

	if ((class_a + class_b) > 75) return EINVAL;
	if ((tpktsz < 64) || (tpktsz > 2000)) return EINVAL;

	tqavctrl = E1000_READ_REG(hw, E1000_TQAVCTRL);

	if ((class_a + class_b) == 0) {
		/* disable the Qav shaper */
		tqavctrl &= ~E1000_TQAVCTRL_TX_ARB;
		E1000_WRITE_REG(hw, E1000_TQAVCTRL, tqavctrl);
		return 0;
	}

	tqavcc0 = E1000_TQAVCC_QUEUEMODE;
	tqavcc1 = E1000_TQAVCC_QUEUEMODE;

	if (link.speed == 100)
		linkrate = E1000_TQAVCC_LINKRATE / 10;
	else
		linkrate = E1000_TQAVCC_LINKRATE;

	/* XXX convert to fixed point or floating point percents */
	class_a_idle = (class_a * 2 * linkrate / 100); /* 'class_a' is a percent */
	class_b_idle = (class_b * 2 * linkrate / 100);
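	/*
	 * Worked example (illustrative): with class_a = 25 and class_b = 25
	 * at 1 Gbps, each idle slope is 25 * 2 * E1000_TQAVCC_LINKRATE / 100,
	 * i.e. half of the doubled link-rate credit constant; at 100 Mbps the
	 * same percentages scale against E1000_TQAVCC_LINKRATE / 10.
	 */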
	tqavcc0 |= class_a_idle;
	tqavcc1 |= class_b_idle;

	/*
	 * The datasheet gives a formula for configuring the high credit
	 * threshold, but it is only relevant when the high priority SR
	 * queues are internally pre-empted by manageability traffic or low
	 * power proxy modes; if the SR queues are pre-empted, they will
	 * burst more packets than expected. So if you enable manageability
	 * or proxy modes while running AVB traffic, program the high credit
	 * thresholds to prevent non-compliant packet bursts. Be aware that
	 * a pre-empted stream transmits less bandwidth than it reserved,
	 * which can cause an underrun at the listener.
	 */
	tqavhc0 = 0xFFFFFFFF;
	tqavhc1 = 0xFFFFFFFF;

	/* implicitly enable the Qav shaper */
	tqavctrl |= E1000_TQAVCTRL_TX_ARB;
	E1000_WRITE_REG(hw, E1000_TQAVHC(0), tqavhc0);
	E1000_WRITE_REG(hw, E1000_TQAVCC(0), tqavcc0);
	E1000_WRITE_REG(hw, E1000_TQAVHC(1), tqavhc1);
	E1000_WRITE_REG(hw, E1000_TQAVCC(1), tqavcc1);
	E1000_WRITE_REG(hw, E1000_TQAVCTRL, tqavctrl);

	return 0;
}
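/*
 * Illustrative call (not part of the original driver): reserve 25% of the
 * link for Class A and 25% for Class B, assuming a typical 1500-byte AVB
 * payload. The percentages must sum to 75 or less.
 */
#ifdef IGB_AVB_EXAMPLE
static int example_shaper(device_t *dev)
{
	return igb_set_class_bandwidth(dev, 25, 25, 1500);
}
#endif /* IGB_AVB_EXAMPLE */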