/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "falcon.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)

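/* With EFX_TXQ_MASK one less than the ring size, this threshold is
 * roughly half the ring: efx_xmit_done() restarts a stopped queue
 * once the fill level drops below it.
 */
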
/* We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}

/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}

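/* Illustration of the nesting: if two channels each call
 * efx_stop_queue(), netif_stop_count reaches 2 and the first
 * efx_wake_queue() merely decrements it; the netif queue is only
 * actually woken by the second, matching call, when
 * atomic_dec_and_lock() takes the count to zero.
 */
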
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

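/* Note that efx_dequeue_buffer() deliberately leaves buffer->len and
 * buffer->continuation untouched; each caller resets them itself (see
 * efx_dequeue_buffers() and the unwind path of efx_enqueue_skb()).
 */
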
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

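/* Worked example: for a dma_addr ending in 0x2ff0,
 * (~dma_addr & 0xfff) + 1 == 0x10, so at most 16 bytes go into this
 * descriptor and the next one starts exactly on the 4K boundary at
 * 0x3000.
 */
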
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = EFX_TXQ_MASK - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}
			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);
		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	return NETDEV_TX_OK;
 pci_err:
	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

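/* The insert/read counters above are free-running; only the low bits
 * (counter & EFX_TXQ_MASK) index the ring.  For example, with an
 * EFX_TXQ_SIZE of 1024, insert_count == 1030 and old_read_count ==
 * 1020 give a fill level of 10 and ring indices 6 and 1020.
 */
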
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & EFX_TXQ_MASK;
	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
	else
		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(efx);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

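/* The smp_mb() above pairs with the ++stopped/smp_mb()/re-read
 * sequence in efx_enqueue_skb() and efx_tx_queue_insert(): either the
 * enqueue path re-reads read_count and sees the space freed here, or
 * this path sees stopped set and wakes the queue.
 */
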
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= EFX_TXQ_MASK; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = falcon_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	falcon_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	falcon_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->efx);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
	falcon_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

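/* Illustrative sizing: a typical 54-byte Ethernet + IPv4 + TCP header
 * needs TSOH_SIZE(54) == sizeof(struct efx_tso_header) + TSOH_OFFSET
 * + 54 bytes (under 80 bytes on 64-bit builds), so it fits in a
 * standard block and is served from the free list, not the heap.
 */
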
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)

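/* E.g. for an untagged IPv4/TCP frame with no IP options:
 * ETH_HDR_LEN == 14, SKB_IPV4_OFF == 14 and SKB_TCP_OFF == 34.
 */
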
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	unsigned header_len;
	int full_packet_size;
};

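/* Invariants while segmenting: out_len counts the payload still to be
 * emitted across all remaining segments, packet_space the payload
 * still to emit in the current segment, and in_len what is left of
 * the current input fragment, with dma_addr tracking the position
 * within it.
 */
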
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.
 */
static void efx_tso_check_safe(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
	}

	EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
	EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));
}

/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}

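/* Each successful call to efx_tsoh_block_alloc() therefore adds
 * PAGE_SIZE / TSOH_STD_SIZE headers (32 with 4KiB pages) to the
 * free list.
 */
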
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = EFX_TXQ_MASK - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    EFX_TXQ_MASK);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}

/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}

/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   EFX_TXQ_MASK];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		buffer->len = 0;
		buffer->continuation = true;
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	st->ipv4_id = ntohs(ip_hdr(skb)->id);
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

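/* Worked example: an skb of 3014 bytes with a 54-byte header and a
 * gso_size of 1480 starts with out_len == 2960 and is emitted as two
 * full_packet_size (54 + 1480 == 1534 byte) frames.
 */
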
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:		TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, or 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:		TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct iphdr *tsoh_iph;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
	tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	tsoh_iph->tot_len = htons(ip_length);

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	tsoh_iph->id = htons(st->ipv4_id);
	st->ipv4_id++;

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}

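/* Each new packet advances seqnum by gso_size and ipv4_id by one, so
 * the generated segments carry the same TCP sequence numbers and IP
 * IDs that the stack would have used for individually sent packets.
 */
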
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Verify TSO is safe - these checks should never fail. */
	efx_tso_check_safe(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	falcon_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;
 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(efx);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}

/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= EFX_TXQ_MASK; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}