/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
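
/* Worked example (ring size assumed for illustration): with
 * txq_entries == 1024 the threshold is 512, so a queue stopped on
 * overflow is only woken once at least half the ring has drained;
 * this is exactly the check efx_xmit_done() makes:
 *
 *	fill_level = tx_queue->insert_count - tx_queue->read_count;
 *	if (fill_level < EFX_TXQ_THRESHOLD(efx))
 *		netif_tx_wake_queue(tx_queue->core_txq);
 */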
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *)buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}
/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
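
/* Worked example (addresses assumed for illustration): with
 * EFX_PAGE_SIZE == 4096 and a dma_addr whose low bits are 0x008,
 * len = (~0x008 & 0xfff) + 1 = 4088 bytes up to the next 4K boundary;
 * if workaround 5391 applies to this NIC, the unaligned start caps
 * the descriptor at 512 - 8 = 504 bytes instead.
 */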
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return rc;
}
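
/* For illustration (sizes assumed): insert_count and read_count are
 * free-running unsigned counters, masked with ptr_mask only when a
 * ring slot is addressed.  With txq_entries == 1024 (ptr_mask 0x3ff),
 * insert_count == 1030 selects slot 1030 & 0x3ff == 6, and
 * fill_level = insert_count - old_read_count remains correct across
 * 32-bit wrap-around.
 */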
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}
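
/* For illustration (channel count assumed): with n_tx_channels == 4,
 * an skb mapped to core queue 5 with ip_summed == CHECKSUM_PARTIAL
 * resolves to index 1 and type EFX_TXQ_TYPE_OFFLOAD |
 * EFX_TXQ_TYPE_HIGHPRI.
 */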
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx))
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
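
/* Worked example (requested size assumed): efx->txq_entries == 1000
 * rounds up to entries == 1024, giving ptr_mask == 0x3ff; the ring
 * must be a power of two so that masking a free-running counter with
 * ptr_mask always yields a valid index.
 */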
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */
/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128
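
/* Worked example (header sizes assumed): a typical Ethernet + IPv4 +
 * TCP header is 14 + 20 + 20 = 54 bytes, so TSOH_SIZE(54) =
 * sizeof(struct efx_tso_header) + TSOH_OFFSET + 54, which fits in a
 * TSOH_STD_SIZE (128-byte) free-list block; longer headers fall back
 * to efx_tsoh_heap_alloc().
 */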
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
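
/* For illustration (frame layout assumed): a VLAN-tagged IPv4 TCP skb
 * arrives with skb->protocol == htons(ETH_P_8021Q); the check above
 * unwraps the VLAN header and returns the encapsulated protocol,
 * htons(ETH_P_IP).
 */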
/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* dma_alloc_coherent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
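
/* For illustration (page size assumed): with PAGE_SIZE == 4096 and
 * TSOH_STD_SIZE == 128, each coherent page strung onto the free list
 * yields 4096 / 128 = 32 standard TSO headers.
 */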
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct device *dma_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
					TSOH_BUFFER(tsoh), header_len,
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
				       tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 DMA_TO_DEVICE);
	kfree(tsoh);
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}
/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
						 unmap_addr, buffer->unmap_len,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
					       unmap_addr, buffer->unmap_len,
					       DMA_TO_DEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
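
/* Worked example (header sizes assumed): for an IPv4 TCP skb with
 * 14-byte Ethernet, 20-byte IP and 20-byte TCP headers,
 * header_len = 54; with gso_size == 1460 each full segment is
 * full_packet_size = 54 + 1460 = 1514 bytes on the wire.
 */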
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
					len, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the DMA mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}
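
/* For illustration (sizes assumed): with packet_space == 1460 and a
 * 4096-byte fragment (in_len == 4096), successive calls take
 * n = min(in_len, packet_space) = 1460, 1460 and 1176 bytes, spreading
 * one fragment across three output packets.
 */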
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
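
/* Worked example (sizes assumed): for a non-final IPv4 segment with
 * full_packet_size == 1514 and a 14-byte Ethernet header,
 * ip_length = 1514 - 14 = 1500, the value written to tot_len; for
 * IPv6 the payload_len excludes the 40-byte IPv6 header itself.
 */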
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    &tx_queue->efx->pci_dev->dev);
}