sfc: move more tx code
Author:     Alex Maftei (amaftei) <amaftei@solarflare.com>
Date:       Fri, 10 Jan 2020 13:26:46 +0000
Committer:  David S. Miller <davem@davemloft.net>
CommitDate: Fri, 10 Jan 2020 19:24:37 +0000 (11:24 -0800)
The code that handles transmission finalization will also be common, so
move efx_dequeue_buffers() and efx_xmit_done() from tx.c to tx_common.c,
and declare efx_xmit_done() in tx_common.h.
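
For reference, the moved efx_xmit_done() remains the entry point used by
the event-handling path when the NIC signals TX completions.  A minimal
sketch of that call shape follows; the handler below is hypothetical, and
real call sites such as efx_handle_tx_event() decode the event descriptor
before making the call:

	/* Hypothetical sketch of a TX completion handler calling the
	 * moved efx_xmit_done().  The hardware reports the descriptor
	 * index of the last completed buffer; efx_xmit_done() then
	 * frees every buffer up to and including that index via
	 * efx_dequeue_buffers().
	 */
	static void example_handle_tx_event(struct efx_channel *channel,
					    unsigned int tx_ev_desc_ptr)
	{
		struct efx_tx_queue *tx_queue =
			efx_channel_get_tx_queue(channel, 0);

		efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
	}

Since efx_xmit_done() only touches generic queue state, callers are
unaffected by the move.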

Signed-off-by: Alexandru-Mihai Maftei <amaftei@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/sfc/tx_common.h

diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index e4f0f5c..696e12c 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -518,41 +518,6 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        return i;
 }
 
-/* Remove packets from the TX queue
- *
- * This removes packets from the TX queue, up to and including the
- * specified index.
- */
-static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                               unsigned int index,
-                               unsigned int *pkts_compl,
-                               unsigned int *bytes_compl)
-{
-       struct efx_nic *efx = tx_queue->efx;
-       unsigned int stop_index, read_ptr;
-
-       stop_index = (index + 1) & tx_queue->ptr_mask;
-       read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-
-       while (read_ptr != stop_index) {
-               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-
-               if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
-                   unlikely(buffer->len == 0)) {
-                       netif_err(efx, tx_err, efx->net_dev,
-                                 "TX queue %d spurious TX completion id %x\n",
-                                 tx_queue->queue, read_ptr);
-                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
-                       return;
-               }
-
-               efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-
-               ++tx_queue->read_count;
-               read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-       }
-}
-
 /* Initiate a packet transmission.  We use one channel per CPU
  * (sharing when we have more CPUs than channels).  On Falcon, the TX
  * completion events will be directed back to the CPU that transmitted
@@ -665,45 +630,3 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
        net_dev->num_tc = num_tc;
        return 0;
 }
-
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
-{
-       unsigned fill_level;
-       struct efx_nic *efx = tx_queue->efx;
-       struct efx_tx_queue *txq2;
-       unsigned int pkts_compl = 0, bytes_compl = 0;
-
-       EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
-
-       efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
-       tx_queue->pkts_compl += pkts_compl;
-       tx_queue->bytes_compl += bytes_compl;
-
-       if (pkts_compl > 1)
-               ++tx_queue->merge_events;
-
-       /* See if we need to restart the netif queue.  This memory
-        * barrier ensures that we write read_count (inside
-        * efx_dequeue_buffers()) before reading the queue status.
-        */
-       smp_mb();
-       if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-           likely(efx->port_enabled) &&
-           likely(netif_device_present(efx->net_dev))) {
-               txq2 = efx_tx_queue_partner(tx_queue);
-               fill_level = max(tx_queue->insert_count - tx_queue->read_count,
-                                txq2->insert_count - txq2->read_count);
-               if (fill_level <= efx->txq_wake_thresh)
-                       netif_tx_wake_queue(tx_queue->core_txq);
-       }
-
-       /* Check whether the hardware queue is now empty */
-       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
-               if (tx_queue->read_count == tx_queue->old_write_count) {
-                       smp_mb();
-                       tx_queue->empty_read_count =
-                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
-               }
-       }
-}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index e29ade2..0ce699e 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -191,6 +191,82 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        buffer->flags = 0;
 }
 
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+                               unsigned int index,
+                               unsigned int *pkts_compl,
+                               unsigned int *bytes_compl)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       unsigned int stop_index, read_ptr;
+
+       stop_index = (index + 1) & tx_queue->ptr_mask;
+       read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+       while (read_ptr != stop_index) {
+               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+               if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+                   unlikely(buffer->len == 0)) {
+                       netif_err(efx, tx_err, efx->net_dev,
+                                 "TX queue %d spurious TX completion id %x\n",
+                                 tx_queue->queue, read_ptr);
+                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                       return;
+               }
+
+               efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+               ++tx_queue->read_count;
+               read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+       }
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+       unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+       struct efx_nic *efx = tx_queue->efx;
+       struct efx_tx_queue *txq2;
+
+       EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+       efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+       tx_queue->pkts_compl += pkts_compl;
+       tx_queue->bytes_compl += bytes_compl;
+
+       if (pkts_compl > 1)
+               ++tx_queue->merge_events;
+
+       /* See if we need to restart the netif queue.  This memory
+        * barrier ensures that we write read_count (inside
+        * efx_dequeue_buffers()) before reading the queue status.
+        */
+       smp_mb();
+       if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+           likely(efx->port_enabled) &&
+           likely(netif_device_present(efx->net_dev))) {
+               txq2 = efx_tx_queue_partner(tx_queue);
+               fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+                                txq2->insert_count - txq2->read_count);
+               if (fill_level <= efx->txq_wake_thresh)
+                       netif_tx_wake_queue(tx_queue->core_txq);
+       }
+
+       /* Check whether the hardware queue is now empty */
+       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               if (tx_queue->read_count == tx_queue->old_write_count) {
+                       smp_mb();
+                       tx_queue->empty_read_count =
+                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+               }
+       }
+}
+
 struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len)
 {
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index afdfc79..58add94 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -21,6 +21,8 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl);
 
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
 struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len);
 int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,