}
/* Transmit timestamps are only available for 8XXX series. They result
- * in three events per packet. These occur in order, and are:
- * - the normal completion event
+ * in up to three events per packet. These occur in order, and are:
+ * - the normal completion event (may be omitted)
* - the low part of the timestamp
* - the high part of the timestamp
*
+ * It's possible for multiple completion events to appear before the
+ * corresponding timestamps. So we can for example get:
+ * COMP N
+ * COMP N+1
+ * TS_LO N
+ * TS_HI N
+ * TS_LO N+1
+ * TS_HI N+1
+ *
+ * In addition it's also possible for the adjacent completions to be
+ * merged, so we may not see COMP N above. As such, the completion
+ * events are not very useful here.
+ *
* Each part of the timestamp is itself split across two 16 bit
* fields in the event.
*/
switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
- /* In case of Queue flush or FLR, we might have received
- * the previous TX completion event but not the Timestamp
- * events.
- */
- if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
- ESF_DZ_TX_DESCR_INDX);
- tx_queue->completed_desc_ptr =
- tx_ev_desc_ptr & tx_queue->ptr_mask;
+ /* Ignore this event - see above. */
break;
	case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
ts_part = efx_ef10_extract_event_ts(event);
tx_queue->completed_timestamp_major = ts_part;
- efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ efx_xmit_done_single(tx_queue);
break;
default:
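
To make the ordering in the comment above concrete, here is a small standalone sketch (not driver code; the event encoding, helper names and values are simplified assumptions) of how the TS_LO/TS_HI pair assembles the minor/major timestamp halves while completion events are ignored, with the packet finished off on TS_HI just as the hunk above does via efx_xmit_done_single():

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified event stream, mirroring the comment above. */
	enum ev_type { EV_COMP, EV_TS_LO, EV_TS_HI };

	struct ev {
		enum ev_type type;
		uint16_t data_hi;	/* each timestamp half is itself split */
		uint16_t data_lo;	/* across two 16-bit event fields      */
	};

	static uint32_t ts_part(const struct ev *e)
	{
		return ((uint32_t)e->data_hi << 16) | e->data_lo;
	}

	int main(void)
	{
		/* COMP N / COMP N+1 may arrive first (or be merged away);
		 * each packet is finished when its TS_HI event arrives.
		 */
		const struct ev stream[] = {
			{ EV_COMP,  0, 0 },		/* COMP N   */
			{ EV_COMP,  0, 0 },		/* COMP N+1 */
			{ EV_TS_LO, 0x0001, 0x86a0 },	/* TS_LO N   */
			{ EV_TS_HI, 0x0000, 0x002a },	/* TS_HI N   */
			{ EV_TS_LO, 0x0001, 0x86a1 },	/* TS_LO N+1 */
			{ EV_TS_HI, 0x0000, 0x002a },	/* TS_HI N+1 */
		};
		uint32_t minor = 0, major = 0;
		unsigned int i;

		for (i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
			switch (stream[i].type) {
			case EV_COMP:
				/* Not useful - completions may be merged. */
				break;
			case EV_TS_LO:
				minor = ts_part(&stream[i]);
				break;
			case EV_TS_HI:
				major = ts_part(&stream[i]);
				/* Timestamp complete: the real driver would
				 * complete one packet here.
				 */
				printf("packet done: major=%u minor=%u\n",
				       (unsigned int)major, (unsigned int)minor);
				break;
			}
		}
		return 0;
	}
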
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
extern unsigned int efx_piobuf_size;
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- * timestamping.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
- unsigned int completed_desc_ptr;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int read_ptr;
+ bool finished = false;
+
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (!finished) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!efx_tx_buffer_in_use(buffer)) {
+ struct efx_nic *efx = tx_queue->efx;
+
+ netif_err(efx, hw, efx->net_dev,
+ "TX queue %d spurious single TX completion\n",
+ tx_queue->queue);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ /* Need to check the flag before dequeueing. */
+ if (buffer->flags & EFX_TX_BUF_SKB)
+ finished = true;
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+
+ tx_queue->pkts_compl += pkts_compl;
+ tx_queue->bytes_compl += bytes_compl;
+
+ EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+ efx_xmit_done_check_empty(tx_queue);
+}
+
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
tx_queue->xmit_more_available = false;
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_ptp_channel(efx));
- tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
- unlikely(buffer->len == 0)) {
+ if (!efx_tx_buffer_in_use(buffer)) {
netif_err(efx, tx_err, efx->net_dev,
- "TX queue %d spurious TX completion id %x\n",
+ "TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
}
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ /* Ensure that read_count is flushed. */
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
netif_tx_wake_queue(tx_queue->core_txq);
}
- /* Check whether the hardware queue is now empty */
- if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
- if (tx_queue->read_count == tx_queue->old_write_count) {
- smp_mb();
- tx_queue->empty_read_count =
- tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
- }
- }
+ efx_xmit_done_check_empty(tx_queue);
}
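
The empty check factored out into efx_xmit_done_check_empty() keeps the pre-existing free-running-counter idiom: read_count and old_write_count are unsigned counters that are allowed to wrap, so "has the reader reached the last producer snapshot?" is tested on the signed cast of their difference rather than on the raw values. A minimal standalone sketch (not driver code; the counter values are made up) of why the cast matters:

	#include <assert.h>
	#include <stdint.h>

	/* Wraparound-safe "reader has caught up to the snapshot" test,
	 * mirroring the (int)(read_count - old_write_count) >= 0 check above.
	 */
	static int caught_up(uint32_t read_count, uint32_t old_write_count)
	{
		return (int32_t)(read_count - old_write_count) >= 0;
	}

	int main(void)
	{
		/* Reader behind the snapshot. */
		assert(!caught_up(10, 20));
		/* Reader has reached it. */
		assert(caught_up(20, 20));
		/* Writer has wrapped ahead of the reader: a plain ">=" on
		 * the raw values would wrongly report the queue as drained.
		 */
		assert(!caught_up(0xfffffff0u, 0x00000005u));
		return 0;
	}
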
/* Remove buffers put into a tx_queue for the current packet.
unsigned int *pkts_compl,
unsigned int *bytes_compl);
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+ return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,