1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include "net_driver.h"
23 #include "workarounds.h"
25 /**************************************************************************
29 **************************************************************************
32 /* This is set to 16 for a good reason. In summary, if larger than
33 * 16, the descriptor cache holds more than a default socket
34 * buffer's worth of packets (for UDP we can only have at most one
35 * socket buffer's worth outstanding). This combined with the fact
36 * that we only get 1 TX event per descriptor cache means the NIC
39 #define TX_DC_ENTRIES 16
40 #define TX_DC_ENTRIES_ORDER 1
42 #define RX_DC_ENTRIES 64
43 #define RX_DC_ENTRIES_ORDER 3
45 /* If EFX_MAX_INT_ERRORS internal errors occur within
46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
49 #define EFX_INT_ERROR_EXPIRE 3600
50 #define EFX_MAX_INT_ERRORS 5
52 /* Depth of RX flush request fifo */
53 #define EFX_RX_FLUSH_COUNT 4
55 /* Driver generated events */
56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
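/* For example, EFX_CHANNEL_MAGIC_TEST() on channel 3 packs to
 * (0x000101 << 8) | 3 == 0x00010103, and _EFX_CHANNEL_MAGIC_CODE()
 * recovers 0x000101 so handlers can match on the code alone.
 */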
64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
76 /**************************************************************************
78 * Solarstorm hardware access
80 **************************************************************************/
82 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
85 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
89 /* Read the current event from the event queue */
90 static inline efx_qword_t *efx_event(struct efx_channel *channel,
93 return ((efx_qword_t *) (channel->eventq.addr)) +
94 (index & channel->eventq_mask);
97 /* See if an event is present
99 * We check both the high and low dword of the event for all ones. We
100 * wrote all ones when we cleared the event, and no valid event can
101 * have all ones in either its high or low dwords. This approach is
102 * robust against reordering.
104 * Note that using a single 64-bit comparison is incorrect; even
105 * though the CPU read will be atomic, the DMA write may not be.
107 static inline int efx_event_present(efx_qword_t *event)
109 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
110 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
113 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
114 const efx_oword_t *mask)
116 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
117 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
120 int efx_nic_test_registers(struct efx_nic *efx,
121 const struct efx_nic_register_test *regs,
124 unsigned address = 0, i, j;
125 efx_oword_t mask, imask, original, reg, buf;
127 for (i = 0; i < n_regs; ++i) {
128 address = regs[i].address;
129 mask = imask = regs[i].mask;
130 EFX_INVERT_OWORD(imask);
132 efx_reado(efx, &original, address);
134 /* bit sweep on and off */
135 for (j = 0; j < 128; j++) {
136 if (!EFX_EXTRACT_OWORD32(mask, j, j))
139 /* Test this testable bit can be set in isolation */
140 EFX_AND_OWORD(reg, original, mask);
141 EFX_SET_OWORD32(reg, j, j, 1);
143 efx_writeo(efx, &reg, address);
144 efx_reado(efx, &buf, address);
146 if (efx_masked_compare_oword(&reg, &buf, &mask))
149 /* Test this testable bit can be cleared in isolation */
150 EFX_OR_OWORD(reg, original, mask);
151 EFX_SET_OWORD32(reg, j, j, 0);
153 efx_writeo(efx, &reg, address);
154 efx_reado(efx, &buf, address);
156 if (efx_masked_compare_oword(&reg, &buf, &mask))
160 efx_writeo(efx, &original, address);
166 netif_err(efx, hw, efx->net_dev,
167 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
168 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
169 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
173 /**************************************************************************
175 * Special buffer handling
176 * Special buffers are used for event queues and the TX and RX
179 *************************************************************************/
182 * Initialise a special buffer
184 * This will define a buffer (previously allocated via
185 * efx_alloc_special_buffer()) in the buffer table, allowing
186 * it to be used for event queues, descriptor rings etc.
189 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
191 efx_qword_t buf_desc;
196 EFX_BUG_ON_PARANOID(!buffer->addr);
198 /* Write buffer descriptors to NIC */
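/* Each buffer table entry maps one EFX_BUF_SIZE (4KB) page, which is
 * why the DMA address is shifted right by 12 bits below.
 */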
199 for (i = 0; i < buffer->entries; i++) {
200 index = buffer->index + i;
201 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
202 netif_dbg(efx, probe, efx->net_dev,
203 "mapping special buffer %d at %llx\n",
204 index, (unsigned long long)dma_addr);
205 EFX_POPULATE_QWORD_3(buf_desc,
206 FRF_AZ_BUF_ADR_REGION, 0,
207 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
208 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
209 efx_write_buf_tbl(efx, &buf_desc, index);
213 /* Unmaps a buffer and clears the buffer table entries */
215 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
217 efx_oword_t buf_tbl_upd;
218 unsigned int start = buffer->index;
219 unsigned int end = (buffer->index + buffer->entries - 1);
221 if (!buffer->entries)
224 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
225 buffer->index, buffer->index + buffer->entries - 1);
227 EFX_POPULATE_OWORD_4(buf_tbl_upd,
228 FRF_AZ_BUF_UPD_CMD, 0,
229 FRF_AZ_BUF_CLR_CMD, 1,
230 FRF_AZ_BUF_CLR_END_ID, end,
231 FRF_AZ_BUF_CLR_START_ID, start);
232 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
236 * Allocate a new special buffer
238 * This allocates memory for a new buffer, clears it and allocates a
239 * new buffer ID range. It does not write into the buffer table.
241 * This call will allocate 4KB buffers, since 8KB buffers can't be
242 * used for event queues and descriptor rings.
244 static int efx_alloc_special_buffer(struct efx_nic *efx,
245 struct efx_special_buffer *buffer,
248 len = ALIGN(len, EFX_BUF_SIZE);
250 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
251 &buffer->dma_addr, GFP_KERNEL);
255 buffer->entries = len / EFX_BUF_SIZE;
256 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
258 /* All zeros is a potentially valid event so memset to 0xff */
259 memset(buffer->addr, 0xff, len);
261 /* Select new buffer ID */
262 buffer->index = efx->next_buffer_table;
263 efx->next_buffer_table += buffer->entries;
264 #ifdef CONFIG_SFC_SRIOV
265 BUG_ON(efx_sriov_enabled(efx) &&
266 efx->vf_buftbl_base < efx->next_buffer_table);
269 netif_dbg(efx, probe, efx->net_dev,
270 "allocating special buffers %d-%d at %llx+%x "
271 "(virt %p phys %llx)\n", buffer->index,
272 buffer->index + buffer->entries - 1,
273 (u64)buffer->dma_addr, len,
274 buffer->addr, (u64)virt_to_phys(buffer->addr));
280 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
285 netif_dbg(efx, hw, efx->net_dev,
286 "deallocating special buffers %d-%d at %llx+%x "
287 "(virt %p phys %llx)\n", buffer->index,
288 buffer->index + buffer->entries - 1,
289 (u64)buffer->dma_addr, buffer->len,
290 buffer->addr, (u64)virt_to_phys(buffer->addr));
292 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
298 /**************************************************************************
300 * Generic buffer handling
301 * These buffers are used for interrupt status, MAC stats, etc.
303 **************************************************************************/
305 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
308 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
309 &buffer->dma_addr, GFP_ATOMIC);
313 memset(buffer->addr, 0, len);
317 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
320 dma_free_coherent(&efx->pci_dev->dev, buffer->len,
321 buffer->addr, buffer->dma_addr);
326 /**************************************************************************
330 **************************************************************************/
332 /* Returns a pointer to the specified transmit descriptor in the TX
333 * descriptor queue belonging to the specified channel.
335 static inline efx_qword_t *
336 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
338 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
341 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
342 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
347 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
348 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
349 efx_writed_page(tx_queue->efx, &reg,
350 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
353 /* Write pointer and first descriptor for TX descriptor ring */
354 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
355 const efx_qword_t *txd)
360 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
361 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
363 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
364 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
365 FRF_AZ_TX_DESC_WPTR, write_ptr);
367 efx_writeo_page(tx_queue->efx, &reg,
368 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
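/* Decide whether to push the first new descriptor along with the doorbell.
 * This is only worthwhile if the completion path recorded the queue as
 * empty (empty_read_count != 0) and nothing has been written since that
 * snapshot was taken.
 */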
372 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
374 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
376 if (empty_read_count == 0)
379 tx_queue->empty_read_count = 0;
380 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
383 /* For each entry inserted into the software descriptor ring, create a
384 * descriptor in the hardware TX descriptor ring (in host memory), and
387 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
390 struct efx_tx_buffer *buffer;
393 unsigned old_write_count = tx_queue->write_count;
395 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
398 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
399 buffer = &tx_queue->buffer[write_ptr];
400 txd = efx_tx_desc(tx_queue, write_ptr);
401 ++tx_queue->write_count;
403 /* Create TX descriptor ring entry */
404 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
405 EFX_POPULATE_QWORD_4(*txd,
407 buffer->flags & EFX_TX_BUF_CONT,
408 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
409 FSF_AZ_TX_KER_BUF_REGION, 0,
410 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
411 } while (tx_queue->write_count != tx_queue->insert_count);
413 wmb(); /* Ensure descriptors are written before they are fetched */
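/* If the queue was empty, write the first new descriptor together with
 * the doorbell so the NIC need not fetch it; otherwise just advance the
 * write pointer.
 */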
415 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
416 txd = efx_tx_desc(tx_queue,
417 old_write_count & tx_queue->ptr_mask);
418 efx_push_tx_desc(tx_queue, txd);
421 efx_notify_tx_desc(tx_queue);
425 /* Allocate hardware resources for a TX queue */
426 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
428 struct efx_nic *efx = tx_queue->efx;
431 entries = tx_queue->ptr_mask + 1;
432 return efx_alloc_special_buffer(efx, &tx_queue->txd,
433 entries * sizeof(efx_qword_t));
436 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
438 struct efx_nic *efx = tx_queue->efx;
441 /* Pin TX descriptor ring */
442 efx_init_special_buffer(efx, &tx_queue->txd);
444 /* Push TX descriptor ring to card */
445 EFX_POPULATE_OWORD_10(reg,
446 FRF_AZ_TX_DESCQ_EN, 1,
447 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
448 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
449 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
450 FRF_AZ_TX_DESCQ_EVQ_ID,
451 tx_queue->channel->channel,
452 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
453 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
454 FRF_AZ_TX_DESCQ_SIZE,
455 __ffs(tx_queue->txd.entries),
456 FRF_AZ_TX_DESCQ_TYPE, 0,
457 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
459 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
460 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
461 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
462 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
466 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
469 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
470 /* Only 128 bits in this register */
471 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
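/* One bit per TX queue: the driver clears the bit for checksum-offload
 * queues and sets it for all others.
 */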
473 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
474 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
475 __clear_bit_le(tx_queue->queue, &reg);
477 __set_bit_le(tx_queue->queue, &reg);
478 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
481 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
482 EFX_POPULATE_OWORD_1(reg,
484 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
486 FFE_BZ_TX_PACE_RESERVED);
487 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
492 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
494 struct efx_nic *efx = tx_queue->efx;
495 efx_oword_t tx_flush_descq;
497 EFX_POPULATE_OWORD_2(tx_flush_descq,
498 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
499 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
500 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
503 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
505 struct efx_nic *efx = tx_queue->efx;
506 efx_oword_t tx_desc_ptr;
508 /* Remove TX descriptor ring from card */
509 EFX_ZERO_OWORD(tx_desc_ptr);
510 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
513 /* Unpin TX descriptor ring */
514 efx_fini_special_buffer(efx, &tx_queue->txd);
517 /* Free buffers backing TX queue */
518 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
520 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
523 /**************************************************************************
527 **************************************************************************/
529 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
530 static inline efx_qword_t *
531 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
533 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
536 /* This creates an entry in the RX descriptor queue */
538 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
540 struct efx_rx_buffer *rx_buf;
543 rxd = efx_rx_desc(rx_queue, index);
544 rx_buf = efx_rx_buffer(rx_queue, index);
545 EFX_POPULATE_QWORD_3(*rxd,
546 FSF_AZ_RX_KER_BUF_SIZE,
548 rx_queue->efx->type->rx_buffer_padding,
549 FSF_AZ_RX_KER_BUF_REGION, 0,
550 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
553 /* This writes to the RX_DESC_WPTR register for the specified receive
556 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
558 struct efx_nic *efx = rx_queue->efx;
562 while (rx_queue->notified_count != rx_queue->added_count) {
565 rx_queue->notified_count & rx_queue->ptr_mask);
566 ++rx_queue->notified_count;
570 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
571 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
572 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
573 efx_rx_queue_index(rx_queue));
576 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
578 struct efx_nic *efx = rx_queue->efx;
581 entries = rx_queue->ptr_mask + 1;
582 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
583 entries * sizeof(efx_qword_t));
586 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
588 efx_oword_t rx_desc_ptr;
589 struct efx_nic *efx = rx_queue->efx;
590 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
591 bool iscsi_digest_en = is_b0;
593 netif_dbg(efx, hw, efx->net_dev,
594 "RX queue %d ring in special buffers %d-%d\n",
595 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
596 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
598 /* Pin RX descriptor ring */
599 efx_init_special_buffer(efx, &rx_queue->rxd);
601 /* Push RX descriptor ring to card */
602 EFX_POPULATE_OWORD_10(rx_desc_ptr,
603 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
604 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
605 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
606 FRF_AZ_RX_DESCQ_EVQ_ID,
607 efx_rx_queue_channel(rx_queue)->channel,
608 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
609 FRF_AZ_RX_DESCQ_LABEL,
610 efx_rx_queue_index(rx_queue),
611 FRF_AZ_RX_DESCQ_SIZE,
612 __ffs(rx_queue->rxd.entries),
613 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
614 /* For >=B0 this is scatter so disable */
615 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
616 FRF_AZ_RX_DESCQ_EN, 1);
617 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
618 efx_rx_queue_index(rx_queue));
621 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
623 struct efx_nic *efx = rx_queue->efx;
624 efx_oword_t rx_flush_descq;
626 EFX_POPULATE_OWORD_2(rx_flush_descq,
627 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
628 FRF_AZ_RX_FLUSH_DESCQ,
629 efx_rx_queue_index(rx_queue));
630 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
633 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
635 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx;
638 /* Remove RX descriptor ring from card */
639 EFX_ZERO_OWORD(rx_desc_ptr);
640 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
641 efx_rx_queue_index(rx_queue));
643 /* Unpin RX descriptor ring */
644 efx_fini_special_buffer(efx, &rx_queue->rxd);
647 /* Free buffers backing RX queue */
648 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
650 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
653 /**************************************************************************
657 **************************************************************************/
659 /* efx_nic_flush_queues() must be woken up when all flushes are completed,
660 * or more RX flushes can be kicked off.
662 static bool efx_flush_wake(struct efx_nic *efx)
664 /* Ensure that all updates are visible to efx_nic_flush_queues() */
667 return (atomic_read(&efx->drain_pending) == 0 ||
668 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
669 && atomic_read(&efx->rxq_flush_pending) > 0));
672 /* Flush all the transmit queues, and continue flushing receive queues until
673 * they're all flushed. Wait for the DRAIN events to be received so that there
674 * are no more RX and TX events left on any channel. */
675 int efx_nic_flush_queues(struct efx_nic *efx)
677 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
678 struct efx_channel *channel;
679 struct efx_rx_queue *rx_queue;
680 struct efx_tx_queue *tx_queue;
683 efx->type->prepare_flush(efx);
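/* drain_pending counts every TX and RX queue we expect a drain event for;
 * rxq_flush_pending counts RX flushes not yet issued, and
 * rxq_flush_outstanding counts those issued but not yet completed.
 */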
685 efx_for_each_channel(channel, efx) {
686 efx_for_each_channel_tx_queue(tx_queue, channel) {
687 atomic_inc(&efx->drain_pending);
688 efx_flush_tx_queue(tx_queue);
690 efx_for_each_channel_rx_queue(rx_queue, channel) {
691 atomic_inc(&efx->drain_pending);
692 rx_queue->flush_pending = true;
693 atomic_inc(&efx->rxq_flush_pending);
697 while (timeout && atomic_read(&efx->drain_pending) > 0) {
698 /* If SRIOV is enabled, then offload receive queue flushing to
699 * the firmware (though we will still have to poll for
700 * completion). If that fails, fall back to the old scheme.
702 if (efx_sriov_enabled(efx)) {
703 rc = efx_mcdi_flush_rxqs(efx);
708 /* The hardware supports four concurrent rx flushes, each of
709 * which may need to be retried if there is an outstanding
712 efx_for_each_channel(channel, efx) {
713 efx_for_each_channel_rx_queue(rx_queue, channel) {
714 if (atomic_read(&efx->rxq_flush_outstanding) >=
718 if (rx_queue->flush_pending) {
719 rx_queue->flush_pending = false;
720 atomic_dec(&efx->rxq_flush_pending);
721 atomic_inc(&efx->rxq_flush_outstanding);
722 efx_flush_rx_queue(rx_queue);
728 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
732 if (atomic_read(&efx->drain_pending)) {
733 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
734 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
735 atomic_read(&efx->rxq_flush_outstanding),
736 atomic_read(&efx->rxq_flush_pending));
739 atomic_set(&efx->drain_pending, 0);
740 atomic_set(&efx->rxq_flush_pending, 0);
741 atomic_set(&efx->rxq_flush_outstanding, 0);
744 efx->type->finish_flush(efx);
749 /**************************************************************************
751 * Event queue processing
752 * Event queues are processed by per-channel tasklets.
754 **************************************************************************/
756 /* Update a channel's event queue's read pointer (RPTR) register
758 * This writes the EVQ_RPTR_REG register for the specified channel's
761 void efx_nic_eventq_read_ack(struct efx_channel *channel)
764 struct efx_nic *efx = channel->efx;
766 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
767 channel->eventq_read_ptr & channel->eventq_mask);
768 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
772 /* Use HW to insert a SW defined event */
773 void efx_generate_event(struct efx_nic *efx, unsigned int evq,
776 efx_oword_t drv_ev_reg;
778 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
779 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
780 drv_ev_reg.u32[0] = event->u32[0];
781 drv_ev_reg.u32[1] = event->u32[1];
782 drv_ev_reg.u32[2] = 0;
783 drv_ev_reg.u32[3] = 0;
784 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
785 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
788 static void efx_magic_event(struct efx_channel *channel, u32 magic)
792 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
793 FSE_AZ_EV_CODE_DRV_GEN_EV,
794 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
795 efx_generate_event(channel->efx, channel->channel, &event);
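/* These driver-generated events come back through the normal event queue
 * and are decoded in efx_handle_generated_event(), which matches the magic
 * value against EFX_CHANNEL_MAGIC_TEST()/_FILL()/_RX_DRAIN()/_TX_DRAIN().
 */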
798 /* Handle a transmit completion event
800 * The NIC batches TX completion events; the message we receive is of
801 * the form "complete all TX events up to this index".
804 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
806 unsigned int tx_ev_desc_ptr;
807 unsigned int tx_ev_q_label;
808 struct efx_tx_queue *tx_queue;
809 struct efx_nic *efx = channel->efx;
812 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
815 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
816 /* Transmit completion */
817 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
818 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
819 tx_queue = efx_channel_get_tx_queue(
820 channel, tx_ev_q_label % EFX_TXQ_TYPES);
821 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
823 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
824 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
825 /* Rewrite the FIFO write pointer */
826 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
827 tx_queue = efx_channel_get_tx_queue(
828 channel, tx_ev_q_label % EFX_TXQ_TYPES);
830 netif_tx_lock(efx->net_dev);
831 efx_notify_tx_desc(tx_queue);
832 netif_tx_unlock(efx->net_dev);
833 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
834 EFX_WORKAROUND_10727(efx)) {
835 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
837 netif_err(efx, tx_err, efx->net_dev,
838 "channel %d unexpected TX event "
839 EFX_QWORD_FMT"\n", channel->channel,
840 EFX_QWORD_VAL(*event));
846 /* Detect errors included in the rx_ev_pkt_ok bit. */
847 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
848 const efx_qword_t *event)
850 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
851 struct efx_nic *efx = rx_queue->efx;
852 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
853 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
854 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
855 bool rx_ev_other_err, rx_ev_pause_frm;
856 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
857 unsigned rx_ev_pkt_type;
859 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
860 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
861 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
862 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
863 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
864 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
865 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
866 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
867 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
868 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
869 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
870 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
871 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
872 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
873 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
875 /* Every error apart from tobe_disc and pause_frm */
876 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
877 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
878 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
880 /* Count errors that are not in MAC stats. Ignore expected
881 * checksum errors during self-test. */
883 ++channel->n_rx_frm_trunc;
884 else if (rx_ev_tobe_disc)
885 ++channel->n_rx_tobe_disc;
886 else if (!efx->loopback_selftest) {
887 if (rx_ev_ip_hdr_chksum_err)
888 ++channel->n_rx_ip_hdr_chksum_err;
889 else if (rx_ev_tcp_udp_chksum_err)
890 ++channel->n_rx_tcp_udp_chksum_err;
893 /* TOBE_DISC is expected on unicast mismatches; don't print out an
894 * error message. FRM_TRUNC indicates RXDP dropped the packet due
895 * to a FIFO overflow.
898 if (rx_ev_other_err && net_ratelimit()) {
899 netif_dbg(efx, rx_err, efx->net_dev,
900 " RX queue %d unexpected RX event "
901 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
902 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
903 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
904 rx_ev_ip_hdr_chksum_err ?
905 " [IP_HDR_CHKSUM_ERR]" : "",
906 rx_ev_tcp_udp_chksum_err ?
907 " [TCP_UDP_CHKSUM_ERR]" : "",
908 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
909 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
910 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
911 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
912 rx_ev_pause_frm ? " [PAUSE]" : "");
916 /* The frame must be discarded if any of these are true. */
917 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
918 rx_ev_tobe_disc | rx_ev_pause_frm) ?
919 EFX_RX_PKT_DISCARD : 0;
922 /* Handle receive events that are not in-order. */
924 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
926 struct efx_nic *efx = rx_queue->efx;
927 unsigned expected, dropped;
929 expected = rx_queue->removed_count & rx_queue->ptr_mask;
930 dropped = (index - expected) & rx_queue->ptr_mask;
931 netif_info(efx, rx_err, efx->net_dev,
932 "dropped %d events (index=%d expected=%d)\n",
933 dropped, index, expected);
935 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
936 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
939 /* Handle a packet received event
941 * The NIC gives a "discard" flag if it's a unicast packet with the
942 * wrong destination address.
943 * Also "is multicast" and "matches multicast filter" flags can be used to
944 * discard non-matching multicast packets.
947 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
949 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
950 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
951 unsigned expected_ptr;
954 struct efx_rx_queue *rx_queue;
955 struct efx_nic *efx = channel->efx;
957 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
960 /* Basic packet information */
961 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
962 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
963 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
964 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
965 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
966 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
969 rx_queue = efx_channel_get_rx_queue(channel);
971 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
972 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
973 if (unlikely(rx_ev_desc_ptr != expected_ptr))
974 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
976 if (likely(rx_ev_pkt_ok)) {
977 /* If packet is marked as OK and packet type is TCP/IP or
978 * UDP/IP, then we can rely on the hardware checksum.
980 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
981 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
982 EFX_RX_PKT_CSUMMED : 0;
984 flags = efx_handle_rx_not_ok(rx_queue, event);
987 /* Detect multicast packets that didn't match the filter */
988 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
989 if (rx_ev_mcast_pkt) {
990 unsigned int rx_ev_mcast_hash_match =
991 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
993 if (unlikely(!rx_ev_mcast_hash_match)) {
994 ++channel->n_rx_mcast_mismatch;
995 flags |= EFX_RX_PKT_DISCARD;
999 channel->irq_mod_score += 2;
1001 /* Handle received packet */
1002 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
1005 /* If this flush done event corresponds to a &struct efx_tx_queue, then
1006 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1007 * of all transmit completions.
1010 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1012 struct efx_tx_queue *tx_queue;
1015 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
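/* The queue ID encodes (channel index * EFX_TXQ_TYPES + queue type), hence
 * the divide/modulo below to recover the channel and queue type.
 */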
1016 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1017 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1018 qid % EFX_TXQ_TYPES);
1020 efx_magic_event(tx_queue->channel,
1021 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1025 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1026 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1027 * the RX queue back to the mask of RX queues in need of flushing.
1030 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1032 struct efx_channel *channel;
1033 struct efx_rx_queue *rx_queue;
1037 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1038 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1039 if (qid >= efx->n_channels)
1041 channel = efx_get_channel(efx, qid);
1042 if (!efx_channel_has_rx_queue(channel))
1044 rx_queue = efx_channel_get_rx_queue(channel);
1047 netif_info(efx, hw, efx->net_dev,
1048 "RXQ %d flush retry\n", qid);
1049 rx_queue->flush_pending = true;
1050 atomic_inc(&efx->rxq_flush_pending);
1052 efx_magic_event(efx_rx_queue_channel(rx_queue),
1053 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1055 atomic_dec(&efx->rxq_flush_outstanding);
1056 if (efx_flush_wake(efx))
1057 wake_up(&efx->flush_wq);
1061 efx_handle_drain_event(struct efx_channel *channel)
1063 struct efx_nic *efx = channel->efx;
1065 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1066 atomic_dec(&efx->drain_pending);
1067 if (efx_flush_wake(efx))
1068 wake_up(&efx->flush_wq);
1072 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1074 struct efx_nic *efx = channel->efx;
1075 struct efx_rx_queue *rx_queue =
1076 efx_channel_has_rx_queue(channel) ?
1077 efx_channel_get_rx_queue(channel) : NULL;
1078 unsigned magic, code;
1080 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1081 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1083 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1084 channel->event_test_cpu = raw_smp_processor_id();
1085 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1086 /* The queue must be empty, so we won't receive any rx
1087 * events, so efx_process_channel() won't refill the
1088 * queue. Refill it here */
1089 efx_fast_push_rx_descriptors(rx_queue);
1090 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1091 rx_queue->enabled = false;
1092 efx_handle_drain_event(channel);
1093 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1094 efx_handle_drain_event(channel);
1096 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1097 "generated event "EFX_QWORD_FMT"\n",
1098 channel->channel, EFX_QWORD_VAL(*event));
1103 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1105 struct efx_nic *efx = channel->efx;
1106 unsigned int ev_sub_code;
1107 unsigned int ev_sub_data;
1109 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1110 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1112 switch (ev_sub_code) {
1113 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1114 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1115 channel->channel, ev_sub_data);
1116 efx_handle_tx_flush_done(efx, event);
1117 efx_sriov_tx_flush_done(efx, event);
1119 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1120 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1121 channel->channel, ev_sub_data);
1122 efx_handle_rx_flush_done(efx, event);
1123 efx_sriov_rx_flush_done(efx, event);
1125 case FSE_AZ_EVQ_INIT_DONE_EV:
1126 netif_dbg(efx, hw, efx->net_dev,
1127 "channel %d EVQ %d initialised\n",
1128 channel->channel, ev_sub_data);
1130 case FSE_AZ_SRM_UPD_DONE_EV:
1131 netif_vdbg(efx, hw, efx->net_dev,
1132 "channel %d SRAM update done\n", channel->channel);
1134 case FSE_AZ_WAKE_UP_EV:
1135 netif_vdbg(efx, hw, efx->net_dev,
1136 "channel %d RXQ %d wakeup event\n",
1137 channel->channel, ev_sub_data);
1139 case FSE_AZ_TIMER_EV:
1140 netif_vdbg(efx, hw, efx->net_dev,
1141 "channel %d RX queue %d timer expired\n",
1142 channel->channel, ev_sub_data);
1144 case FSE_AA_RX_RECOVER_EV:
1145 netif_err(efx, rx_err, efx->net_dev,
1146 "channel %d seen DRIVER RX_RESET event. "
1147 "Resetting.\n", channel->channel);
1148 atomic_inc(&efx->rx_reset);
1149 efx_schedule_reset(efx,
1150 EFX_WORKAROUND_6555(efx) ?
1151 RESET_TYPE_RX_RECOVERY :
1152 RESET_TYPE_DISABLE);
1154 case FSE_BZ_RX_DSC_ERROR_EV:
1155 if (ev_sub_data < EFX_VI_BASE) {
1156 netif_err(efx, rx_err, efx->net_dev,
1157 "RX DMA Q %d reports descriptor fetch error."
1158 " RX Q %d is disabled.\n", ev_sub_data,
1160 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1162 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1164 case FSE_BZ_TX_DSC_ERROR_EV:
1165 if (ev_sub_data < EFX_VI_BASE) {
1166 netif_err(efx, tx_err, efx->net_dev,
1167 "TX DMA Q %d reports descriptor fetch error."
1168 " TX Q %d is disabled.\n", ev_sub_data,
1170 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1172 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1175 netif_vdbg(efx, hw, efx->net_dev,
1176 "channel %d unknown driver event code %d "
1177 "data %04x\n", channel->channel, ev_sub_code,
1183 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1185 struct efx_nic *efx = channel->efx;
1186 unsigned int read_ptr;
1187 efx_qword_t event, *p_event;
1192 read_ptr = channel->eventq_read_ptr;
1195 p_event = efx_event(channel, read_ptr);
1198 if (!efx_event_present(&event))
1202 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1203 "channel %d event is "EFX_QWORD_FMT"\n",
1204 channel->channel, EFX_QWORD_VAL(event));
1206 /* Clear this event by marking it all ones */
1207 EFX_SET_QWORD(*p_event);
1211 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1214 case FSE_AZ_EV_CODE_RX_EV:
1215 efx_handle_rx_event(channel, &event);
1216 if (++spent == budget)
1219 case FSE_AZ_EV_CODE_TX_EV:
1220 tx_packets += efx_handle_tx_event(channel, &event);
1221 if (tx_packets > efx->txq_entries) {
1226 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1227 efx_handle_generated_event(channel, &event);
1229 case FSE_AZ_EV_CODE_DRIVER_EV:
1230 efx_handle_driver_event(channel, &event);
1232 case FSE_CZ_EV_CODE_USER_EV:
1233 efx_sriov_event(channel, &event);
1235 case FSE_CZ_EV_CODE_MCDI_EV:
1236 efx_mcdi_process_event(channel, &event);
1238 case FSE_AZ_EV_CODE_GLOBAL_EV:
1239 if (efx->type->handle_global_event &&
1240 efx->type->handle_global_event(channel, &event))
1242 /* else fall through */
1244 netif_err(channel->efx, hw, channel->efx->net_dev,
1245 "channel %d unknown event type %d (data "
1246 EFX_QWORD_FMT ")\n", channel->channel,
1247 ev_code, EFX_QWORD_VAL(event));
1252 channel->eventq_read_ptr = read_ptr;
1256 /* Check whether an event is present in the eventq at the current
1257 * read pointer. Only useful for self-test.
1259 bool efx_nic_event_present(struct efx_channel *channel)
1261 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1264 /* Allocate buffer table entries for event queue */
1265 int efx_nic_probe_eventq(struct efx_channel *channel)
1267 struct efx_nic *efx = channel->efx;
1270 entries = channel->eventq_mask + 1;
1271 return efx_alloc_special_buffer(efx, &channel->eventq,
1272 entries * sizeof(efx_qword_t));
1275 void efx_nic_init_eventq(struct efx_channel *channel)
1278 struct efx_nic *efx = channel->efx;
1280 netif_dbg(efx, hw, efx->net_dev,
1281 "channel %d event queue in special buffers %d-%d\n",
1282 channel->channel, channel->eventq.index,
1283 channel->eventq.index + channel->eventq.entries - 1);
1285 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1286 EFX_POPULATE_OWORD_3(reg,
1287 FRF_CZ_TIMER_Q_EN, 1,
1288 FRF_CZ_HOST_NOTIFY_MODE, 0,
1289 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1290 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1293 /* Pin event queue buffer */
1294 efx_init_special_buffer(efx, &channel->eventq);
1296 /* Fill event queue with all ones (i.e. empty events) */
1297 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1299 /* Push event queue to card */
1300 EFX_POPULATE_OWORD_3(reg,
1302 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1303 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1304 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1307 efx->type->push_irq_moderation(channel);
1310 void efx_nic_fini_eventq(struct efx_channel *channel)
1313 struct efx_nic *efx = channel->efx;
1315 /* Remove event queue from card */
1316 EFX_ZERO_OWORD(reg);
1317 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1319 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1320 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1322 /* Unpin event queue */
1323 efx_fini_special_buffer(efx, &channel->eventq);
1326 /* Free buffers backing event queue */
1327 void efx_nic_remove_eventq(struct efx_channel *channel)
1329 efx_free_special_buffer(channel->efx, &channel->eventq);
1333 void efx_nic_event_test_start(struct efx_channel *channel)
1335 channel->event_test_cpu = -1;
1337 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1340 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1342 efx_magic_event(efx_rx_queue_channel(rx_queue),
1343 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1346 /**************************************************************************
1348 * Hardware interrupts
1349 * The hardware interrupt handler does very little work; all the event
1350 * queue processing is carried out by per-channel tasklets.
1352 **************************************************************************/
1354 /* Enable/disable/generate interrupts */
1355 static inline void efx_nic_interrupts(struct efx_nic *efx,
1356 bool enabled, bool force)
1358 efx_oword_t int_en_reg_ker;
1360 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1361 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1362 FRF_AZ_KER_INT_KER, force,
1363 FRF_AZ_DRV_INT_EN_KER, enabled);
1364 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1367 void efx_nic_enable_interrupts(struct efx_nic *efx)
1369 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1370 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1372 efx_nic_interrupts(efx, true, false);
1375 void efx_nic_disable_interrupts(struct efx_nic *efx)
1377 /* Disable interrupts */
1378 efx_nic_interrupts(efx, false, false);
1381 /* Generate a test interrupt
1382 * Interrupt must already have been enabled, otherwise nasty things
1385 void efx_nic_irq_test_start(struct efx_nic *efx)
1387 efx->last_irq_cpu = -1;
1389 efx_nic_interrupts(efx, true, true);
1392 /* Process a fatal interrupt
1393 * Disable bus mastering ASAP and schedule a reset
1395 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1397 struct falcon_nic_data *nic_data = efx->nic_data;
1398 efx_oword_t *int_ker = efx->irq_status.addr;
1399 efx_oword_t fatal_intr;
1400 int error, mem_perr;
1402 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1403 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1405 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1406 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1407 EFX_OWORD_VAL(fatal_intr),
1408 error ? "disabling bus mastering" : "no recognised error");
1410 /* If this is a memory parity error, dump which blocks are offending */
1411 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1412 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1415 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1416 netif_err(efx, hw, efx->net_dev,
1417 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1418 EFX_OWORD_VAL(reg));
1421 /* Disable both devices */
1422 pci_clear_master(efx->pci_dev);
1423 if (efx_nic_is_dual_func(efx))
1424 pci_clear_master(nic_data->pci_dev2);
1425 efx_nic_disable_interrupts(efx);
1427 /* Count errors and reset or disable the NIC accordingly */
1428 if (efx->int_error_count == 0 ||
1429 time_after(jiffies, efx->int_error_expire)) {
1430 efx->int_error_count = 0;
1431 efx->int_error_expire =
1432 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1434 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1435 netif_err(efx, hw, efx->net_dev,
1436 "SYSTEM ERROR - reset scheduled\n");
1437 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1439 netif_err(efx, hw, efx->net_dev,
1440 "SYSTEM ERROR - max number of errors seen."
1441 "NIC will be disabled\n");
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1448 /* Handle a legacy interrupt
1449 * Acknowledges the interrupt and schedules event queue processing.
1451 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1453 struct efx_nic *efx = dev_id;
1454 efx_oword_t *int_ker = efx->irq_status.addr;
1455 irqreturn_t result = IRQ_NONE;
1456 struct efx_channel *channel;
1461 /* Could this be ours? If interrupts are disabled then the
1462 * channel state may not be valid.
1464 if (!efx->legacy_irq_enabled)
1467 /* Read the ISR which also ACKs the interrupts */
1468 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1469 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
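/* One ISR bit per event queue; the bit at efx->irq_level also signals
 * fatal and other non-event-queue sources.
 */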
1471 /* Handle non-event-queue sources */
1472 if (queues & (1U << efx->irq_level)) {
1473 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1474 if (unlikely(syserr))
1475 return efx_nic_fatal_interrupt(efx);
1476 efx->last_irq_cpu = raw_smp_processor_id();
1480 if (EFX_WORKAROUND_15783(efx))
1481 efx->irq_zero_count = 0;
1483 /* Schedule processing of any interrupting queues */
1484 efx_for_each_channel(channel, efx) {
1486 efx_schedule_channel_irq(channel);
1489 result = IRQ_HANDLED;
1491 } else if (EFX_WORKAROUND_15783(efx)) {
1494 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1495 * because this might be a shared interrupt. */
1496 if (efx->irq_zero_count++ == 0)
1497 result = IRQ_HANDLED;
1499 /* Ensure we schedule or rearm all event queues */
1500 efx_for_each_channel(channel, efx) {
1501 event = efx_event(channel, channel->eventq_read_ptr);
1502 if (efx_event_present(event))
1503 efx_schedule_channel_irq(channel);
1505 efx_nic_eventq_read_ack(channel);
1509 if (result == IRQ_HANDLED)
1510 netif_vdbg(efx, intr, efx->net_dev,
1511 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1512 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1517 /* Handle an MSI interrupt
1519 * Handle an MSI hardware interrupt. This routine schedules event
1520 * queue processing. No interrupt acknowledgement cycle is necessary.
1521 * Also, we never need to check that the interrupt is for us, since
1522 * MSI interrupts cannot be shared.
1524 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1526 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1527 struct efx_nic *efx = channel->efx;
1528 efx_oword_t *int_ker = efx->irq_status.addr;
1531 netif_vdbg(efx, intr, efx->net_dev,
1532 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1533 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1535 /* Handle non-event-queue sources */
1536 if (channel->channel == efx->irq_level) {
1537 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1538 if (unlikely(syserr))
1539 return efx_nic_fatal_interrupt(efx);
1540 efx->last_irq_cpu = raw_smp_processor_id();
1543 /* Schedule processing of the channel */
1544 efx_schedule_channel_irq(channel);
1550 /* Set up the RSS indirection table.
1551 * This maps the RSS hash value of each received packet to an RX queue.
1553 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1558 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1561 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1562 FR_BZ_RX_INDIRECTION_TBL_ROWS);
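/* Copy the driver's efx->rx_indir_table into the hardware table; each row
 * names the RX queue used for packets whose hash selects that row.
 */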
1564 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1565 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1566 efx->rx_indir_table[i]);
1567 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1571 /* Hook interrupt handler(s)
1572 * Try MSI and then legacy interrupts.
1574 int efx_nic_init_interrupt(struct efx_nic *efx)
1576 struct efx_channel *channel;
1579 if (!EFX_INT_MODE_USE_MSI(efx)) {
1580 irq_handler_t handler;
1581 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1582 handler = efx_legacy_interrupt;
1584 handler = falcon_legacy_interrupt_a1;
1586 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1589 netif_err(efx, drv, efx->net_dev,
1590 "failed to hook legacy IRQ %d\n",
1597 /* Hook MSI or MSI-X interrupt */
1598 efx_for_each_channel(channel, efx) {
1599 rc = request_irq(channel->irq, efx_msi_interrupt,
1600 IRQF_PROBE_SHARED, /* Not shared */
1601 efx->channel_name[channel->channel],
1602 &efx->channel[channel->channel]);
1604 netif_err(efx, drv, efx->net_dev,
1605 "failed to hook IRQ %d\n", channel->irq);
1613 efx_for_each_channel(channel, efx)
1614 free_irq(channel->irq, &efx->channel[channel->channel]);
1619 void efx_nic_fini_interrupt(struct efx_nic *efx)
1621 struct efx_channel *channel;
1624 /* Disable MSI/MSI-X interrupts */
1625 efx_for_each_channel(channel, efx) {
1627 free_irq(channel->irq, &efx->channel[channel->channel]);
1630 /* ACK legacy interrupt */
1631 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1632 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1634 falcon_irq_ack_a1(efx);
1636 /* Disable legacy interrupt */
1637 if (efx->legacy_irq)
1638 free_irq(efx->legacy_irq, efx);
1641 /* Looks at available SRAM resources and works out how many queues we
1642 * can support, and where things like descriptor caches should live.
1644 * SRAM is split up as follows:
1645 * 0 buftbl entries for channels
1646 * efx->vf_buftbl_base buftbl entries for SR-IOV
1647 * efx->rx_dc_base RX descriptor caches
1648 * efx->tx_dc_base TX descriptor caches
1650 void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1652 unsigned vi_count, buftbl_min;
1654 /* Account for the buffer table entries backing the datapath channels
1655 * and the descriptor caches for those channels.
1657 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1658 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1659 efx->n_channels * EFX_MAX_EVQ_SIZE)
1660 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
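/* Each descriptor or event is an efx_qword_t and one buffer table entry
 * backs EFX_BUF_SIZE bytes of ring, hence the scaling above.
 */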
1661 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1663 #ifdef CONFIG_SFC_SRIOV
1664 if (efx_sriov_wanted(efx)) {
1665 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1667 efx->vf_buftbl_base = buftbl_min;
1669 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1670 vi_count = max(vi_count, EFX_VI_BASE);
1671 buftbl_free = (sram_lim_qw - buftbl_min -
1672 vi_count * vi_dc_entries);
1674 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1676 vf_limit = min(buftbl_free / entries_per_vf,
1677 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1679 if (efx->vf_count > vf_limit) {
1680 netif_err(efx, probe, efx->net_dev,
1681 "Reducing VF count from from %d to %d\n",
1682 efx->vf_count, vf_limit);
1683 efx->vf_count = vf_limit;
1685 vi_count += efx->vf_count * efx_vf_size(efx);
1689 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1690 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1693 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1695 efx_oword_t altera_build;
1696 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1697 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1700 void efx_nic_init_common(struct efx_nic *efx)
1704 /* Set positions of descriptor caches in SRAM. */
1705 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1706 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1707 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1708 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1710 /* Set TX descriptor cache size. */
1711 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1712 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1713 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1715 /* Set RX descriptor cache size. Set low watermark to size-8, as
1716 * this allows most efficient prefetching.
1718 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1719 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1720 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1721 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1722 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1724 /* Program INT_KER address */
1725 EFX_POPULATE_OWORD_2(temp,
1726 FRF_AZ_NORM_INT_VEC_DIS_KER,
1727 EFX_INT_MODE_USE_MSI(efx),
1728 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1729 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1731 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1732 /* Use an interrupt level unused by event queues */
1733 efx->irq_level = 0x1f;
1735 /* Use a valid MSI-X vector */
1738 /* Enable all the genuinely fatal interrupts. (They are still
1739 * masked by the overall interrupt mask, controlled by
1740 * falcon_interrupts()).
1742 * Note: All other fatal interrupts are enabled
1744 EFX_POPULATE_OWORD_3(temp,
1745 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1746 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1747 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1748 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1749 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1750 EFX_INVERT_OWORD(temp);
1751 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1753 efx_nic_push_rx_indir_table(efx);
1755 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1756 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1758 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1759 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1760 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1761 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1762 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1763 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1764 /* Enable SW_EV to inherit in char driver - assume harmless here */
1765 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1766 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1767 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1768 /* Disable hardware watchdog which can misfire */
1769 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1770 /* Squash TX of packets of 16 bytes or less */
1771 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1772 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1773 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1775 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1776 EFX_POPULATE_OWORD_4(temp,
1777 /* Default values */
1778 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1779 FRF_BZ_TX_PACE_SB_AF, 0xb,
1780 FRF_BZ_TX_PACE_FB_BASE, 0,
1781 /* Allow large pace values in the
1783 FRF_BZ_TX_PACE_BIN_TH,
1784 FFE_BZ_TX_PACE_RESERVED);
1785 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1791 #define REGISTER_REVISION_A 1
1792 #define REGISTER_REVISION_B 2
1793 #define REGISTER_REVISION_C 3
1794 #define REGISTER_REVISION_Z 3 /* latest revision */
1796 struct efx_nic_reg {
1798 u32 min_revision:2, max_revision:2;
1801 #define REGISTER(name, min_rev, max_rev) { \
1802 FR_ ## min_rev ## max_rev ## _ ## name, \
1803 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1805 #define REGISTER_AA(name) REGISTER(name, A, A)
1806 #define REGISTER_AB(name) REGISTER(name, A, B)
1807 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1808 #define REGISTER_BB(name) REGISTER(name, B, B)
1809 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1810 #define REGISTER_CZ(name) REGISTER(name, C, Z)
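/* For example, REGISTER_AB(NIC_STAT) expands to
 * { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B },
 * i.e. a register that exists only on revisions A and B.
 */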
1812 static const struct efx_nic_reg efx_nic_regs[] = {
1813 REGISTER_AZ(ADR_REGION),
1814 REGISTER_AZ(INT_EN_KER),
1815 REGISTER_BZ(INT_EN_CHAR),
1816 REGISTER_AZ(INT_ADR_KER),
1817 REGISTER_BZ(INT_ADR_CHAR),
1818 /* INT_ACK_KER is WO */
1819 /* INT_ISR0 is RC */
1820 REGISTER_AZ(HW_INIT),
1821 REGISTER_CZ(USR_EV_CFG),
1822 REGISTER_AB(EE_SPI_HCMD),
1823 REGISTER_AB(EE_SPI_HADR),
1824 REGISTER_AB(EE_SPI_HDATA),
1825 REGISTER_AB(EE_BASE_PAGE),
1826 REGISTER_AB(EE_VPD_CFG0),
1827 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1828 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1829 /* PCIE_CORE_INDIRECT is indirect */
1830 REGISTER_AB(NIC_STAT),
1831 REGISTER_AB(GPIO_CTL),
1832 REGISTER_AB(GLB_CTL),
1833 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1834 REGISTER_BZ(DP_CTRL),
1835 REGISTER_AZ(MEM_STAT),
1836 REGISTER_AZ(CS_DEBUG),
1837 REGISTER_AZ(ALTERA_BUILD),
1838 REGISTER_AZ(CSR_SPARE),
1839 REGISTER_AB(PCIE_SD_CTL0123),
1840 REGISTER_AB(PCIE_SD_CTL45),
1841 REGISTER_AB(PCIE_PCS_CTL_STAT),
1842 /* DEBUG_DATA_OUT is not used */
1844 REGISTER_AZ(EVQ_CTL),
1845 REGISTER_AZ(EVQ_CNT1),
1846 REGISTER_AZ(EVQ_CNT2),
1847 REGISTER_AZ(BUF_TBL_CFG),
1848 REGISTER_AZ(SRM_RX_DC_CFG),
1849 REGISTER_AZ(SRM_TX_DC_CFG),
1850 REGISTER_AZ(SRM_CFG),
1851 /* BUF_TBL_UPD is WO */
1852 REGISTER_AZ(SRM_UPD_EVQ),
1853 REGISTER_AZ(SRAM_PARITY),
1854 REGISTER_AZ(RX_CFG),
1855 REGISTER_BZ(RX_FILTER_CTL),
1856 /* RX_FLUSH_DESCQ is WO */
1857 REGISTER_AZ(RX_DC_CFG),
1858 REGISTER_AZ(RX_DC_PF_WM),
1859 REGISTER_BZ(RX_RSS_TKEY),
1860 /* RX_NODESC_DROP is RC */
1861 REGISTER_AA(RX_SELF_RST),
1862 /* RX_DEBUG, RX_PUSH_DROP are not used */
1863 REGISTER_CZ(RX_RSS_IPV6_REG1),
1864 REGISTER_CZ(RX_RSS_IPV6_REG2),
1865 REGISTER_CZ(RX_RSS_IPV6_REG3),
1866 /* TX_FLUSH_DESCQ is WO */
1867 REGISTER_AZ(TX_DC_CFG),
1868 REGISTER_AA(TX_CHKSM_CFG),
1869 REGISTER_AZ(TX_CFG),
1870 /* TX_PUSH_DROP is not used */
1871 REGISTER_AZ(TX_RESERVED),
1872 REGISTER_BZ(TX_PACE),
1873 /* TX_PACE_DROP_QID is RC */
1874 REGISTER_BB(TX_VLAN),
1875 REGISTER_BZ(TX_IPFIL_PORTEN),
1876 REGISTER_AB(MD_TXD),
1877 REGISTER_AB(MD_RXD),
1879 REGISTER_AB(MD_PHY_ADR),
1882 REGISTER_AB(MAC_STAT_DMA),
1883 REGISTER_AB(MAC_CTRL),
1884 REGISTER_BB(GEN_MODE),
1885 REGISTER_AB(MAC_MC_HASH_REG0),
1886 REGISTER_AB(MAC_MC_HASH_REG1),
1887 REGISTER_AB(GM_CFG1),
1888 REGISTER_AB(GM_CFG2),
1889 /* GM_IPG and GM_HD are not used */
1890 REGISTER_AB(GM_MAX_FLEN),
1891 /* GM_TEST is not used */
1892 REGISTER_AB(GM_ADR1),
1893 REGISTER_AB(GM_ADR2),
1894 REGISTER_AB(GMF_CFG0),
1895 REGISTER_AB(GMF_CFG1),
1896 REGISTER_AB(GMF_CFG2),
1897 REGISTER_AB(GMF_CFG3),
1898 REGISTER_AB(GMF_CFG4),
1899 REGISTER_AB(GMF_CFG5),
1900 REGISTER_BB(TX_SRC_MAC_CTL),
1901 REGISTER_AB(XM_ADR_LO),
1902 REGISTER_AB(XM_ADR_HI),
1903 REGISTER_AB(XM_GLB_CFG),
1904 REGISTER_AB(XM_TX_CFG),
1905 REGISTER_AB(XM_RX_CFG),
1906 REGISTER_AB(XM_MGT_INT_MASK),
1908 REGISTER_AB(XM_PAUSE_TIME),
1909 REGISTER_AB(XM_TX_PARAM),
1910 REGISTER_AB(XM_RX_PARAM),
1911 /* XM_MGT_INT_MSK (note no 'A') is RC */
1912 REGISTER_AB(XX_PWR_RST),
1913 REGISTER_AB(XX_SD_CTL),
1914 REGISTER_AB(XX_TXDRV_CTL),
1915 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1916 /* XX_CORE_STAT is partly RC */
1919 struct efx_nic_reg_table {
1921 u32 min_revision:2, max_revision:2;
1922 u32 step:6, rows:21;
1925 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1927 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1930 #define REGISTER_TABLE(name, min_rev, max_rev) \
1931 REGISTER_TABLE_DIMENSIONS( \
1932 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1934 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1935 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1936 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1937 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1938 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1939 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1940 #define REGISTER_TABLE_BB_CZ(name) \
1941 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1942 FR_BZ_ ## name ## _STEP, \
1943 FR_BB_ ## name ## _ROWS), \
1944 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1945 FR_BZ_ ## name ## _STEP, \
1946 FR_CZ_ ## name ## _ROWS)
1947 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
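/* REGISTER_TABLE_BB_CZ() emits two entries for a table that keeps the same
 * offset and step but changes its row count between revision B and
 * revisions C onwards.
 */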
1949 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1950 /* DRIVER is not used */
1951 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1952 REGISTER_TABLE_BB(TX_IPFIL_TBL),
1953 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1954 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1955 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1956 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1957 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1958 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1959 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1960 /* We can't reasonably read all of the buffer table (up to 8MB!).
1961 * However this driver will only use a few entries. Reading
1962 * 1K entries allows for some expansion of queue count and
1963 * size before we need to change the version. */
1964 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1966 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1968 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1969 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1970 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1971 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1972 /* TX_FILTER_TBL0 is huge and not used by this driver */
1973 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1974 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1975 /* MSIX_PBA_TABLE is not mapped */
1976 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1977 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1980 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1982 const struct efx_nic_reg *reg;
1983 const struct efx_nic_reg_table *table;
1986 for (reg = efx_nic_regs;
1987 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1989 if (efx->type->revision >= reg->min_revision &&
1990 efx->type->revision <= reg->max_revision)
1991 len += sizeof(efx_oword_t);
1993 for (table = efx_nic_reg_tables;
1994 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1996 if (efx->type->revision >= table->min_revision &&
1997 efx->type->revision <= table->max_revision)
1998 len += table->rows * min_t(size_t, table->step, 16);
2003 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2005 const struct efx_nic_reg *reg;
2006 const struct efx_nic_reg_table *table;
2008 for (reg = efx_nic_regs;
2009 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2011 if (efx->type->revision >= reg->min_revision &&
2012 efx->type->revision <= reg->max_revision) {
2013 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
2014 buf += sizeof(efx_oword_t);
2018 for (table = efx_nic_reg_tables;
2019 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2023 if (!(efx->type->revision >= table->min_revision &&
2024 efx->type->revision <= table->max_revision))
2027 size = min_t(size_t, table->step, 16);
2029 for (i = 0; i < table->rows; i++) {
2030 switch (table->step) {
2031 case 4: /* 32-bit register or SRAM */
2032 efx_readd_table(efx, buf, table->offset, i);
2034 case 8: /* 64-bit SRAM */
2036 efx->membase + table->offset,
2039 case 16: /* 128-bit register */
2040 efx_reado_table(efx, buf, table->offset, i);
2042 case 32: /* 128-bit register, interleaved */
2043 efx_reado_table(efx, buf, table->offset, 2 * i);