1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #ifdef CONFIG_RFS_ACCEL
9 #include <linux/cpu_rmap.h>
10 #endif /* CONFIG_RFS_ACCEL */
11 #include <linux/ethtool.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/numa.h>
15 #include <linux/pci.h>
16 #include <linux/utsname.h>
17 #include <linux/version.h>
18 #include <linux/vmalloc.h>
21 #include "ena_netdev.h"
22 #include <linux/bpf_trace.h>
23 #include "ena_pci_id_tbl.h"
25 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
26 MODULE_DESCRIPTION(DEVICE_NAME);
27 MODULE_LICENSE("GPL");
29 /* Time in jiffies before concluding the transmitter is hung. */
30 #define TX_TIMEOUT (5 * HZ)
32 #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
34 #define ENA_NAPI_BUDGET 64
36 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
37 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
39 static struct ena_aenq_handlers aenq_handlers;
41 static struct workqueue_struct *ena_wq;
43 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
45 static int ena_rss_init_default(struct ena_adapter *adapter);
46 static void check_for_admin_com_state(struct ena_adapter *adapter);
47 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
48 static int ena_restore_device(struct ena_adapter *adapter);
50 static void ena_init_io_rings(struct ena_adapter *adapter,
51 int first_index, int count);
52 static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
54 static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
56 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
57 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
60 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
61 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
62 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
63 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
64 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
65 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
66 int first_index, int count);
67 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
68 int first_index, int count);
69 static int ena_up(struct ena_adapter *adapter);
70 static void ena_down(struct ena_adapter *adapter);
71 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
72 struct ena_ring *rx_ring);
73 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
74 struct ena_ring *rx_ring);
75 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
76 struct ena_tx_buffer *tx_info);
77 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
78 int first_index, int count);
80 /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
81 static void ena_increase_stat(u64 *statp, u64 cnt,
82 struct u64_stats_sync *syncp)
84 u64_stats_update_begin(syncp);
86 u64_stats_update_end(syncp);
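/* A note on u64_stats_sync (general kernel semantics, not ENA-specific): on
 * 64-bit kernels u64_stats_update_begin()/end() are effectively no-ops and the
 * counter update they bracket is a plain 64-bit store; the seqcount is only
 * taken on 32-bit machines so stats readers never observe a torn 64-bit value.
 */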
89 static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
91 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
92 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
95 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
97 struct ena_adapter *adapter = netdev_priv(dev);
99 	/* Change the state of the device to trigger a reset.
100 	 * Check that a reset is not already in progress or already triggered.
103 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
106 adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
107 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
109 	netif_err(adapter, tx_err, dev, "Transmit timed out\n");
112 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
116 for (i = 0; i < adapter->num_io_queues; i++)
117 adapter->rx_ring[i].mtu = mtu;
120 static int ena_change_mtu(struct net_device *dev, int new_mtu)
122 struct ena_adapter *adapter = netdev_priv(dev);
125 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
127 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
128 update_rx_ring_mtu(adapter, new_mtu);
131 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
138 static int ena_xmit_common(struct net_device *dev,
139 struct ena_ring *ring,
140 struct ena_tx_buffer *tx_info,
141 struct ena_com_tx_ctx *ena_tx_ctx,
145 struct ena_adapter *adapter = netdev_priv(dev);
148 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
150 netif_dbg(adapter, tx_queued, dev,
151 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
153 ena_ring_tx_doorbell(ring);
156 /* prepare the packet's descriptors to dma engine */
157 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
160 /* In case there isn't enough space in the queue for the packet,
161 * we simply drop it. All other failure reasons of
162 * ena_com_prepare_tx() are fatal and therefore require a device reset.
165 netif_err(adapter, tx_queued, dev,
166 "Failed to prepare tx bufs\n");
167 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
170 adapter->reset_reason =
171 ENA_REGS_RESET_DRIVER_INVALID_STATE;
172 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
177 u64_stats_update_begin(&ring->syncp);
178 ring->tx_stats.cnt++;
179 ring->tx_stats.bytes += bytes;
180 u64_stats_update_end(&ring->syncp);
182 tx_info->tx_descs = nb_hw_desc;
183 tx_info->last_jiffies = jiffies;
184 tx_info->print_once = 0;
186 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
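/* The next_to_use advance above relies on the ring size being a power of two;
 * ENA_TX_RING_IDX_NEXT() in ena_netdev.h is defined roughly as
 *
 *	#define ENA_TX_RING_IDX_NEXT(idx, ring_size) \
 *		(((idx) + 1) & ((ring_size) - 1))
 *
 * so the index wraps back to zero with a mask rather than a modulo.
 */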
191 /* This is the XDP napi callback. XDP queues use a napi handler separate from the one used by the regular IO queues. */
194 static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
196 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
197 u32 xdp_work_done, xdp_budget;
198 struct ena_ring *xdp_ring;
199 int napi_comp_call = 0;
202 xdp_ring = ena_napi->xdp_ring;
206 if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
207 test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
208 napi_complete_done(napi, 0);
212 xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
214 	/* If the device is about to reset or is down, avoid unmasking
215 	 * the interrupt and return 0 so that NAPI won't reschedule
217 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
218 napi_complete_done(napi, 0);
220 } else if (xdp_budget > xdp_work_done) {
222 if (napi_complete_done(napi, xdp_work_done))
223 ena_unmask_interrupt(xdp_ring, NULL);
224 ena_update_ring_numa_node(xdp_ring, NULL);
230 u64_stats_update_begin(&xdp_ring->syncp);
231 xdp_ring->tx_stats.napi_comp += napi_comp_call;
232 xdp_ring->tx_stats.tx_poll++;
233 u64_stats_update_end(&xdp_ring->syncp);
234 xdp_ring->tx_stats.last_napi_jiffies = jiffies;
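/* As with any NAPI poll routine, returning a value smaller than the budget
 * (after napi_complete_done()) tells the core the queue is drained and the
 * interrupt is re-armed via ena_unmask_interrupt(); returning the full budget
 * keeps the ring in polling mode and NAPI will invoke this handler again.
 */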
239 static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
240 struct ena_tx_buffer *tx_info,
241 struct xdp_frame *xdpf,
242 struct ena_com_tx_ctx *ena_tx_ctx)
244 struct ena_adapter *adapter = xdp_ring->adapter;
245 struct ena_com_buf *ena_buf;
251 tx_info->xdpf = xdpf;
252 data = tx_info->xdpf->data;
253 size = tx_info->xdpf->len;
255 if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
256 /* Designate part of the packet for LLQ */
257 push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
259 ena_tx_ctx->push_header = data;
265 ena_tx_ctx->header_len = push_len;
268 dma = dma_map_single(xdp_ring->dev,
272 if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
273 goto error_report_dma_error;
275 tx_info->map_linear_data = 0;
277 ena_buf = tx_info->bufs;
278 ena_buf->paddr = dma;
281 ena_tx_ctx->ena_bufs = ena_buf;
282 ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
287 error_report_dma_error:
288 ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
290 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
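/* In LLQ (low-latency queue) placement mode the first tx_max_header_size bytes
 * of the frame are pushed to device memory together with the descriptors
 * (ena_tx_ctx->push_header above) and only the remainder of the frame is
 * DMA-mapped; in host placement mode the whole frame is mapped.
 */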
295 static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
296 struct net_device *dev,
297 struct xdp_frame *xdpf,
300 struct ena_com_tx_ctx ena_tx_ctx = {};
301 struct ena_tx_buffer *tx_info;
302 u16 next_to_use, req_id;
305 next_to_use = xdp_ring->next_to_use;
306 req_id = xdp_ring->free_ids[next_to_use];
307 tx_info = &xdp_ring->tx_buffer_info[req_id];
308 tx_info->num_of_bufs = 0;
310 rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
314 ena_tx_ctx.req_id = req_id;
316 rc = ena_xmit_common(dev,
323 goto error_unmap_dma;
325 /* trigger the dma engine. ena_ring_tx_doorbell()
326 * calls a memory barrier inside it.
328 if (flags & XDP_XMIT_FLUSH)
329 ena_ring_tx_doorbell(xdp_ring);
334 ena_unmap_tx_buff(xdp_ring, tx_info);
335 tx_info->xdpf = NULL;
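/* ena_xdp_xmit() below is the driver's .ndo_xdp_xmit callback, invoked when an
 * XDP program on some interface redirects frames to this device
 * (XDP_REDIRECT). It picks an XDP TX ring by CPU id, transmits as many of the
 * n frames as possible and returns that count; XDP_XMIT_FLUSH asks the driver
 * to ring the doorbell when done.
 */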
339 static int ena_xdp_xmit(struct net_device *dev, int n,
340 struct xdp_frame **frames, u32 flags)
342 struct ena_adapter *adapter = netdev_priv(dev);
343 struct ena_ring *xdp_ring;
344 int qid, i, nxmit = 0;
346 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
349 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
352 /* We assume that all rings have the same XDP program */
353 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
356 qid = smp_processor_id() % adapter->xdp_num_queues;
357 qid += adapter->xdp_first_ring;
358 xdp_ring = &adapter->tx_ring[qid];
360 	/* Other CPU ids might try to send through this queue */
361 spin_lock(&xdp_ring->xdp_tx_lock);
363 for (i = 0; i < n; i++) {
364 if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
369 /* Ring doorbell to make device aware of the packets */
370 if (flags & XDP_XMIT_FLUSH)
371 ena_ring_tx_doorbell(xdp_ring);
373 spin_unlock(&xdp_ring->xdp_tx_lock);
375 /* Return number of packets sent */
379 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
381 struct bpf_prog *xdp_prog;
382 struct ena_ring *xdp_ring;
383 u32 verdict = XDP_PASS;
384 struct xdp_frame *xdpf;
387 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
392 verdict = bpf_prog_run_xdp(xdp_prog, xdp);
396 xdpf = xdp_convert_buff_to_frame(xdp);
397 if (unlikely(!xdpf)) {
398 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
399 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
400 verdict = XDP_ABORTED;
404 /* Find xmit queue */
405 xdp_ring = rx_ring->xdp_ring;
407 /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
408 spin_lock(&xdp_ring->xdp_tx_lock);
410 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
412 xdp_return_frame(xdpf);
414 spin_unlock(&xdp_ring->xdp_tx_lock);
415 xdp_stat = &rx_ring->rx_stats.xdp_tx;
418 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
419 xdp_stat = &rx_ring->rx_stats.xdp_redirect;
422 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
423 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
424 verdict = XDP_ABORTED;
427 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
428 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
431 xdp_stat = &rx_ring->rx_stats.xdp_drop;
434 xdp_stat = &rx_ring->rx_stats.xdp_pass;
437 bpf_warn_invalid_xdp_action(verdict);
438 xdp_stat = &rx_ring->rx_stats.xdp_invalid;
441 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
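/* The switch above maps each XDP verdict to a per-ring counter: XDP_TX sends
 * the frame on the paired XDP TX ring, XDP_REDIRECT hands it to
 * xdp_do_redirect(), XDP_PASS lets the frame continue to the regular skb
 * receive path, XDP_DROP simply drops it, and unknown verdicts are reported
 * via bpf_warn_invalid_xdp_action() and counted as invalid.
 */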
446 static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
448 adapter->xdp_first_ring = adapter->num_io_queues;
449 adapter->xdp_num_queues = adapter->num_io_queues;
451 ena_init_io_rings(adapter,
452 adapter->xdp_first_ring,
453 adapter->xdp_num_queues);
456 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
460 rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
461 adapter->xdp_num_queues);
465 rc = ena_create_io_tx_queues_in_range(adapter,
466 adapter->xdp_first_ring,
467 adapter->xdp_num_queues);
474 ena_free_all_io_tx_resources(adapter);
479 /* Provides a way for both kernel and bpf-prog to know
480 * more about the RX-queue a given XDP frame arrived on.
482 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
486 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
489 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
490 "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
495 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
499 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
500 "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
502 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
509 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
511 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
512 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
515 static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
516 struct bpf_prog *prog,
517 int first, int count)
519 struct ena_ring *rx_ring;
522 for (i = first; i < count; i++) {
523 rx_ring = &adapter->rx_ring[i];
524 xchg(&rx_ring->xdp_bpf_prog, prog);
526 ena_xdp_register_rxq_info(rx_ring);
527 rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
529 ena_xdp_unregister_rxq_info(rx_ring);
530 rx_ring->rx_headroom = NET_SKB_PAD;
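/* While an XDP program is attached the RX buffers reserve XDP_PACKET_HEADROOM
 * bytes in front of the packet so the program can grow headers with
 * bpf_xdp_adjust_head(); without a program the smaller NET_SKB_PAD headroom is
 * used, as restored above.
 */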
535 static void ena_xdp_exchange_program(struct ena_adapter *adapter,
536 struct bpf_prog *prog)
538 struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
540 ena_xdp_exchange_program_rx_in_range(adapter,
543 adapter->num_io_queues);
546 bpf_prog_put(old_bpf_prog);
549 static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
554 was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
559 adapter->xdp_first_ring = 0;
560 adapter->xdp_num_queues = 0;
561 ena_xdp_exchange_program(adapter, NULL);
563 rc = ena_up(adapter);
570 static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
572 struct ena_adapter *adapter = netdev_priv(netdev);
573 struct bpf_prog *prog = bpf->prog;
574 struct bpf_prog *old_bpf_prog;
578 is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
579 rc = ena_xdp_allowed(adapter);
580 if (rc == ENA_XDP_ALLOWED) {
581 old_bpf_prog = adapter->xdp_bpf_prog;
584 ena_init_all_xdp_queues(adapter);
585 } else if (!old_bpf_prog) {
587 ena_init_all_xdp_queues(adapter);
589 ena_xdp_exchange_program(adapter, prog);
591 if (is_up && !old_bpf_prog) {
592 rc = ena_up(adapter);
596 } else if (old_bpf_prog) {
597 rc = ena_destroy_and_free_all_xdp_queues(adapter);
602 prev_mtu = netdev->max_mtu;
603 netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
606 netif_info(adapter, drv, adapter->netdev,
607 "XDP program is set, changing the max_mtu from %d to %d",
608 prev_mtu, netdev->max_mtu);
610 } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
611 netif_err(adapter, drv, adapter->netdev,
612 "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
613 netdev->mtu, ENA_XDP_MAX_MTU);
614 NL_SET_ERR_MSG_MOD(bpf->extack,
615 "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
617 } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
618 netif_err(adapter, drv, adapter->netdev,
619 "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
620 adapter->num_io_queues, adapter->max_num_io_queues);
621 NL_SET_ERR_MSG_MOD(bpf->extack,
622 "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
629 /* This is the main xdp callback; the kernel uses it to set/unset the xdp
630 * program as well as to query the current xdp program id.
632 static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
634 switch (bpf->command) {
636 return ena_xdp_set(netdev, bpf);
643 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
645 #ifdef CONFIG_RFS_ACCEL
649 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
650 if (!adapter->netdev->rx_cpu_rmap)
652 for (i = 0; i < adapter->num_io_queues; i++) {
653 int irq_idx = ENA_IO_IRQ_IDX(i);
655 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
656 pci_irq_vector(adapter->pdev, irq_idx));
658 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
659 adapter->netdev->rx_cpu_rmap = NULL;
663 #endif /* CONFIG_RFS_ACCEL */
667 static void ena_init_io_rings_common(struct ena_adapter *adapter,
668 struct ena_ring *ring, u16 qid)
671 ring->pdev = adapter->pdev;
672 ring->dev = &adapter->pdev->dev;
673 ring->netdev = adapter->netdev;
674 ring->napi = &adapter->ena_napi[qid].napi;
675 ring->adapter = adapter;
676 ring->ena_dev = adapter->ena_dev;
677 ring->per_napi_packets = 0;
679 ring->no_interrupt_event_cnt = 0;
680 u64_stats_init(&ring->syncp);
683 static void ena_init_io_rings(struct ena_adapter *adapter,
684 int first_index, int count)
686 struct ena_com_dev *ena_dev;
687 struct ena_ring *txr, *rxr;
690 ena_dev = adapter->ena_dev;
692 for (i = first_index; i < first_index + count; i++) {
693 txr = &adapter->tx_ring[i];
694 rxr = &adapter->rx_ring[i];
696 /* TX common ring state */
697 ena_init_io_rings_common(adapter, txr, i);
699 /* TX specific ring state */
700 txr->ring_size = adapter->requested_tx_ring_size;
701 txr->tx_max_header_size = ena_dev->tx_max_header_size;
702 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
703 txr->sgl_size = adapter->max_tx_sgl_size;
704 txr->smoothed_interval =
705 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
706 txr->disable_meta_caching = adapter->disable_meta_caching;
707 spin_lock_init(&txr->xdp_tx_lock);
709 /* Don't init RX queues for xdp queues */
710 if (!ENA_IS_XDP_INDEX(adapter, i)) {
711 /* RX common ring state */
712 ena_init_io_rings_common(adapter, rxr, i);
714 /* RX specific ring state */
715 rxr->ring_size = adapter->requested_rx_ring_size;
716 rxr->rx_copybreak = adapter->rx_copybreak;
717 rxr->sgl_size = adapter->max_rx_sgl_size;
718 rxr->smoothed_interval =
719 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
720 rxr->empty_rx_queue = 0;
721 rxr->rx_headroom = NET_SKB_PAD;
722 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
723 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
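/* Ring layout: tx_ring[] holds num_io_queues regular TX rings followed by
 * xdp_num_queues XDP TX rings, so RX queue i uses tx_ring[i + num_io_queues]
 * as the ring on which its XDP_TX/XDP_REDIRECT frames are sent.
 */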
728 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
729 * @adapter: network interface device structure
732 * Return 0 on success, negative on failure
734 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
736 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
737 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
740 if (tx_ring->tx_buffer_info) {
741 netif_err(adapter, ifup,
742 			  adapter->netdev, "tx_buffer_info is not NULL");
746 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
747 node = cpu_to_node(ena_irq->cpu);
749 tx_ring->tx_buffer_info = vzalloc_node(size, node);
750 if (!tx_ring->tx_buffer_info) {
751 tx_ring->tx_buffer_info = vzalloc(size);
752 if (!tx_ring->tx_buffer_info)
753 goto err_tx_buffer_info;
756 size = sizeof(u16) * tx_ring->ring_size;
757 tx_ring->free_ids = vzalloc_node(size, node);
758 if (!tx_ring->free_ids) {
759 tx_ring->free_ids = vzalloc(size);
760 if (!tx_ring->free_ids)
761 goto err_tx_free_ids;
764 size = tx_ring->tx_max_header_size;
765 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
766 if (!tx_ring->push_buf_intermediate_buf) {
767 tx_ring->push_buf_intermediate_buf = vzalloc(size);
768 if (!tx_ring->push_buf_intermediate_buf)
769 goto err_push_buf_intermediate_buf;
772 /* Req id ring for TX out of order completions */
773 for (i = 0; i < tx_ring->ring_size; i++)
774 tx_ring->free_ids[i] = i;
776 /* Reset tx statistics */
777 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
779 tx_ring->next_to_use = 0;
780 tx_ring->next_to_clean = 0;
781 tx_ring->cpu = ena_irq->cpu;
784 err_push_buf_intermediate_buf:
785 vfree(tx_ring->free_ids);
786 tx_ring->free_ids = NULL;
788 vfree(tx_ring->tx_buffer_info);
789 tx_ring->tx_buffer_info = NULL;
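/* The allocations in ena_setup_tx_resources() follow a NUMA-aware pattern:
 * vzalloc_node() first tries the node of the CPU that handles the queue's IRQ
 * and, if that node is out of memory, falls back to a plain vzalloc() from any
 * node rather than failing the queue setup.
 */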
794 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
795 * @adapter: network interface device structure
798 * Free all transmit software resources
800 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
802 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
804 vfree(tx_ring->tx_buffer_info);
805 tx_ring->tx_buffer_info = NULL;
807 vfree(tx_ring->free_ids);
808 tx_ring->free_ids = NULL;
810 vfree(tx_ring->push_buf_intermediate_buf);
811 tx_ring->push_buf_intermediate_buf = NULL;
814 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
820 for (i = first_index; i < first_index + count; i++) {
821 rc = ena_setup_tx_resources(adapter, i);
830 netif_err(adapter, ifup, adapter->netdev,
831 "Tx queue %d: allocation failed\n", i);
833 /* rewind the index freeing the rings as we go */
834 while (first_index < i--)
835 ena_free_tx_resources(adapter, i);
839 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
840 int first_index, int count)
844 for (i = first_index; i < first_index + count; i++)
845 ena_free_tx_resources(adapter, i);
848 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
849 * @adapter: board private structure
851 * Free all transmit software resources
853 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
855 ena_free_all_io_tx_resources_in_range(adapter,
857 adapter->xdp_num_queues +
858 adapter->num_io_queues);
861 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
862 * @adapter: network interface device structure
865 * Returns 0 on success, negative on failure
867 static int ena_setup_rx_resources(struct ena_adapter *adapter,
870 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
871 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
874 if (rx_ring->rx_buffer_info) {
875 netif_err(adapter, ifup, adapter->netdev,
876 "rx_buffer_info is not NULL");
880 	/* allocate an extra element so that in the rx path
881 * we can always prefetch rx_info + 1
883 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
884 node = cpu_to_node(ena_irq->cpu);
886 rx_ring->rx_buffer_info = vzalloc_node(size, node);
887 if (!rx_ring->rx_buffer_info) {
888 rx_ring->rx_buffer_info = vzalloc(size);
889 if (!rx_ring->rx_buffer_info)
893 size = sizeof(u16) * rx_ring->ring_size;
894 rx_ring->free_ids = vzalloc_node(size, node);
895 if (!rx_ring->free_ids) {
896 rx_ring->free_ids = vzalloc(size);
897 if (!rx_ring->free_ids) {
898 vfree(rx_ring->rx_buffer_info);
899 rx_ring->rx_buffer_info = NULL;
904 /* Req id ring for receiving RX pkts out of order */
905 for (i = 0; i < rx_ring->ring_size; i++)
906 rx_ring->free_ids[i] = i;
908 /* Reset rx statistics */
909 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
911 rx_ring->next_to_clean = 0;
912 rx_ring->next_to_use = 0;
913 rx_ring->cpu = ena_irq->cpu;
918 /* ena_free_rx_resources - Free I/O Rx Resources
919 * @adapter: network interface device structure
922 * Free all receive software resources
924 static void ena_free_rx_resources(struct ena_adapter *adapter,
927 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
929 vfree(rx_ring->rx_buffer_info);
930 rx_ring->rx_buffer_info = NULL;
932 vfree(rx_ring->free_ids);
933 rx_ring->free_ids = NULL;
936 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
937 * @adapter: board private structure
939 * Return 0 on success, negative on failure
941 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
945 for (i = 0; i < adapter->num_io_queues; i++) {
946 rc = ena_setup_rx_resources(adapter, i);
955 netif_err(adapter, ifup, adapter->netdev,
956 "Rx queue %d: allocation failed\n", i);
958 /* rewind the index freeing the rings as we go */
960 ena_free_rx_resources(adapter, i);
964 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
965 * @adapter: board private structure
967 * Free all receive software resources
969 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
973 for (i = 0; i < adapter->num_io_queues; i++)
974 ena_free_rx_resources(adapter, i);
977 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
982 	/* This allocates the page on the same NUMA node the executing code is running on. */
985 page = dev_alloc_page();
987 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
989 return ERR_PTR(-ENOSPC);
992 /* To enable NIC-side port-mirroring, AKA SPAN port,
993 * we make the buffer readable from the nic as well
995 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
997 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
998 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
1001 return ERR_PTR(-EIO);
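/* ena_alloc_rx_buffer() below lays the RX page out as
 *	[ rx_headroom | packet data | skb_shared_info tailroom ]
 * The headroom leaves space for XDP header growth (or NET_SKB_PAD), and the
 * tailroom keeps room for the skb_shared_info that build_skb() places at the
 * end of the buffer, which is why ena_buf->len is reduced by both.
 */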
1007 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
1008 struct ena_rx_buffer *rx_info)
1010 int headroom = rx_ring->rx_headroom;
1011 struct ena_com_buf *ena_buf;
1016 /* restore page offset value in case it has been changed by device */
1017 rx_info->page_offset = headroom;
1019 	/* if the previously allocated page is not yet used */
1020 if (unlikely(rx_info->page))
1023 /* We handle DMA here */
1024 page = ena_alloc_map_page(rx_ring, &dma);
1025 if (unlikely(IS_ERR(page)))
1026 return PTR_ERR(page);
1028 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1029 "Allocate page %p, rx_info %p\n", page, rx_info);
1031 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1033 rx_info->page = page;
1034 ena_buf = &rx_info->ena_buf;
1035 ena_buf->paddr = dma + headroom;
1036 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
1041 static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
1042 struct ena_rx_buffer *rx_info)
1044 struct ena_com_buf *ena_buf = &rx_info->ena_buf;
1046 dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
1051 static void ena_free_rx_page(struct ena_ring *rx_ring,
1052 struct ena_rx_buffer *rx_info)
1054 struct page *page = rx_info->page;
1056 if (unlikely(!page)) {
1057 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1058 "Trying to free unallocated buffer\n");
1062 ena_unmap_rx_buff(rx_ring, rx_info);
1065 rx_info->page = NULL;
1068 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
1070 u16 next_to_use, req_id;
1074 next_to_use = rx_ring->next_to_use;
1076 for (i = 0; i < num; i++) {
1077 struct ena_rx_buffer *rx_info;
1079 req_id = rx_ring->free_ids[next_to_use];
1081 rx_info = &rx_ring->rx_buffer_info[req_id];
1083 rc = ena_alloc_rx_buffer(rx_ring, rx_info);
1084 if (unlikely(rc < 0)) {
1085 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1086 "Failed to allocate buffer for rx queue %d\n",
1090 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1094 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1095 "Failed to add buffer for rx queue %d\n",
1099 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1100 rx_ring->ring_size);
1103 if (unlikely(i < num)) {
1104 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
1106 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1107 "Refilled rx qid %d with only %d buffers (from %d)\n",
1108 rx_ring->qid, i, num);
1111 /* ena_com_write_sq_doorbell issues a wmb() */
1113 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1115 rx_ring->next_to_use = next_to_use;
1120 static void ena_free_rx_bufs(struct ena_adapter *adapter,
1123 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1126 for (i = 0; i < rx_ring->ring_size; i++) {
1127 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1130 ena_free_rx_page(rx_ring, rx_info);
1134 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
1135 * @adapter: board private structure
1137 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1139 struct ena_ring *rx_ring;
1140 int i, rc, bufs_num;
1142 for (i = 0; i < adapter->num_io_queues; i++) {
1143 rx_ring = &adapter->rx_ring[i];
1144 bufs_num = rx_ring->ring_size - 1;
1145 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1147 if (unlikely(rc != bufs_num))
1148 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1149 "Refilling Queue %d failed. allocated %d buffers from: %d\n",
1154 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
1158 for (i = 0; i < adapter->num_io_queues; i++)
1159 ena_free_rx_bufs(adapter, i);
1162 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
1163 struct ena_tx_buffer *tx_info)
1165 struct ena_com_buf *ena_buf;
1169 ena_buf = tx_info->bufs;
1170 cnt = tx_info->num_of_bufs;
1175 if (tx_info->map_linear_data) {
1176 dma_unmap_single(tx_ring->dev,
1177 dma_unmap_addr(ena_buf, paddr),
1178 dma_unmap_len(ena_buf, len),
1184 /* unmap remaining mapped pages */
1185 for (i = 0; i < cnt; i++) {
1186 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
1187 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
1192 /* ena_free_tx_bufs - Free Tx Buffers per Queue
1193  * @tx_ring: TX ring for which buffers are to be freed
1195 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
1197 bool print_once = true;
1200 for (i = 0; i < tx_ring->ring_size; i++) {
1201 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1207 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
1208 "Free uncompleted tx skb qid %d idx 0x%x\n",
1212 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
1213 "Free uncompleted tx skb qid %d idx 0x%x\n",
1217 ena_unmap_tx_buff(tx_ring, tx_info);
1219 dev_kfree_skb_any(tx_info->skb);
1221 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
1225 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
1227 struct ena_ring *tx_ring;
1230 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1231 tx_ring = &adapter->tx_ring[i];
1232 ena_free_tx_bufs(tx_ring);
1236 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1241 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1242 ena_qid = ENA_IO_TXQ_IDX(i);
1243 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1247 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1252 for (i = 0; i < adapter->num_io_queues; i++) {
1253 ena_qid = ENA_IO_RXQ_IDX(i);
1254 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1255 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1259 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
1261 ena_destroy_all_tx_queues(adapter);
1262 ena_destroy_all_rx_queues(adapter);
1265 static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
1266 struct ena_tx_buffer *tx_info, bool is_xdp)
1269 netif_err(ring->adapter,
1272 "tx_info doesn't have valid %s",
1273 is_xdp ? "xdp frame" : "skb");
1275 netif_err(ring->adapter,
1278 "Invalid req_id: %hu\n",
1281 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
1283 /* Trigger device reset */
1284 ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
1285 set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
1289 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
1291 struct ena_tx_buffer *tx_info;
1293 tx_info = &tx_ring->tx_buffer_info[req_id];
1294 if (likely(tx_info->skb))
1297 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
1300 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
1302 struct ena_tx_buffer *tx_info;
1304 tx_info = &xdp_ring->tx_buffer_info[req_id];
1305 if (likely(tx_info->xdpf))
1308 return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
1311 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
1313 struct netdev_queue *txq;
1322 next_to_clean = tx_ring->next_to_clean;
1323 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
1325 while (tx_pkts < budget) {
1326 struct ena_tx_buffer *tx_info;
1327 struct sk_buff *skb;
1329 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
1332 if (unlikely(rc == -EINVAL))
1333 handle_invalid_req_id(tx_ring, req_id, NULL,
1338 /* validate that the request id points to a valid skb */
1339 rc = validate_tx_req_id(tx_ring, req_id);
1343 tx_info = &tx_ring->tx_buffer_info[req_id];
1346 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
1347 prefetch(&skb->end);
1349 tx_info->skb = NULL;
1350 tx_info->last_jiffies = 0;
1352 ena_unmap_tx_buff(tx_ring, tx_info);
1354 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1355 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
1358 tx_bytes += skb->len;
1361 total_done += tx_info->tx_descs;
1363 tx_ring->free_ids[next_to_clean] = req_id;
1364 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1365 tx_ring->ring_size);
1368 tx_ring->next_to_clean = next_to_clean;
1369 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
1370 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
1372 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
1374 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1375 "tx_poll: q %d done. total pkts: %d\n",
1376 tx_ring->qid, tx_pkts);
1378 	/* need to make the ring's circular update visible to
1379 * ena_start_xmit() before checking for netif_queue_stopped().
1383 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1384 ENA_TX_WAKEUP_THRESH);
1385 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
1386 __netif_tx_lock(txq, smp_processor_id());
1388 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1389 ENA_TX_WAKEUP_THRESH);
1390 if (netif_tx_queue_stopped(txq) && above_thresh &&
1391 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
1392 netif_tx_wake_queue(txq);
1393 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
1396 __netif_tx_unlock(txq);
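/* The smp_mb() and the re-check under __netif_tx_lock() above implement the
 * usual stop/wake handshake with ena_start_xmit(): the barrier makes the
 * completed descriptors visible before testing netif_tx_queue_stopped(), and
 * the second check under the lock avoids waking a queue that the xmit path is
 * concurrently stopping.
 */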
1402 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
1404 struct sk_buff *skb;
1407 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1408 rx_ring->rx_copybreak);
1410 skb = build_skb(first_frag, ENA_PAGE_SIZE);
1412 if (unlikely(!skb)) {
1413 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
1416 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1417 "Failed to allocate skb. first_frag %s\n",
1418 first_frag ? "provided" : "not provided");
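/* ena_rx_skb() below implements the rx_copybreak optimization: packets no
 * longer than rx_copybreak are copied into a freshly allocated small skb so
 * the DMA-mapped page can stay in the ring, while larger packets hand their
 * pages to the stack (build_skb() for the first buffer, frags for the rest).
 */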
1425 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1426 struct ena_com_rx_buf_info *ena_bufs,
1430 struct ena_rx_buffer *rx_info;
1431 struct ena_adapter *adapter;
1432 u16 len, req_id, buf = 0;
1433 struct sk_buff *skb;
1438 len = ena_bufs[buf].len;
1439 req_id = ena_bufs[buf].req_id;
1441 rx_info = &rx_ring->rx_buffer_info[req_id];
1443 if (unlikely(!rx_info->page)) {
1444 adapter = rx_ring->adapter;
1445 netif_err(adapter, rx_err, rx_ring->netdev,
1446 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
1447 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
1448 adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
1449 /* Make sure reset reason is set before triggering the reset */
1450 smp_mb__before_atomic();
1451 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1455 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1456 "rx_info %p page %p\n",
1457 rx_info, rx_info->page);
1459 /* save virt address of first buffer */
1460 page_addr = page_address(rx_info->page);
1461 page_offset = rx_info->page_offset;
1462 data_addr = page_addr + page_offset;
1464 prefetch(data_addr);
1466 if (len <= rx_ring->rx_copybreak) {
1467 skb = ena_alloc_skb(rx_ring, NULL);
1471 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1472 "RX allocated small packet. len %d. data_len %d\n",
1473 skb->len, skb->data_len);
1475 /* sync this buffer for CPU use */
1476 dma_sync_single_for_cpu(rx_ring->dev,
1477 dma_unmap_addr(&rx_info->ena_buf, paddr),
1480 skb_copy_to_linear_data(skb, data_addr, len);
1481 dma_sync_single_for_device(rx_ring->dev,
1482 dma_unmap_addr(&rx_info->ena_buf, paddr),
1487 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1488 rx_ring->free_ids[*next_to_clean] = req_id;
1489 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1490 rx_ring->ring_size);
1494 ena_unmap_rx_buff(rx_ring, rx_info);
1496 skb = ena_alloc_skb(rx_ring, page_addr);
1500 /* Populate skb's linear part */
1501 skb_reserve(skb, page_offset);
1503 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1506 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1507 "RX skb updated. len %d. data_len %d\n",
1508 skb->len, skb->data_len);
1510 rx_info->page = NULL;
1512 rx_ring->free_ids[*next_to_clean] = req_id;
1514 ENA_RX_RING_IDX_NEXT(*next_to_clean,
1515 rx_ring->ring_size);
1516 if (likely(--descs == 0))
1520 len = ena_bufs[buf].len;
1521 req_id = ena_bufs[buf].req_id;
1523 rx_info = &rx_ring->rx_buffer_info[req_id];
1525 ena_unmap_rx_buff(rx_ring, rx_info);
1527 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1528 rx_info->page_offset, len, ENA_PAGE_SIZE);
1535 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1536 * @adapter: structure containing adapter specific data
1537 * @ena_rx_ctx: received packet context/metadata
1538 * @skb: skb currently being received and modified
1540 static void ena_rx_checksum(struct ena_ring *rx_ring,
1541 struct ena_com_rx_ctx *ena_rx_ctx,
1542 struct sk_buff *skb)
1544 /* Rx csum disabled */
1545 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1546 skb->ip_summed = CHECKSUM_NONE;
1550 /* For fragmented packets the checksum isn't valid */
1551 if (ena_rx_ctx->frag) {
1552 skb->ip_summed = CHECKSUM_NONE;
1556 /* if IP and error */
1557 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1558 (ena_rx_ctx->l3_csum_err))) {
1559 /* ipv4 checksum error */
1560 skb->ip_summed = CHECKSUM_NONE;
1561 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
1563 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1564 "RX IPv4 header checksum error\n");
1569 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1570 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1571 if (unlikely(ena_rx_ctx->l4_csum_err)) {
1572 /* TCP/UDP checksum error */
1573 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
1575 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1576 "RX L4 checksum error\n");
1577 skb->ip_summed = CHECKSUM_NONE;
1581 if (likely(ena_rx_ctx->l4_csum_checked)) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
1586 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
1588 skb->ip_summed = CHECKSUM_NONE;
1591 skb->ip_summed = CHECKSUM_NONE;
1597 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1598 struct ena_com_rx_ctx *ena_rx_ctx,
1599 struct sk_buff *skb)
1601 enum pkt_hash_types hash_type;
1603 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1604 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1605 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1607 hash_type = PKT_HASH_TYPE_L4;
1609 hash_type = PKT_HASH_TYPE_NONE;
1611 /* Override hash type if the packet is fragmented */
1612 if (ena_rx_ctx->frag)
1613 hash_type = PKT_HASH_TYPE_NONE;
1615 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1619 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
1621 struct ena_rx_buffer *rx_info;
1624 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1625 xdp_prepare_buff(xdp, page_address(rx_info->page),
1626 rx_info->page_offset,
1627 rx_ring->ena_bufs[0].len, false);
1628 /* If for some reason we received a bigger packet than
1629 * we expect, then we simply drop it
1631 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1634 ret = ena_xdp_execute(rx_ring, xdp);
1636 /* The xdp program might expand the headers */
1637 if (ret == XDP_PASS) {
1638 rx_info->page_offset = xdp->data - xdp->data_hard_start;
1639 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1644 /* ena_clean_rx_irq - Cleanup RX irq
1645 * @rx_ring: RX ring to clean
1646 * @napi: napi handler
1647 * @budget: how many packets driver is allowed to clean
1649 * Returns the number of cleaned buffers.
1651 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1654 u16 next_to_clean = rx_ring->next_to_clean;
1655 struct ena_com_rx_ctx ena_rx_ctx;
1656 struct ena_rx_buffer *rx_info;
1657 struct ena_adapter *adapter;
1658 u32 res_budget, work_done;
1659 int rx_copybreak_pkt = 0;
1660 int refill_threshold;
1661 struct sk_buff *skb;
1662 int refill_required;
1663 struct xdp_buff xdp;
1670 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1671 "%s qid %d\n", __func__, rx_ring->qid);
1672 res_budget = budget;
1673 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
1676 xdp_verdict = XDP_PASS;
1678 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1679 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1680 ena_rx_ctx.descs = 0;
1681 ena_rx_ctx.pkt_offset = 0;
1682 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1683 rx_ring->ena_com_io_sq,
1688 if (unlikely(ena_rx_ctx.descs == 0))
1691 /* First descriptor might have an offset set by the device */
1692 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1693 rx_info->page_offset += ena_rx_ctx.pkt_offset;
1695 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1696 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1697 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1698 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1700 if (ena_xdp_present_ring(rx_ring))
1701 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
1703 /* allocate skb and fill it */
1704 if (xdp_verdict == XDP_PASS)
1705 skb = ena_rx_skb(rx_ring,
1710 if (unlikely(!skb)) {
1711 for (i = 0; i < ena_rx_ctx.descs; i++) {
1712 int req_id = rx_ring->ena_bufs[i].req_id;
1714 rx_ring->free_ids[next_to_clean] = req_id;
1716 ENA_RX_RING_IDX_NEXT(next_to_clean,
1717 rx_ring->ring_size);
1719 			/* Packet was passed for transmission, unmap it
1722 if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
1723 ena_unmap_rx_buff(rx_ring,
1724 &rx_ring->rx_buffer_info[req_id]);
1725 rx_ring->rx_buffer_info[req_id].page = NULL;
1728 if (xdp_verdict != XDP_PASS) {
1729 xdp_flags |= xdp_verdict;
1736 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1738 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1740 skb_record_rx_queue(skb, rx_ring->qid);
1742 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
1745 total_len += skb->len;
1747 napi_gro_receive(napi, skb);
1750 } while (likely(res_budget));
1752 work_done = budget - res_budget;
1753 rx_ring->per_napi_packets += work_done;
1754 u64_stats_update_begin(&rx_ring->syncp);
1755 rx_ring->rx_stats.bytes += total_len;
1756 rx_ring->rx_stats.cnt += work_done;
1757 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1758 u64_stats_update_end(&rx_ring->syncp);
1760 rx_ring->next_to_clean = next_to_clean;
1762 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1764 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1765 ENA_RX_REFILL_THRESH_PACKET);
1767 /* Optimization, try to batch new rx buffers */
1768 if (refill_required > refill_threshold) {
1769 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1770 ena_refill_rx_bufs(rx_ring, refill_required);
1773 if (xdp_flags & XDP_REDIRECT)
1779 adapter = netdev_priv(rx_ring->netdev);
1781 if (rc == -ENOSPC) {
1782 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
1784 adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1786 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
1788 adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
1791 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
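/* ena_dim_work() and ena_adjust_adaptive_rx_intr_moderation() below implement
 * adaptive RX interrupt moderation on top of the kernel's net_dim library:
 * every non-empty NAPI round feeds a sample of packet/byte counters into
 * net_dim(), which may schedule dim.work to pick a new moderation profile that
 * is then written back as the ring's smoothed_interval.
 */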
1796 static void ena_dim_work(struct work_struct *w)
1798 struct dim *dim = container_of(w, struct dim, work);
1799 struct dim_cq_moder cur_moder =
1800 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1801 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1803 ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1804 dim->state = DIM_START_MEASURE;
1807 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1809 struct dim_sample dim_sample;
1810 struct ena_ring *rx_ring = ena_napi->rx_ring;
1812 if (!rx_ring->per_napi_packets)
1815 rx_ring->non_empty_napi_events++;
1817 dim_update_sample(rx_ring->non_empty_napi_events,
1818 rx_ring->rx_stats.cnt,
1819 rx_ring->rx_stats.bytes,
1822 net_dim(&ena_napi->dim, dim_sample);
1824 rx_ring->per_napi_packets = 0;
1827 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
1828 struct ena_ring *rx_ring)
1830 struct ena_eth_io_intr_reg intr_reg;
1831 u32 rx_interval = 0;
1832 	/* Rx ring can be NULL for XDP tx queues, which don't have an
1833 * accompanying rx_ring pair.
1836 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1837 rx_ring->smoothed_interval :
1838 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1840 /* Update intr register: rx intr delay,
1841 * tx intr delay and interrupt unmask
1843 ena_com_update_intr_reg(&intr_reg,
1845 tx_ring->smoothed_interval,
1848 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
1851 	/* It is a shared MSI-X vector.
1852 	 * The Tx and Rx CQs both have a pointer to it,
1853 	 * so we use one of them to reach the interrupt register.
1854 	 * The Tx ring is used because rx_ring is NULL for XDP queues.
1856 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1859 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1860 struct ena_ring *rx_ring)
1862 int cpu = get_cpu();
1865 /* Check only one ring since the 2 rings are running on the same cpu */
1866 if (likely(tx_ring->cpu == cpu))
1869 numa_node = cpu_to_node(cpu);
1872 if (numa_node != NUMA_NO_NODE) {
1873 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1875 ena_com_update_numa_node(rx_ring->ena_com_io_cq,
1888 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1897 if (unlikely(!xdp_ring))
1899 next_to_clean = xdp_ring->next_to_clean;
1901 while (tx_pkts < budget) {
1902 struct ena_tx_buffer *tx_info;
1903 struct xdp_frame *xdpf;
1905 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1908 if (unlikely(rc == -EINVAL))
1909 handle_invalid_req_id(xdp_ring, req_id, NULL,
1914 /* validate that the request id points to a valid xdp_frame */
1915 rc = validate_xdp_req_id(xdp_ring, req_id);
1919 tx_info = &xdp_ring->tx_buffer_info[req_id];
1920 xdpf = tx_info->xdpf;
1922 tx_info->xdpf = NULL;
1923 tx_info->last_jiffies = 0;
1924 ena_unmap_tx_buff(xdp_ring, tx_info);
1926 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1927 "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
1930 tx_bytes += xdpf->len;
1932 total_done += tx_info->tx_descs;
1934 xdp_return_frame(xdpf);
1935 xdp_ring->free_ids[next_to_clean] = req_id;
1936 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1937 xdp_ring->ring_size);
1940 xdp_ring->next_to_clean = next_to_clean;
1941 ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
1942 ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
1944 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1945 "tx_poll: q %d done. total pkts: %d\n",
1946 xdp_ring->qid, tx_pkts);
1951 static int ena_io_poll(struct napi_struct *napi, int budget)
1953 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1954 struct ena_ring *tx_ring, *rx_ring;
1956 int rx_work_done = 0;
1958 int napi_comp_call = 0;
1961 tx_ring = ena_napi->tx_ring;
1962 rx_ring = ena_napi->rx_ring;
1964 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1966 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1967 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1968 napi_complete_done(napi, 0);
1972 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1973 	/* On netpoll the budget is zero and the handler should only clean the Tx completions. */
1977 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1979 	/* If the device is about to reset or is down, avoid unmasking
1980 	 * the interrupt and return 0 so that NAPI won't reschedule
1982 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1983 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1984 napi_complete_done(napi, 0);
1987 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1990 		/* Update numa and unmask the interrupt only when scheduled
1991 		 * from the interrupt context (vs. from sk_busy_loop)
1993 if (napi_complete_done(napi, rx_work_done) &&
1994 READ_ONCE(ena_napi->interrupts_masked)) {
1995 smp_rmb(); /* make sure interrupts_masked is read */
1996 WRITE_ONCE(ena_napi->interrupts_masked, false);
1997 /* We apply adaptive moderation on Rx path only.
1998 * Tx uses static interrupt moderation.
2000 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
2001 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
2003 ena_unmask_interrupt(tx_ring, rx_ring);
2006 ena_update_ring_numa_node(tx_ring, rx_ring);
2013 u64_stats_update_begin(&tx_ring->syncp);
2014 tx_ring->tx_stats.napi_comp += napi_comp_call;
2015 tx_ring->tx_stats.tx_poll++;
2016 u64_stats_update_end(&tx_ring->syncp);
2018 tx_ring->tx_stats.last_napi_jiffies = jiffies;
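/* The interrupts_masked flag set in ena_intr_msix_io() below pairs with the
 * READ_ONCE()/smp_rmb() in ena_io_poll() above: the hard IRQ marks that the
 * interrupt is currently masked before scheduling NAPI, and the poll routine
 * only re-arms (unmasks) the interrupt when it observes that flag, avoiding
 * spurious unmasks when the poll was entered from busy-polling instead.
 */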
2023 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
2025 struct ena_adapter *adapter = (struct ena_adapter *)data;
2027 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
2029 /* Don't call the aenq handler before probe is done */
2030 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
2031 ena_com_aenq_intr_handler(adapter->ena_dev, data);
2036 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
2037 * @irq: interrupt number
2038 * @data: pointer to a network interface private napi device structure
2040 static irqreturn_t ena_intr_msix_io(int irq, void *data)
2042 struct ena_napi *ena_napi = data;
2044 /* Used to check HW health */
2045 WRITE_ONCE(ena_napi->first_interrupt, true);
2047 WRITE_ONCE(ena_napi->interrupts_masked, true);
2048 smp_wmb(); /* write interrupts_masked before calling napi */
2050 napi_schedule_irqoff(&ena_napi->napi);
2055 /* Reserve a single MSI-X vector for management (admin + AENQ),
2056  * plus one vector for each potential IO queue.
2057  * The number of potential IO queues is the minimum of what the device
2058  * supports and the number of vCPUs.
2060 static int ena_enable_msix(struct ena_adapter *adapter)
2062 int msix_vecs, irq_cnt;
2064 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2065 netif_err(adapter, probe, adapter->netdev,
2066 "Error, MSI-X is already enabled\n");
2070 	/* Reserve the max MSI-X vectors we might need */
2071 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
2072 netif_dbg(adapter, probe, adapter->netdev,
2073 "Trying to enable MSI-X, vectors %d\n", msix_vecs);
2075 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
2076 msix_vecs, PCI_IRQ_MSIX);
2079 netif_err(adapter, probe, adapter->netdev,
2080 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
2084 if (irq_cnt != msix_vecs) {
2085 netif_notice(adapter, probe, adapter->netdev,
2086 "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
2087 irq_cnt, msix_vecs);
2088 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
2091 if (ena_init_rx_cpu_rmap(adapter))
2092 netif_warn(adapter, probe, adapter->netdev,
2093 "Failed to map IRQs to CPUs\n");
2095 adapter->msix_vecs = irq_cnt;
2096 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
2101 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2105 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2106 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2107 pci_name(adapter->pdev));
2108 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2109 ena_intr_msix_mgmnt;
2110 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2111 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
2112 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
2113 cpu = cpumask_first(cpu_online_mask);
2114 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2115 cpumask_set_cpu(cpu,
2116 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2119 static void ena_setup_io_intr(struct ena_adapter *adapter)
2121 struct net_device *netdev;
2122 int irq_idx, i, cpu;
2125 netdev = adapter->netdev;
2126 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2128 for (i = 0; i < io_queue_count; i++) {
2129 irq_idx = ENA_IO_IRQ_IDX(i);
2130 cpu = i % num_online_cpus();
2132 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2133 "%s-Tx-Rx-%d", netdev->name, i);
2134 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2135 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2136 adapter->irq_tbl[irq_idx].vector =
2137 pci_irq_vector(adapter->pdev, irq_idx);
2138 adapter->irq_tbl[irq_idx].cpu = cpu;
2140 cpumask_set_cpu(cpu,
2141 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
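/* Each IO vector gets an affinity hint of cpu = i % num_online_cpus(); the
 * hint is applied with irq_set_affinity_hint() when the IRQ is requested, so a
 * queue, its IRQ and its NUMA-aware ring allocations (cpu_to_node(cpu)) all
 * tend to land on the same CPU/node.
 */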
2145 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2147 unsigned long flags = 0;
2148 struct ena_irq *irq;
2151 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2152 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2155 netif_err(adapter, probe, adapter->netdev,
2156 "Failed to request admin irq\n");
2160 netif_dbg(adapter, probe, adapter->netdev,
2161 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2162 irq->affinity_hint_mask.bits[0], irq->vector);
2164 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2169 static int ena_request_io_irq(struct ena_adapter *adapter)
2171 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2172 unsigned long flags = 0;
2173 struct ena_irq *irq;
2176 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2177 netif_err(adapter, ifup, adapter->netdev,
2178 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2182 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2183 irq = &adapter->irq_tbl[i];
2184 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2187 netif_err(adapter, ifup, adapter->netdev,
2188 "Failed to request I/O IRQ. index %d rc %d\n",
2193 netif_dbg(adapter, ifup, adapter->netdev,
2194 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2195 i, irq->affinity_hint_mask.bits[0], irq->vector);
2197 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2203 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2204 irq = &adapter->irq_tbl[k];
2205 free_irq(irq->vector, irq->data);
2211 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2213 struct ena_irq *irq;
2215 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2216 synchronize_irq(irq->vector);
2217 irq_set_affinity_hint(irq->vector, NULL);
2218 free_irq(irq->vector, irq->data);
2221 static void ena_free_io_irq(struct ena_adapter *adapter)
2223 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2224 struct ena_irq *irq;
2227 #ifdef CONFIG_RFS_ACCEL
2228 if (adapter->msix_vecs >= 1) {
2229 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2230 adapter->netdev->rx_cpu_rmap = NULL;
2232 #endif /* CONFIG_RFS_ACCEL */
2234 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2235 irq = &adapter->irq_tbl[i];
2236 irq_set_affinity_hint(irq->vector, NULL);
2237 free_irq(irq->vector, irq->data);
2241 static void ena_disable_msix(struct ena_adapter *adapter)
2243 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2244 pci_free_irq_vectors(adapter->pdev);
2247 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2249 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2252 if (!netif_running(adapter->netdev))
2255 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
2256 synchronize_irq(adapter->irq_tbl[i].vector);
2259 static void ena_del_napi_in_range(struct ena_adapter *adapter,
2265 for (i = first_index; i < first_index + count; i++) {
2266 netif_napi_del(&adapter->ena_napi[i].napi);
2268 WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
2269 adapter->ena_napi[i].xdp_ring);
2273 static void ena_init_napi_in_range(struct ena_adapter *adapter,
2274 int first_index, int count)
2278 for (i = first_index; i < first_index + count; i++) {
2279 struct ena_napi *napi = &adapter->ena_napi[i];
2281 netif_napi_add(adapter->netdev,
2283 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
2286 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2287 napi->rx_ring = &adapter->rx_ring[i];
2288 napi->tx_ring = &adapter->tx_ring[i];
2290 napi->xdp_ring = &adapter->tx_ring[i];
2296 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2302 for (i = first_index; i < first_index + count; i++)
2303 napi_disable(&adapter->ena_napi[i].napi);
2306 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2312 for (i = first_index; i < first_index + count; i++)
2313 napi_enable(&adapter->ena_napi[i].napi);
2316 /* Configure the Rx forwarding */
2317 static int ena_rss_configure(struct ena_adapter *adapter)
2319 struct ena_com_dev *ena_dev = adapter->ena_dev;
2322 /* In case the RSS table wasn't initialized by probe */
2323 if (!ena_dev->rss.tbl_log_size) {
2324 rc = ena_rss_init_default(adapter);
2325 if (rc && (rc != -EOPNOTSUPP)) {
2326 netif_err(adapter, ifup, adapter->netdev,
2327 "Failed to init RSS rc: %d\n", rc);
2332 /* Set indirect table */
2333 rc = ena_com_indirect_table_set(ena_dev);
2334 if (unlikely(rc && rc != -EOPNOTSUPP))
2337 /* Configure hash function (if supported) */
2338 rc = ena_com_set_hash_function(ena_dev);
2339 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2342 /* Configure hash inputs (if supported) */
2343 rc = ena_com_set_hash_ctrl(ena_dev);
2344 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2350 static int ena_up_complete(struct ena_adapter *adapter)
2354 rc = ena_rss_configure(adapter);
2358 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2360 ena_refill_all_rx_bufs(adapter);
2362 /* enable transmits */
2363 netif_tx_start_all_queues(adapter->netdev);
2365 ena_napi_enable_in_range(adapter,
2367 adapter->xdp_num_queues + adapter->num_io_queues);
2372 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2374 struct ena_com_create_io_ctx ctx;
2375 struct ena_com_dev *ena_dev;
2376 struct ena_ring *tx_ring;
2381 ena_dev = adapter->ena_dev;
2383 tx_ring = &adapter->tx_ring[qid];
2384 msix_vector = ENA_IO_IRQ_IDX(qid);
2385 ena_qid = ENA_IO_TXQ_IDX(qid);
2387 memset(&ctx, 0x0, sizeof(ctx));
2389 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2391 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2392 ctx.msix_vector = msix_vector;
2393 ctx.queue_size = tx_ring->ring_size;
2394 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2396 rc = ena_com_create_io_queue(ena_dev, &ctx);
2398 netif_err(adapter, ifup, adapter->netdev,
2399 "Failed to create I/O TX queue num %d rc: %d\n",
2404 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2405 &tx_ring->ena_com_io_sq,
2406 &tx_ring->ena_com_io_cq);
2408 netif_err(adapter, ifup, adapter->netdev,
2409 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2411 ena_com_destroy_io_queue(ena_dev, ena_qid);
2415 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2419 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2420 int first_index, int count)
2422 struct ena_com_dev *ena_dev = adapter->ena_dev;
2425 for (i = first_index; i < first_index + count; i++) {
2426 rc = ena_create_io_tx_queue(adapter, i);
2434 while (i-- > first_index)
2435 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2440 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2442 struct ena_com_dev *ena_dev;
2443 struct ena_com_create_io_ctx ctx;
2444 struct ena_ring *rx_ring;
2449 ena_dev = adapter->ena_dev;
2451 rx_ring = &adapter->rx_ring[qid];
2452 msix_vector = ENA_IO_IRQ_IDX(qid);
2453 ena_qid = ENA_IO_RXQ_IDX(qid);
2455 memset(&ctx, 0x0, sizeof(ctx));
2458 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2459 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2460 ctx.msix_vector = msix_vector;
2461 ctx.queue_size = rx_ring->ring_size;
2462 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2464 rc = ena_com_create_io_queue(ena_dev, &ctx);
2466 netif_err(adapter, ifup, adapter->netdev,
2467 "Failed to create I/O RX queue num %d rc: %d\n",
2472 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2473 &rx_ring->ena_com_io_sq,
2474 &rx_ring->ena_com_io_cq);
2476 netif_err(adapter, ifup, adapter->netdev,
2477 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2482 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2486 ena_com_destroy_io_queue(ena_dev, ena_qid);
2490 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2492 struct ena_com_dev *ena_dev = adapter->ena_dev;
2495 for (i = 0; i < adapter->num_io_queues; i++) {
2496 rc = ena_create_io_rx_queue(adapter, i);
2499 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2506 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2507 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2513 static void set_io_rings_size(struct ena_adapter *adapter,
2519 for (i = 0; i < adapter->num_io_queues; i++) {
2520 adapter->tx_ring[i].ring_size = new_tx_size;
2521 adapter->rx_ring[i].ring_size = new_rx_size;
/* This function allows queue allocation to back off when the system is
 * low on memory. If there is not enough memory to allocate IO queues,
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 * 1. Try to allocate TX and RX; if successful
 *    1.1. return success
 * 2. Divide by 2 the size of the larger of the RX and TX queues (or both if their sizes are equal).
 * 3. If TX or RX is smaller than 256 (ENA_MIN_RING_SIZE)
 *    3.1. return failure.
 * 4. Else
 *    4.1. go back to 1.
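 *
 * For example, with hypothetical requested sizes TX=1024 and RX=512:
 * repeated allocation failures would retry with (512, 512), then
 * (256, 256); after that, halving again would drop below
 * ENA_MIN_RING_SIZE, so the driver gives up.
 */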
2540 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2542 int rc, cur_rx_ring_size, cur_tx_ring_size;
2543 int new_rx_ring_size, new_tx_ring_size;
/* Current queue sizes might be set smaller than the requested ones
 * due to past queue allocation failures.
 */
2548 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2549 adapter->requested_rx_ring_size);
2552 if (ena_xdp_present(adapter)) {
2553 rc = ena_setup_and_create_all_xdp_queues(adapter);
2558 rc = ena_setup_tx_resources_in_range(adapter,
2560 adapter->num_io_queues);
2564 rc = ena_create_io_tx_queues_in_range(adapter,
2566 adapter->num_io_queues);
2568 goto err_create_tx_queues;
2570 rc = ena_setup_all_rx_resources(adapter);
2574 rc = ena_create_all_io_rx_queues(adapter);
2576 goto err_create_rx_queues;
2580 err_create_rx_queues:
2581 ena_free_all_io_rx_resources(adapter);
2583 ena_destroy_all_tx_queues(adapter);
2584 err_create_tx_queues:
2585 ena_free_all_io_tx_resources(adapter);
2587 if (rc != -ENOMEM) {
2588 netif_err(adapter, ifup, adapter->netdev,
2589 "Queue creation failed with error code %d\n",
2594 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2595 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2597 netif_err(adapter, ifup, adapter->netdev,
2598 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2599 cur_tx_ring_size, cur_rx_ring_size);
2601 new_tx_ring_size = cur_tx_ring_size;
2602 new_rx_ring_size = cur_rx_ring_size;
2604 /* Decrease the size of the larger queue, or
2605 * decrease both if they are the same size.
2607 if (cur_rx_ring_size <= cur_tx_ring_size)
2608 new_tx_ring_size = cur_tx_ring_size / 2;
2609 if (cur_rx_ring_size >= cur_tx_ring_size)
2610 new_rx_ring_size = cur_rx_ring_size / 2;
2612 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2613 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2614 netif_err(adapter, ifup, adapter->netdev,
2615 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2620 netif_err(adapter, ifup, adapter->netdev,
2621 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2625 set_io_rings_size(adapter, new_tx_ring_size,
2630 static int ena_up(struct ena_adapter *adapter)
2632 int io_queue_count, rc, i;
2634 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2636 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2637 ena_setup_io_intr(adapter);
/* NAPI poll functions should be initialized before running
 * request_irq(), to handle a rare condition where there is a pending
 * interrupt, causing the ISR to fire immediately while the poll
 * function was not yet set, which would cause a NULL dereference.
 */
2644 ena_init_napi_in_range(adapter, 0, io_queue_count);
2646 rc = ena_request_io_irq(adapter);
2650 rc = create_queues_with_size_backoff(adapter);
2652 goto err_create_queues_with_backoff;
2654 rc = ena_up_complete(adapter);
2658 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2659 netif_carrier_on(adapter->netdev);
2661 ena_increase_stat(&adapter->dev_stats.interface_up, 1,
2664 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2666 /* Enable completion queues interrupt */
2667 for (i = 0; i < adapter->num_io_queues; i++)
2668 ena_unmask_interrupt(&adapter->tx_ring[i],
2669 &adapter->rx_ring[i]);
/* Schedule NAPI in case there are pending packets
 * from the last time NAPI was disabled.
 */
2674 for (i = 0; i < io_queue_count; i++)
2675 napi_schedule(&adapter->ena_napi[i].napi);
2680 ena_destroy_all_tx_queues(adapter);
2681 ena_free_all_io_tx_resources(adapter);
2682 ena_destroy_all_rx_queues(adapter);
2683 ena_free_all_io_rx_resources(adapter);
2684 err_create_queues_with_backoff:
2685 ena_free_io_irq(adapter);
2687 ena_del_napi_in_range(adapter, 0, io_queue_count);
2692 static void ena_down(struct ena_adapter *adapter)
2694 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2696 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2698 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2700 ena_increase_stat(&adapter->dev_stats.interface_down, 1,
2703 netif_carrier_off(adapter->netdev);
2704 netif_tx_disable(adapter->netdev);
2706 /* After this point the napi handler won't enable the tx queue */
2707 ena_napi_disable_in_range(adapter, 0, io_queue_count);
/* After destroying the queues there won't be any new interrupts */
2711 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2714 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2716 netif_err(adapter, ifdown, adapter->netdev,
2717 "Device reset failed\n");
2718 /* stop submitting admin commands on a device that was reset */
2719 ena_com_set_admin_running_state(adapter->ena_dev, false);
2722 ena_destroy_all_io_queues(adapter);
2724 ena_disable_io_intr_sync(adapter);
2725 ena_free_io_irq(adapter);
2726 ena_del_napi_in_range(adapter, 0, io_queue_count);
2728 ena_free_all_tx_bufs(adapter);
2729 ena_free_all_rx_bufs(adapter);
2730 ena_free_all_io_tx_resources(adapter);
2731 ena_free_all_io_rx_resources(adapter);
2734 /* ena_open - Called when a network interface is made active
2735 * @netdev: network interface device structure
2737 * Returns 0 on success, negative value on failure
2739 * The open entry point is called when a network interface is made
2740 * active by the system (IFF_UP). At this point all resources needed
2741 * for transmit and receive operations are allocated, the interrupt
2742 * handler is registered with the OS, the watchdog timer is started,
2743 * and the stack is notified that the interface is ready.
2745 static int ena_open(struct net_device *netdev)
2747 struct ena_adapter *adapter = netdev_priv(netdev);
2750 /* Notify the stack of the actual queue counts. */
2751 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2753 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2757 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2759 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2763 rc = ena_up(adapter);
2770 /* ena_close - Disables a network interface
2771 * @netdev: network interface device structure
2773 * Returns 0, this is not allowed to fail
2775 * The close entry point is called when an interface is de-activated
* by the OS. The hardware is still under the driver's control, but
2777 * needs to be disabled. A global MAC reset is issued to stop the
2778 * hardware, and all transmit and receive resources are freed.
2780 static int ena_close(struct net_device *netdev)
2782 struct ena_adapter *adapter = netdev_priv(netdev);
2784 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2786 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2789 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
/* Check the device status and issue a reset if needed */
2793 check_for_admin_com_state(adapter);
2794 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2795 netif_err(adapter, ifdown, adapter->netdev,
2796 "Destroy failure, restarting device\n");
2797 ena_dump_stats_to_dmesg(adapter);
2798 /* rtnl lock already obtained in dev_ioctl() layer */
2799 ena_destroy_device(adapter, false);
2800 ena_restore_device(adapter);
2806 int ena_update_queue_sizes(struct ena_adapter *adapter,
2812 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2813 ena_close(adapter->netdev);
2814 adapter->requested_tx_ring_size = new_tx_size;
2815 adapter->requested_rx_ring_size = new_rx_size;
2816 ena_init_io_rings(adapter,
2818 adapter->xdp_num_queues +
2819 adapter->num_io_queues);
2820 return dev_was_up ? ena_up(adapter) : 0;
2823 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2825 struct ena_com_dev *ena_dev = adapter->ena_dev;
2826 int prev_channel_count;
2829 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2830 ena_close(adapter->netdev);
2831 prev_channel_count = adapter->num_io_queues;
2832 adapter->num_io_queues = new_channel_count;
2833 if (ena_xdp_present(adapter) &&
2834 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2835 adapter->xdp_first_ring = new_channel_count;
2836 adapter->xdp_num_queues = new_channel_count;
2837 if (prev_channel_count > new_channel_count)
2838 ena_xdp_exchange_program_rx_in_range(adapter,
2841 prev_channel_count);
2843 ena_xdp_exchange_program_rx_in_range(adapter,
2844 adapter->xdp_bpf_prog,
2849 /* We need to destroy the rss table so that the indirection
2850 * table will be reinitialized by ena_up()
2852 ena_com_rss_destroy(ena_dev);
2853 ena_init_io_rings(adapter,
2855 adapter->xdp_num_queues +
2856 adapter->num_io_queues);
2857 return dev_was_up ? ena_open(adapter->netdev) : 0;
2860 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2861 struct sk_buff *skb,
2862 bool disable_meta_caching)
2864 u32 mss = skb_shinfo(skb)->gso_size;
2865 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2868 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2869 ena_tx_ctx->l4_csum_enable = 1;
2871 ena_tx_ctx->tso_enable = 1;
2872 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2873 ena_tx_ctx->l4_csum_partial = 0;
2875 ena_tx_ctx->tso_enable = 0;
2876 ena_meta->l4_hdr_len = 0;
2877 ena_tx_ctx->l4_csum_partial = 1;
2880 switch (ip_hdr(skb)->version) {
2882 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2883 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2886 ena_tx_ctx->l3_csum_enable = 1;
2887 l4_protocol = ip_hdr(skb)->protocol;
2890 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2891 l4_protocol = ipv6_hdr(skb)->nexthdr;
2897 if (l4_protocol == IPPROTO_TCP)
2898 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2900 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2902 ena_meta->mss = mss;
2903 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2904 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2905 ena_tx_ctx->meta_valid = 1;
2906 } else if (disable_meta_caching) {
2907 memset(ena_meta, 0, sizeof(*ena_meta));
2908 ena_tx_ctx->meta_valid = 1;
2910 ena_tx_ctx->meta_valid = 0;
2914 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2915 struct sk_buff *skb)
2917 int num_frags, header_len, rc;
2919 num_frags = skb_shinfo(skb)->nr_frags;
2920 header_len = skb_headlen(skb);
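/* The HW SG list can hold at most sgl_size entries per packet; an skb that
 * needs more frags, or exactly sgl_size frags with a linear part that does
 * not fit within tx_max_header_size, must be linearized first.
 */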
2922 if (num_frags < tx_ring->sgl_size)
2925 if ((num_frags == tx_ring->sgl_size) &&
2926 (header_len < tx_ring->tx_max_header_size))
2929 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
2931 rc = skb_linearize(skb);
2933 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
2940 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2941 struct ena_tx_buffer *tx_info,
2942 struct sk_buff *skb,
2946 struct ena_adapter *adapter = tx_ring->adapter;
2947 struct ena_com_buf *ena_buf;
2949 u32 skb_head_len, frag_len, last_frag;
2954 skb_head_len = skb_headlen(skb);
2956 ena_buf = tx_info->bufs;
2958 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
/* When the device is in LLQ mode, the driver copies
 * the header into the device memory space.
 * The ena_com layer assumes the header is in a linear
 * memory space.
 * This assumption might be wrong since part of the header
 * can reside in the fragmented buffers.
 * Use skb_header_pointer() to make sure the header is in a
 * linear memory space.
 */
2969 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2970 *push_hdr = skb_header_pointer(skb, 0, push_len,
2971 tx_ring->push_buf_intermediate_buf);
2972 *header_len = push_len;
2973 if (unlikely(skb->data != *push_hdr)) {
2974 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
2977 delta = push_len - skb_head_len;
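/* delta is the number of pushed header bytes that live beyond the linear
 * part (i.e. inside the frags); these bytes are skipped when the frags are
 * mapped below.
 */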
2981 *header_len = min_t(u32, skb_head_len,
2982 tx_ring->tx_max_header_size);
2985 netif_dbg(adapter, tx_queued, adapter->netdev,
2986 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2987 *push_hdr, push_len);
2989 if (skb_head_len > push_len) {
2990 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2991 skb_head_len - push_len, DMA_TO_DEVICE);
2992 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2993 goto error_report_dma_error;
2995 ena_buf->paddr = dma;
2996 ena_buf->len = skb_head_len - push_len;
2999 tx_info->num_of_bufs++;
3000 tx_info->map_linear_data = 1;
3002 tx_info->map_linear_data = 0;
3005 last_frag = skb_shinfo(skb)->nr_frags;
3007 for (i = 0; i < last_frag; i++) {
3008 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3010 frag_len = skb_frag_size(frag);
3012 if (unlikely(delta >= frag_len)) {
3017 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
3018 frag_len - delta, DMA_TO_DEVICE);
3019 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
3020 goto error_report_dma_error;
3022 ena_buf->paddr = dma;
3023 ena_buf->len = frag_len - delta;
3025 tx_info->num_of_bufs++;
3031 error_report_dma_error:
3032 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
3034 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
3036 tx_info->skb = NULL;
3038 tx_info->num_of_bufs += i;
3039 ena_unmap_tx_buff(tx_ring, tx_info);
3044 /* Called with netif_tx_lock. */
3045 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
3047 struct ena_adapter *adapter = netdev_priv(dev);
3048 struct ena_tx_buffer *tx_info;
3049 struct ena_com_tx_ctx ena_tx_ctx;
3050 struct ena_ring *tx_ring;
3051 struct netdev_queue *txq;
3053 u16 next_to_use, req_id, header_len;
3056 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
/* Determine which tx ring this skb will be placed on */
3058 qid = skb_get_queue_mapping(skb);
3059 tx_ring = &adapter->tx_ring[qid];
3060 txq = netdev_get_tx_queue(dev, qid);
3062 rc = ena_check_and_linearize_skb(tx_ring, skb);
3064 goto error_drop_packet;
3066 skb_tx_timestamp(skb);
3068 next_to_use = tx_ring->next_to_use;
3069 req_id = tx_ring->free_ids[next_to_use];
3070 tx_info = &tx_ring->tx_buffer_info[req_id];
3071 tx_info->num_of_bufs = 0;
3073 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
3075 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
3077 goto error_drop_packet;
3079 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3080 ena_tx_ctx.ena_bufs = tx_info->bufs;
3081 ena_tx_ctx.push_header = push_hdr;
3082 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3083 ena_tx_ctx.req_id = req_id;
3084 ena_tx_ctx.header_len = header_len;
3086 /* set flags and meta data */
3087 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
3089 rc = ena_xmit_common(dev,
3096 goto error_unmap_dma;
3098 netdev_tx_sent_queue(txq, skb->len);
/* Stop the queue when no more space is available; a packet can require up
 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
 * header (if the header is larger than tx_max_header_size).
 */
3104 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3105 tx_ring->sgl_size + 2))) {
3106 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3109 netif_tx_stop_queue(txq);
3110 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
/* There is a rare condition where this function decides to
 * stop the queue but meanwhile clean_tx_irq updates
 * next_to_completion and terminates.
 * The queue would then remain stopped forever.
 * To solve this issue add an mb() to make sure that the
 * netif_tx_stop_queue() write is visible before checking
 * whether there is additional space in the queue.
 */
3123 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3124 ENA_TX_WAKEUP_THRESH)) {
3125 netif_tx_wake_queue(txq);
3126 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
3131 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* Trigger the DMA engine. ena_ring_tx_doorbell()
 * issues a memory barrier internally.
 */
3135 ena_ring_tx_doorbell(tx_ring);
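/* The doorbell is written only when the queue has been stopped or the stack
 * reports no further pending packets (netdev_xmit_more()); otherwise it is
 * deferred so several descriptors can share a single doorbell write.
 */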
3137 return NETDEV_TX_OK;
3140 ena_unmap_tx_buff(tx_ring, tx_info);
3141 tx_info->skb = NULL;
3145 return NETDEV_TX_OK;
3148 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3149 struct net_device *sb_dev)
/* We suspect that this is good for in-kernel network services that
 * want to loop incoming skb rx to tx in normal user-generated traffic;
 * most probably we will not get to this.
 */
3156 if (skb_rx_queue_recorded(skb))
3157 qid = skb_get_rx_queue(skb);
3159 qid = netdev_pick_tx(dev, skb, NULL);
3164 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3166 struct device *dev = &pdev->dev;
3167 struct ena_admin_host_info *host_info;
3170 /* Allocate only the host info */
3171 rc = ena_com_allocate_host_info(ena_dev);
3173 dev_err(dev, "Cannot allocate host info\n");
3177 host_info = ena_dev->host_attr.host_info;
3179 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3180 host_info->os_type = ENA_ADMIN_OS_LINUX;
3181 host_info->kernel_ver = LINUX_VERSION_CODE;
3182 strlcpy(host_info->kernel_ver_str, utsname()->version,
3183 sizeof(host_info->kernel_ver_str) - 1);
3184 host_info->os_dist = 0;
3185 strncpy(host_info->os_dist_str, utsname()->release,
3186 sizeof(host_info->os_dist_str) - 1);
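/* Pack the driver version reported to the device: the generic major version
 * in the low bits, minor and sub-minor at their admin-defined shifts, and
 * 'K' (presumably marking a kernel driver) in the module type field.
 */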
3187 host_info->driver_version =
3188 (DRV_MODULE_GEN_MAJOR) |
3189 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3190 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3191 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3192 host_info->num_cpus = num_online_cpus();
3194 host_info->driver_supported_features =
3195 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3196 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3197 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3198 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
3200 rc = ena_com_set_host_attributes(ena_dev);
3202 if (rc == -EOPNOTSUPP)
3203 dev_warn(dev, "Cannot set host attributes\n");
3205 dev_err(dev, "Cannot set host attributes\n");
3213 ena_com_delete_host_info(ena_dev);
3216 static void ena_config_debug_area(struct ena_adapter *adapter)
3218 u32 debug_area_size;
3221 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3222 if (ss_count <= 0) {
3223 netif_err(adapter, drv, adapter->netdev,
3224 "SS count is negative\n");
/* Allocate 32 bytes (ETH_GSTRING_LEN) for each string and 64 bits for each value */
3229 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3231 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3233 netif_err(adapter, drv, adapter->netdev,
3234 "Cannot allocate debug area\n");
3238 rc = ena_com_set_host_attributes(adapter->ena_dev);
3240 if (rc == -EOPNOTSUPP)
3241 netif_warn(adapter, drv, adapter->netdev,
3242 "Cannot set host attributes\n");
3244 netif_err(adapter, drv, adapter->netdev,
3245 "Cannot set host attributes\n");
3251 ena_com_delete_debug_area(adapter->ena_dev);
3254 int ena_update_hw_stats(struct ena_adapter *adapter)
3258 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3260 dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
3267 static void ena_get_stats64(struct net_device *netdev,
3268 struct rtnl_link_stats64 *stats)
3270 struct ena_adapter *adapter = netdev_priv(netdev);
3271 struct ena_ring *rx_ring, *tx_ring;
3277 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3280 for (i = 0; i < adapter->num_io_queues; i++) {
3283 tx_ring = &adapter->tx_ring[i];
3286 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3287 packets = tx_ring->tx_stats.cnt;
3288 bytes = tx_ring->tx_stats.bytes;
3289 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3291 stats->tx_packets += packets;
3292 stats->tx_bytes += bytes;
3294 rx_ring = &adapter->rx_ring[i];
3297 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3298 packets = rx_ring->rx_stats.cnt;
3299 bytes = rx_ring->rx_stats.bytes;
3300 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3302 stats->rx_packets += packets;
3303 stats->rx_bytes += bytes;
3307 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3308 rx_drops = adapter->dev_stats.rx_drops;
3309 tx_drops = adapter->dev_stats.tx_drops;
3310 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3312 stats->rx_dropped = rx_drops;
3313 stats->tx_dropped = tx_drops;
3315 stats->multicast = 0;
3316 stats->collisions = 0;
3318 stats->rx_length_errors = 0;
3319 stats->rx_crc_errors = 0;
3320 stats->rx_frame_errors = 0;
3321 stats->rx_fifo_errors = 0;
3322 stats->rx_missed_errors = 0;
3323 stats->tx_window_errors = 0;
3325 stats->rx_errors = 0;
3326 stats->tx_errors = 0;
3329 static const struct net_device_ops ena_netdev_ops = {
3330 .ndo_open = ena_open,
3331 .ndo_stop = ena_close,
3332 .ndo_start_xmit = ena_start_xmit,
3333 .ndo_select_queue = ena_select_queue,
3334 .ndo_get_stats64 = ena_get_stats64,
3335 .ndo_tx_timeout = ena_tx_timeout,
3336 .ndo_change_mtu = ena_change_mtu,
3337 .ndo_set_mac_address = NULL,
3338 .ndo_validate_addr = eth_validate_addr,
3340 .ndo_xdp_xmit = ena_xdp_xmit,
3343 static int ena_device_validate_params(struct ena_adapter *adapter,
3344 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3346 struct net_device *netdev = adapter->netdev;
3349 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3352 netif_err(adapter, drv, netdev,
3353 "Error, mac address are different\n");
3357 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3358 netif_err(adapter, drv, netdev,
3359 "Error, device max mtu is smaller than netdev MTU\n");
3366 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3368 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3369 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3370 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3371 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3372 llq_config->llq_ring_entry_size_value = 128;
3375 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3376 struct ena_com_dev *ena_dev,
3377 struct ena_admin_feature_llq_desc *llq,
3378 struct ena_llq_configurations *llq_default_configurations)
3381 u32 llq_feature_mask;
3383 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3384 if (!(ena_dev->supported_features & llq_feature_mask)) {
3385 dev_warn(&pdev->dev,
3386 "LLQ is not supported Fallback to host mode policy.\n");
3387 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3391 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3394 "Failed to configure the device mode. Fallback to host mode policy.\n");
3395 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3401 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3404 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3407 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3409 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3410 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
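/* The LLQ bar is mapped write-combined; descriptors and pushed headers are
 * written directly to device memory, so WC mapping presumably lets the CPU
 * batch those PCIe writes.
 */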
3416 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3417 pci_resource_start(pdev, ENA_MEM_BAR),
3418 pci_resource_len(pdev, ENA_MEM_BAR));
3420 if (!ena_dev->mem_bar)
3426 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3427 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3430 struct ena_llq_configurations llq_config;
3431 struct device *dev = &pdev->dev;
3432 bool readless_supported;
3437 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3439 dev_err(dev, "Failed to init mmio read less\n");
/* The PCIe configuration space revision id indicates whether mmio reg
 * read is disabled.
 */
3446 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3447 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3449 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3451 dev_err(dev, "Can not reset device\n");
3452 goto err_mmio_read_less;
3455 rc = ena_com_validate_version(ena_dev);
3457 dev_err(dev, "Device version is too low\n");
3458 goto err_mmio_read_less;
3461 dma_width = ena_com_get_dma_width(ena_dev);
3462 if (dma_width < 0) {
3463 dev_err(dev, "Invalid dma width value %d", dma_width);
3465 goto err_mmio_read_less;
3468 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3470 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3471 goto err_mmio_read_less;
3474 /* ENA admin level init */
3475 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3478 "Can not initialize ena admin queue with device\n");
3479 goto err_mmio_read_less;
/* To enable the MSI-X interrupts the driver needs to know the number
 * of queues, so the driver uses polling mode to retrieve this
 * information.
 */
3486 ena_com_set_admin_polling_mode(ena_dev, true);
3488 ena_config_host_info(ena_dev, pdev);
/* Get Device Attributes */
3491 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3493 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3494 goto err_admin_init;
/* Try to turn on all the available AENQ groups */
3498 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3499 BIT(ENA_ADMIN_FATAL_ERROR) |
3500 BIT(ENA_ADMIN_WARNING) |
3501 BIT(ENA_ADMIN_NOTIFICATION) |
3502 BIT(ENA_ADMIN_KEEP_ALIVE);
3504 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3506 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3508 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3509 goto err_admin_init;
3512 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3514 set_default_llq_configurations(&llq_config);
3516 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3519 dev_err(dev, "ENA device init failed\n");
3520 goto err_admin_init;
3526 ena_com_delete_host_info(ena_dev);
3527 ena_com_admin_destroy(ena_dev);
3529 ena_com_mmio_reg_read_request_destroy(ena_dev);
3534 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3536 struct ena_com_dev *ena_dev = adapter->ena_dev;
3537 struct device *dev = &adapter->pdev->dev;
3540 rc = ena_enable_msix(adapter);
3542 dev_err(dev, "Can not reserve msix vectors\n");
3546 ena_setup_mgmnt_intr(adapter);
3548 rc = ena_request_mgmnt_irq(adapter);
3550 dev_err(dev, "Can not setup management interrupts\n");
3551 goto err_disable_msix;
3554 ena_com_set_admin_polling_mode(ena_dev, false);
3556 ena_com_admin_aenq_enable(ena_dev);
3561 ena_disable_msix(adapter);
3566 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3568 struct net_device *netdev = adapter->netdev;
3569 struct ena_com_dev *ena_dev = adapter->ena_dev;
3572 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3575 netif_carrier_off(netdev);
3577 del_timer_sync(&adapter->timer_service);
3579 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3580 adapter->dev_up_before_reset = dev_up;
3582 ena_com_set_admin_running_state(ena_dev, false);
3584 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
/* Stop the device from sending AENQ events (in case the reset flag is set
 * and the device is up, ena_down() has already reset the device).
 */
3590 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3591 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3593 ena_free_mgmnt_irq(adapter);
3595 ena_disable_msix(adapter);
3597 ena_com_abort_admin_commands(ena_dev);
3599 ena_com_wait_for_abort_completion(ena_dev);
3601 ena_com_admin_destroy(ena_dev);
3603 ena_com_mmio_reg_read_request_destroy(ena_dev);
3605 /* return reset reason to default value */
3606 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3608 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3609 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3612 static int ena_restore_device(struct ena_adapter *adapter)
3614 struct ena_com_dev_get_features_ctx get_feat_ctx;
3615 struct ena_com_dev *ena_dev = adapter->ena_dev;
3616 struct pci_dev *pdev = adapter->pdev;
3620 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3621 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3623 dev_err(&pdev->dev, "Can not initialize device\n");
3626 adapter->wd_state = wd_state;
3628 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3630 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3631 goto err_device_destroy;
3634 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3636 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3637 goto err_device_destroy;
3639 /* If the interface was up before the reset bring it up */
3640 if (adapter->dev_up_before_reset) {
3641 rc = ena_up(adapter);
3643 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3644 goto err_disable_msix;
3648 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3650 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3651 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3652 netif_carrier_on(adapter->netdev);
3654 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3655 adapter->last_keep_alive_jiffies = jiffies;
dev_info(&pdev->dev, "Device reset completed successfully\n");
3661 ena_free_mgmnt_irq(adapter);
3662 ena_disable_msix(adapter);
3664 ena_com_abort_admin_commands(ena_dev);
3665 ena_com_wait_for_abort_completion(ena_dev);
3666 ena_com_admin_destroy(ena_dev);
3667 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3668 ena_com_mmio_reg_read_request_destroy(ena_dev);
3670 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3671 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3673 "Reset attempt failed. Can not reset the device\n");
3678 static void ena_fw_reset_device(struct work_struct *work)
3680 struct ena_adapter *adapter =
3681 container_of(work, struct ena_adapter, reset_task);
3685 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3686 ena_destroy_device(adapter, false);
3687 ena_restore_device(adapter);
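/* Detect an Rx queue that has pending completions but whose interrupt was
 * never delivered; after ENA_MAX_NO_INTERRUPT_ITERATIONS such detections the
 * device is scheduled for reset (suspected MSI-X delivery issue).
 */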
3693 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3694 struct ena_ring *rx_ring)
3696 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
3698 if (likely(READ_ONCE(ena_napi->first_interrupt)))
3701 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3704 rx_ring->no_interrupt_event_cnt++;
3706 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3707 netif_err(adapter, rx_err, adapter->netdev,
3708 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3710 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3711 smp_mb__before_atomic();
3712 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3719 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3720 struct ena_ring *tx_ring)
3722 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
3723 unsigned int time_since_last_napi;
3724 unsigned int missing_tx_comp_to;
3725 bool is_tx_comp_time_expired;
3726 struct ena_tx_buffer *tx_buf;
3727 unsigned long last_jiffies;
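/* Scan every slot in the ring; a non-zero last_jiffies means the buffer was
 * handed to the device but its completion has not been processed yet.
 */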
3731 for (i = 0; i < tx_ring->ring_size; i++) {
3732 tx_buf = &tx_ring->tx_buffer_info[i];
3733 last_jiffies = tx_buf->last_jiffies;
3735 if (last_jiffies == 0)
3736 /* no pending Tx at this location */
3739 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3740 2 * adapter->missing_tx_completion_to);
3742 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
/* If the interrupt is still not received after the grace
 * period, schedule a reset.
 */
3746 netif_err(adapter, tx_err, adapter->netdev,
3747 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3749 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3750 smp_mb__before_atomic();
3751 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3755 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3756 adapter->missing_tx_completion_to);
3758 if (unlikely(is_tx_comp_time_expired)) {
3759 if (!tx_buf->print_once) {
3760 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
3761 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
3762 netif_notice(adapter, tx_err, adapter->netdev,
3763 "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
3764 tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
3767 tx_buf->print_once = 1;
3772 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3773 netif_err(adapter, tx_err, adapter->netdev,
3774 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3776 adapter->missing_tx_completion_threshold);
3777 adapter->reset_reason =
3778 ENA_REGS_RESET_MISS_TX_CMPL;
3779 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3783 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
3789 static void check_for_missing_completions(struct ena_adapter *adapter)
3791 struct ena_ring *tx_ring;
3792 struct ena_ring *rx_ring;
3796 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure the device isn't being brought down or reset by another process */
3800 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3803 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3806 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3809 budget = ENA_MONITORED_TX_QUEUES;
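/* Check at most ENA_MONITORED_TX_QUEUES rings per invocation, resuming from
 * last_monitored_tx_qid so that all IO queues are covered round-robin across
 * timer ticks.
 */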
3811 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3812 tx_ring = &adapter->tx_ring[i];
3813 rx_ring = &adapter->rx_ring[i];
3815 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3819 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3820 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3829 adapter->last_monitored_tx_qid = i % io_queue_count;
3832 /* trigger napi schedule after 2 consecutive detections */
3833 #define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * NAPI handler failed to refill new Rx descriptors (due to a lack of memory,
 * for example).
 * This case will lead to a deadlock:
 * the device won't send interrupts since all the new Rx packets will be dropped,
 * and the NAPI handler won't allocate new Rx descriptors, so the device won't
 * be able to deliver new packets to the host.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for
 * constrained environments.
 *
 * When such a situation is detected - reschedule NAPI.
 */
3848 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3850 struct ena_ring *rx_ring;
3851 int i, refill_required;
3853 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3856 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3859 for (i = 0; i < adapter->num_io_queues; i++) {
3860 rx_ring = &adapter->rx_ring[i];
3862 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3863 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3864 rx_ring->empty_rx_queue++;
3866 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3867 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
3870 netif_err(adapter, drv, adapter->netdev,
3871 "Trigger refill for ring %d\n", i);
3873 napi_schedule(rx_ring->napi);
3874 rx_ring->empty_rx_queue = 0;
3877 rx_ring->empty_rx_queue = 0;
3882 /* Check for keep alive expiration */
3883 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3885 unsigned long keep_alive_expired;
3887 if (!adapter->wd_state)
3890 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3893 keep_alive_expired = adapter->last_keep_alive_jiffies +
3894 adapter->keep_alive_timeout;
3895 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3896 netif_err(adapter, drv, adapter->netdev,
3897 "Keep alive watchdog timeout.\n");
3898 ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
3900 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3901 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3905 static void check_for_admin_com_state(struct ena_adapter *adapter)
3907 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3908 netif_err(adapter, drv, adapter->netdev,
3909 "ENA admin queue is not in running state!\n");
3910 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
3912 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3913 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3917 static void ena_update_hints(struct ena_adapter *adapter,
3918 struct ena_admin_ena_hw_hints *hints)
3920 struct net_device *netdev = adapter->netdev;
3922 if (hints->admin_completion_tx_timeout)
3923 adapter->ena_dev->admin_queue.completion_timeout =
3924 hints->admin_completion_tx_timeout * 1000;
3926 if (hints->mmio_read_timeout)
3927 /* convert to usec */
3928 adapter->ena_dev->mmio_read.reg_read_to =
3929 hints->mmio_read_timeout * 1000;
3931 if (hints->missed_tx_completion_count_threshold_to_reset)
3932 adapter->missing_tx_completion_threshold =
3933 hints->missed_tx_completion_count_threshold_to_reset;
3935 if (hints->missing_tx_completion_timeout) {
3936 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3937 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3939 adapter->missing_tx_completion_to =
3940 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3943 if (hints->netdev_wd_timeout)
3944 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3946 if (hints->driver_watchdog_timeout) {
3947 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3948 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3950 adapter->keep_alive_timeout =
3951 msecs_to_jiffies(hints->driver_watchdog_timeout);
3955 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3956 struct net_device *netdev)
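/* The 64-bit netdev feature mask is reported to the device as two 32-bit
 * words (low word first).
 */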
3958 host_info->supported_network_features[0] =
3959 netdev->features & GENMASK_ULL(31, 0);
3960 host_info->supported_network_features[1] =
3961 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3964 static void ena_timer_service(struct timer_list *t)
3966 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3967 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3968 struct ena_admin_host_info *host_info =
3969 adapter->ena_dev->host_attr.host_info;
3971 check_for_missing_keep_alive(adapter);
3973 check_for_admin_com_state(adapter);
3975 check_for_missing_completions(adapter);
3977 check_for_empty_rx_ring(adapter);
3980 ena_dump_stats_to_buf(adapter, debug_area);
3983 ena_update_host_info(host_info, adapter->netdev);
3985 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3986 netif_err(adapter, drv, adapter->netdev,
3987 "Trigger reset is on\n");
3988 ena_dump_stats_to_dmesg(adapter);
3989 queue_work(ena_wq, &adapter->reset_task);
3993 /* Reset the timer */
3994 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3997 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3998 struct ena_com_dev *ena_dev,
3999 struct ena_com_dev_get_features_ctx *get_feat_ctx)
4001 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
4003 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4004 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4005 &get_feat_ctx->max_queue_ext.max_queue_ext;
4006 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
4007 max_queue_ext->max_rx_cq_num);
4009 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
4010 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
4012 struct ena_admin_queue_feature_desc *max_queues =
4013 &get_feat_ctx->max_queues;
4014 io_tx_sq_num = max_queues->max_sq_num;
4015 io_tx_cq_num = max_queues->max_cq_num;
4016 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
4019 /* In case of LLQ use the llq fields for the tx SQ/CQ */
4020 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4021 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
4023 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
4024 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
4025 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
4026 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
4028 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
4030 return max_num_io_queues;
4033 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
4034 struct net_device *netdev)
4036 netdev_features_t dev_features = 0;
4038 /* Set offload features */
4039 if (feat->offload.tx &
4040 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
4041 dev_features |= NETIF_F_IP_CSUM;
4043 if (feat->offload.tx &
4044 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
4045 dev_features |= NETIF_F_IPV6_CSUM;
4047 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
4048 dev_features |= NETIF_F_TSO;
4050 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
4051 dev_features |= NETIF_F_TSO6;
4053 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
4054 dev_features |= NETIF_F_TSO_ECN;
4056 if (feat->offload.rx_supported &
4057 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
4058 dev_features |= NETIF_F_RXCSUM;
4060 if (feat->offload.rx_supported &
4061 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
4062 dev_features |= NETIF_F_RXCSUM;
4070 netdev->hw_features |= netdev->features;
4071 netdev->vlan_features |= netdev->features;
4074 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
4075 struct ena_com_dev_get_features_ctx *feat)
4077 struct net_device *netdev = adapter->netdev;
4079 /* Copy mac address */
4080 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
4081 eth_hw_addr_random(netdev);
4082 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
4084 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
4085 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4088 /* Set offload features */
4089 ena_set_dev_offloads(feat, netdev);
4091 adapter->max_mtu = feat->dev_attr.max_mtu;
4092 netdev->max_mtu = adapter->max_mtu;
4093 netdev->min_mtu = ENA_MIN_MTU;
4096 static int ena_rss_init_default(struct ena_adapter *adapter)
4098 struct ena_com_dev *ena_dev = adapter->ena_dev;
4099 struct device *dev = &adapter->pdev->dev;
4103 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4105 dev_err(dev, "Cannot init indirect table\n");
4109 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4110 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4111 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4112 ENA_IO_RXQ_IDX(val));
4113 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4114 dev_err(dev, "Cannot fill indirect table\n");
4115 goto err_fill_indir;
4119 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4120 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4121 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4122 dev_err(dev, "Cannot fill hash function\n");
4123 goto err_fill_indir;
4126 rc = ena_com_set_default_hash_ctrl(ena_dev);
4127 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4128 dev_err(dev, "Cannot fill hash control\n");
4129 goto err_fill_indir;
4135 ena_com_rss_destroy(ena_dev);
4141 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4143 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4145 pci_release_selected_regions(pdev, release_bars);
4149 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4151 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4152 struct ena_com_dev *ena_dev = ctx->ena_dev;
4153 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4154 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4155 u32 max_tx_queue_size;
4156 u32 max_rx_queue_size;
4158 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4159 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4160 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4161 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4162 max_queue_ext->max_rx_sq_depth);
4163 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4165 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4166 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4167 llq->max_llq_depth);
4169 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4170 max_queue_ext->max_tx_sq_depth);
4172 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4173 max_queue_ext->max_per_packet_tx_descs);
4174 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4175 max_queue_ext->max_per_packet_rx_descs);
4177 struct ena_admin_queue_feature_desc *max_queues =
4178 &ctx->get_feat_ctx->max_queues;
4179 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4180 max_queues->max_sq_depth);
4181 max_tx_queue_size = max_queues->max_cq_depth;
4183 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4184 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4185 llq->max_llq_depth);
4187 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4188 max_queues->max_sq_depth);
4190 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4191 max_queues->max_packet_tx_descs);
4192 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4193 max_queues->max_packet_rx_descs);
4196 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4197 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4199 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4201 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4204 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4205 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4207 ctx->max_tx_queue_size = max_tx_queue_size;
4208 ctx->max_rx_queue_size = max_rx_queue_size;
4209 ctx->tx_queue_size = tx_queue_size;
4210 ctx->rx_queue_size = rx_queue_size;
4215 /* ena_probe - Device Initialization Routine
4216 * @pdev: PCI device information struct
4217 * @ent: entry in ena_pci_tbl
4219 * Returns 0 on success, negative on failure
4221 * ena_probe initializes an adapter identified by a pci_dev structure.
4222 * The OS initialization, configuring of the adapter private structure,
4223 * and a hardware reset occur.
4225 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4227 struct ena_calc_queue_size_ctx calc_queue_ctx = {};
4228 struct ena_com_dev_get_features_ctx get_feat_ctx;
4229 struct ena_com_dev *ena_dev = NULL;
4230 struct ena_adapter *adapter;
4231 struct net_device *netdev;
4232 static int adapters_found;
4233 u32 max_num_io_queues;
4237 dev_dbg(&pdev->dev, "%s\n", __func__);
4239 rc = pci_enable_device_mem(pdev);
4241 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4245 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
4247 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
4248 goto err_disable_device;
4251 pci_set_master(pdev);
4253 ena_dev = vzalloc(sizeof(*ena_dev));
4256 goto err_disable_device;
4259 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4260 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4262 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4264 goto err_free_ena_dev;
4267 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4268 pci_resource_start(pdev, ENA_REG_BAR),
4269 pci_resource_len(pdev, ENA_REG_BAR));
4270 if (!ena_dev->reg_bar) {
4271 dev_err(&pdev->dev, "Failed to remap regs bar\n");
4273 goto err_free_region;
4276 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4278 ena_dev->dmadev = &pdev->dev;
4280 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
4282 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4284 goto err_free_region;
4287 SET_NETDEV_DEV(netdev, &pdev->dev);
4288 adapter = netdev_priv(netdev);
4289 adapter->ena_dev = ena_dev;
4290 adapter->netdev = netdev;
4291 adapter->pdev = pdev;
4292 adapter->msg_enable = DEFAULT_MSG_ENABLE;
4294 ena_dev->net_device = netdev;
4296 pci_set_drvdata(pdev, adapter);
4298 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4300 dev_err(&pdev->dev, "ENA device init failed\n");
4303 goto err_netdev_destroy;
4306 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4308 dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
4309 goto err_device_destroy;
4312 calc_queue_ctx.ena_dev = ena_dev;
4313 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4314 calc_queue_ctx.pdev = pdev;
4316 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4317 * Updated during device initialization with the real granularity
4319 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4320 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4321 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4322 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4323 rc = ena_calc_io_queue_size(&calc_queue_ctx);
4324 if (rc || !max_num_io_queues) {
4326 goto err_device_destroy;
4329 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4331 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4333 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4334 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4335 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4336 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4337 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4338 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4340 adapter->num_io_queues = max_num_io_queues;
4341 adapter->max_num_io_queues = max_num_io_queues;
4342 adapter->last_monitored_tx_qid = 0;
4344 adapter->xdp_first_ring = 0;
4345 adapter->xdp_num_queues = 0;
4347 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4348 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4349 adapter->disable_meta_caching =
4350 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4351 BIT(ENA_ADMIN_DISABLE_META_CACHING));
4353 adapter->wd_state = wd_state;
4355 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4357 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4360 "Failed to query interrupt moderation feature\n");
4361 goto err_device_destroy;
4363 ena_init_io_rings(adapter,
4365 adapter->xdp_num_queues +
4366 adapter->num_io_queues);
4368 netdev->netdev_ops = &ena_netdev_ops;
4369 netdev->watchdog_timeo = TX_TIMEOUT;
4370 ena_set_ethtool_ops(netdev);
4372 netdev->priv_flags |= IFF_UNICAST_FLT;
4374 u64_stats_init(&adapter->syncp);
4376 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4379 "Failed to enable and set the admin interrupts\n");
4380 goto err_worker_destroy;
4382 rc = ena_rss_init_default(adapter);
4383 if (rc && (rc != -EOPNOTSUPP)) {
4384 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4388 ena_config_debug_area(adapter);
4390 if (!ena_update_hw_stats(adapter))
4391 adapter->eni_stats_supported = true;
4393 adapter->eni_stats_supported = false;
4395 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4397 netif_carrier_off(netdev);
4399 rc = register_netdev(netdev);
4401 dev_err(&pdev->dev, "Cannot register net device\n");
4405 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4407 adapter->last_keep_alive_jiffies = jiffies;
4408 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4409 adapter->missing_tx_completion_to = TX_TIMEOUT;
4410 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4412 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4414 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4415 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4417 dev_info(&pdev->dev,
4418 "%s found at mem %lx, mac addr %pM\n",
4419 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4422 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4429 ena_com_delete_debug_area(ena_dev);
4430 ena_com_rss_destroy(ena_dev);
4432 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4433 /* stop submitting admin commands on a device that was reset */
4434 ena_com_set_admin_running_state(ena_dev, false);
4435 ena_free_mgmnt_irq(adapter);
4436 ena_disable_msix(adapter);
4438 del_timer(&adapter->timer_service);
4440 ena_com_delete_host_info(ena_dev);
4441 ena_com_admin_destroy(ena_dev);
4443 free_netdev(netdev);
4445 ena_release_bars(ena_dev, pdev);
4449 pci_disable_device(pdev);
4453 /*****************************************************************************/
4455 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4456 * @pdev: PCI device information struct
4457 * @shutdown: Is it a shutdown operation? If false, means it is a removal
4459 * __ena_shutoff is a helper routine that does the real work on shutdown and
* removal paths; the difference between those paths is whether to
* detach or unregister the netdevice.
4463 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4465 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4466 struct ena_com_dev *ena_dev;
4467 struct net_device *netdev;
4469 ena_dev = adapter->ena_dev;
4470 netdev = adapter->netdev;
#ifdef CONFIG_RFS_ACCEL
        if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
                free_irq_cpu_rmap(netdev->rx_cpu_rmap);
                netdev->rx_cpu_rmap = NULL;
        }
#endif /* CONFIG_RFS_ACCEL */
        /* Make sure timer and reset routine won't be called after
         * freeing device resources.
         */
        del_timer_sync(&adapter->timer_service);
        cancel_work_sync(&adapter->reset_task);
        rtnl_lock(); /* lock released inside the below if-else block */
        adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
        ena_destroy_device(adapter, true);
        if (shutdown) {
                netif_device_detach(netdev);
                dev_close(netdev);
                rtnl_unlock();
        } else {
                rtnl_unlock();
                unregister_netdev(netdev);
                free_netdev(netdev);
        }

        ena_com_rss_destroy(ena_dev);
        ena_com_delete_debug_area(ena_dev);
        ena_com_delete_host_info(ena_dev);
        ena_release_bars(ena_dev, pdev);
        pci_disable_device(pdev);
        vfree(ena_dev);
}
/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
        __ena_shutoff(pdev, false);
}
/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
 */
static void ena_shutdown(struct pci_dev *pdev)
{
        __ena_shutoff(pdev, true);
}
/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
        struct pci_dev *pdev = to_pci_dev(dev_d);
        struct ena_adapter *adapter = pci_get_drvdata(pdev);

        ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

        rtnl_lock();
        if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
                dev_err(&pdev->dev,
                        "Ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
        ena_destroy_device(adapter, true);
        rtnl_unlock();
        return 0;
}
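
/* The suspend path tears the device down gracefully (ena_destroy_device) and
 * the resume path rebuilds it (ena_restore_device). Both run under the RTNL
 * lock so they cannot race with the reset worker, which also takes RTNL.
 */
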
/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
        struct ena_adapter *adapter = dev_get_drvdata(dev_d);
        int rc;

        ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

        rtnl_lock();
        rc = ena_restore_device(adapter);
        rtnl_unlock();
        return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
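
/* SIMPLE_DEV_PM_OPS wires ena_suspend/ena_resume into the system-sleep PM
 * hooks; marking the callbacks __maybe_unused avoids "defined but not used"
 * warnings when CONFIG_PM_SLEEP is disabled.
 */
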
static struct pci_driver ena_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = ena_pci_tbl,
        .probe          = ena_probe,
        .remove         = ena_remove,
        .shutdown       = ena_shutdown,
        .driver.pm      = &ena_pm_ops,
        .sriov_configure = pci_sriov_configure_simple,
};
static int __init ena_init(void)
{
        int ret;

        ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
        if (!ena_wq) {
                pr_err("Failed to create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&ena_pci_driver);
        if (ret)
                destroy_workqueue(ena_wq);

        return ret;
}
static void __exit ena_cleanup(void)
{
        pci_unregister_driver(&ena_pci_driver);

        destroy_workqueue(ena_wq);
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
                                      struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_aenq_link_change_desc *aenq_desc =
                (struct ena_admin_aenq_link_change_desc *)aenq_e;
        int status = aenq_desc->flags &
                ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

        if (status) {
                netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
                set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
                        netif_carrier_on(adapter->netdev);
        } else {
                clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                netif_carrier_off(adapter->netdev);
        }
}
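
/* The device emits a keep-alive AENQ event periodically. The timer service
 * treats a long silence as a sign the device is hung and schedules a reset.
 * The same event also carries the device-wide rx/tx drop counters, split
 * into 32-bit high/low halves in the descriptor.
 */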
static void ena_keep_alive_wd(void *adapter_data,
                              struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_aenq_keep_alive_desc *desc;
        u64 rx_drops;
        u64 tx_drops;

        desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
        adapter->last_keep_alive_jiffies = jiffies;

        rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
        tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

        u64_stats_update_begin(&adapter->syncp);
        /* These stats are accumulated by the device, so the counters indicate
         * all drops since last reset.
         */
        adapter->dev_stats.rx_drops = rx_drops;
        adapter->dev_stats.tx_drops = tx_drops;
        u64_stats_update_end(&adapter->syncp);
}
static void ena_notification(void *adapter_data,
                             struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_ena_hw_hints *hints;

        WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
             "Invalid group(%x) expected %x\n",
             aenq_e->aenq_common_desc.group,
             ENA_ADMIN_NOTIFICATION);

        switch (aenq_e->aenq_common_desc.syndrome) {
        case ENA_ADMIN_UPDATE_HINTS:
                hints = (struct ena_admin_ena_hw_hints *)
                        (&aenq_e->inline_data_w4);
                ena_update_hints(adapter, hints);
                break;
        default:
                netif_err(adapter, drv, adapter->netdev,
                          "Invalid aenq notification link state %d\n",
                          aenq_e->aenq_common_desc.syndrome);
        }
}
/* This handler is called for an unknown event group or an unimplemented
 * handler.
 */
static void unimplemented_aenq_handler(void *data,
                                       struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)data;

        netif_err(adapter, drv, adapter->netdev,
                  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
        .handlers = {
                [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
                [ENA_ADMIN_NOTIFICATION] = ena_notification,
                [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
        },
        .unimplemented_handler = unimplemented_aenq_handler
};
module_init(ena_init);
module_exit(ena_cleanup);