2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <net/tc_act/tc_gact.h>
34 #include <net/pkt_cls.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <net/geneve.h>
38 #include <linux/bpf.h>
39 #include <linux/if_bridge.h>
40 #include <net/page_pool.h>
41 #include <net/xdp_sock_drv.h>
47 #include "en_accel/ipsec.h"
48 #include "en_accel/en_accel.h"
49 #include "en_accel/tls.h"
50 #include "accel/ipsec.h"
51 #include "accel/tls.h"
52 #include "lib/vxlan.h"
53 #include "lib/clock.h"
57 #include "en/monitor_stats.h"
58 #include "en/health.h"
59 #include "en/params.h"
60 #include "en/xsk/pool.h"
61 #include "en/xsk/setup.h"
62 #include "en/xsk/rx.h"
63 #include "en/xsk/tx.h"
64 #include "en/hv_vhca_stats.h"
65 #include "en/devlink.h"
70 #include "fpga/ipsec.h"
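/* Striding RQ requires the device to support striding RQ itself, UMR with a
 * reserved-lkey pointer (umr_ptr_rlky) and UMR posting from a regular SQ
 * (reg_umr_sq), and the inline UMR WQE must fit in the device's maximum SQ
 * WQE size.
 */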
72 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
74 bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
75 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
76 MLX5_CAP_ETH(mdev, reg_umr_sq);
77 u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
78 bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
83 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
84 (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
90 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
91 struct mlx5e_params *params)
93 params->log_rq_mtu_frames = is_kdump_kernel() ?
94 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
95 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
97 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
98 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
99 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
100 BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
101 BIT(params->log_rq_mtu_frames),
102 BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
103 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
106 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
107 struct mlx5e_params *params)
109 if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
112 if (mlx5_fpga_is_ipsec_device(mdev))
115 if (params->xdp_prog) {
116 /* XSK params are not considered here. If striding RQ is in use,
117 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
118 * be called with the known XSK params.
120 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
127 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
129 params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
130 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
131 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
135 void mlx5e_update_carrier(struct mlx5e_priv *priv)
137 struct mlx5_core_dev *mdev = priv->mdev;
140 port_state = mlx5_query_vport_state(mdev,
141 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
144 if (port_state == VPORT_STATE_UP) {
145 netdev_info(priv->netdev, "Link up\n");
146 netif_carrier_on(priv->netdev);
148 netdev_info(priv->netdev, "Link down\n");
149 netif_carrier_off(priv->netdev);
153 static void mlx5e_update_carrier_work(struct work_struct *work)
155 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
156 update_carrier_work);
158 mutex_lock(&priv->state_lock);
159 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
160 if (priv->profile->update_carrier)
161 priv->profile->update_carrier(priv);
162 mutex_unlock(&priv->state_lock);
165 static void mlx5e_update_stats_work(struct work_struct *work)
167 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
170 mutex_lock(&priv->state_lock);
171 priv->profile->update_stats(priv);
172 mutex_unlock(&priv->state_lock);
175 void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
177 if (!priv->profile->update_stats)
180 if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
183 queue_work(priv->wq, &priv->update_stats_work);
186 static int async_event(struct notifier_block *nb, unsigned long event, void *data)
188 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
189 struct mlx5_eqe *eqe = data;
191 if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
194 switch (eqe->sub_type) {
195 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
196 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
197 queue_work(priv->wq, &priv->update_carrier_work);
206 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
208 priv->events_nb.notifier_call = async_event;
209 mlx5_notifier_register(priv->mdev, &priv->events_nb);
212 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
214 mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
217 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
219 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
223 case MLX5_DRIVER_EVENT_TYPE_TRAP:
224 err = mlx5e_handle_trap_event(priv, data);
227 netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
233 static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
235 priv->blocking_events_nb.notifier_call = blocking_event;
236 mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
239 static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
241 mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
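/* Pre-build the constant part of the UMR WQE used to (re)map the pages of one
 * multi-packet WQE: the ctrl segment carries the ICOSQ number and DS count,
 * and the UMR ctrl segment requests inline translation entries for
 * MLX5_MPWRQ_PAGES_PER_WQE pages of the RQ's UMR mkey.
 */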
244 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
245 struct mlx5e_icosq *sq,
246 struct mlx5e_umr_wqe *wqe)
248 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
249 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
250 u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
252 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
254 cseg->umr_mkey = rq->mkey_be;
256 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
257 ucseg->xlt_octowords =
258 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
259 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
262 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
263 struct mlx5e_channel *c)
265 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
267 rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
268 sizeof(*rq->mpwqe.info)),
269 GFP_KERNEL, cpu_to_node(c->cpu));
273 mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
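/* Create a UMR-enabled MTT mkey covering 'npages' pages. The mkey is created
 * in the "free" state so that RX UMR WQEs can later post the actual page
 * translations.
 */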
278 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
279 u64 npages, u8 page_shift,
280 struct mlx5_core_mkey *umr_mkey,
281 dma_addr_t filler_addr)
283 struct mlx5_mtt *mtt;
290 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
292 in = kvzalloc(inlen, GFP_KERNEL);
296 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
298 MLX5_SET(mkc, mkc, free, 1);
299 MLX5_SET(mkc, mkc, umr_en, 1);
300 MLX5_SET(mkc, mkc, lw, 1);
301 MLX5_SET(mkc, mkc, lr, 1);
302 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
303 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
304 MLX5_SET(mkc, mkc, qpn, 0xffffff);
305 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
306 MLX5_SET64(mkc, mkc, len, npages << page_shift);
307 MLX5_SET(mkc, mkc, translations_octword_size,
308 MLX5_MTT_OCTW(npages));
309 MLX5_SET(mkc, mkc, log_page_size, page_shift);
310 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
311 MLX5_MTT_OCTW(npages));
313 /* Initialize the mkey with all MTTs pointing to a default
314 * page (filler_addr). When the channels are activated, UMR
315 * WQEs will redirect the RX WQEs to the actual memory from
316 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
317 * to the default page.
319 mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
320 for (i = 0 ; i < npages ; i++)
321 mtt[i].ptag = cpu_to_be64(filler_addr);
323 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
329 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
331 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
333 return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
334 rq->wqe_overflow.addr);
337 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
339 return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
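/* Precompute the page/offset placement of every WQE fragment for the legacy
 * (cyclic) RQ: consecutive fragments share a page until the next fragment's
 * stride would cross PAGE_SIZE, at which point last_in_page is set on the
 * previous fragment and a new page (di) is used.
 */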
342 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
344 struct mlx5e_wqe_frag_info next_frag = {};
345 struct mlx5e_wqe_frag_info *prev = NULL;
348 next_frag.di = &rq->wqe.di[0];
350 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
351 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
352 struct mlx5e_wqe_frag_info *frag =
353 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
356 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
357 if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
359 next_frag.offset = 0;
361 prev->last_in_page = true;
366 next_frag.offset += frag_info[f].frag_stride;
372 prev->last_in_page = true;
375 int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
377 int len = wq_sz << rq->wqe.info.log_num_frags;
379 rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
383 mlx5e_init_frags_partition(rq);
388 void mlx5e_free_di_list(struct mlx5e_rq *rq)
393 static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
395 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
397 mlx5e_reporter_rq_cqe_err(rq);
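/* Allocate and DMA-map the overflow page that backs the unused gap strides of
 * a striding RQ (the filler_addr passed to mlx5e_create_umr_mkey()).
 */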
400 static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
402 rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
403 if (!rq->wqe_overflow.page)
406 rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
407 PAGE_SIZE, rq->buff.map_dir);
408 if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
409 __free_page(rq->wqe_overflow.page);
415 static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
417 dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
419 __free_page(rq->wqe_overflow.page);
422 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
423 struct mlx5e_params *params,
424 struct mlx5e_xsk_param *xsk,
425 struct xsk_buff_pool *xsk_pool,
426 struct mlx5e_rq_param *rqp,
429 struct page_pool_params pp_params = { 0 };
430 struct mlx5_core_dev *mdev = c->mdev;
431 void *rqc = rqp->rqc;
432 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
439 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
441 rq->wq_type = params->rq_wq_type;
443 rq->netdev = c->netdev;
445 rq->tstamp = c->tstamp;
446 rq->clock = &mdev->clock;
447 rq->icosq = &c->icosq;
450 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
451 rq->xdpsq = &c->rq_xdpsq;
452 rq->xsk_pool = xsk_pool;
453 rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
454 mlx5_real_time_cyc2time :
455 mlx5_timecounter_cyc2time;
458 rq->stats = &c->priv->channel_stats[c->ix].xskrq;
460 rq->stats = &c->priv->channel_stats[c->ix].rq;
461 INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
463 if (params->xdp_prog)
464 bpf_prog_inc(params->xdp_prog);
465 RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
469 rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
470 err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
472 goto err_rq_xdp_prog;
474 rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
475 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
476 pool_size = 1 << params->log_rq_mtu_frames;
478 switch (rq->wq_type) {
479 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
480 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
485 err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
487 goto err_rq_wq_destroy;
489 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
491 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
493 pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
494 mlx5e_mpwqe_get_log_rq_size(params, xsk);
496 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
497 rq->mpwqe.num_strides =
498 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
500 rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
502 err = mlx5e_create_rq_umr_mkey(mdev, rq);
504 goto err_rq_drop_page;
505 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
507 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
511 default: /* MLX5_WQ_TYPE_CYCLIC */
512 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
517 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
519 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
521 rq->wqe.info = rqp->frags_info;
522 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
525 kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
526 (wq_sz << rq->wqe.info.log_num_frags)),
527 GFP_KERNEL, cpu_to_node(c->cpu));
528 if (!rq->wqe.frags) {
530 goto err_rq_wq_destroy;
533 err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
537 rq->mkey_be = c->mkey_be;
540 err = mlx5e_rq_set_handlers(rq, params, xsk);
542 goto err_free_by_rq_type;
545 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
546 MEM_TYPE_XSK_BUFF_POOL, NULL);
547 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
549 /* Create a page_pool and register it with rxq */
551 pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
552 pp_params.pool_size = pool_size;
553 pp_params.nid = cpu_to_node(c->cpu);
554 pp_params.dev = c->pdev;
555 pp_params.dma_dir = rq->buff.map_dir;
557 /* page_pool can be used even when there is no rq->xdp_prog,
558 * given page_pool does not handle DMA mapping there is no
559 * required state to clear. And page_pool gracefully handles elevated refcnt. */
562 rq->page_pool = page_pool_create(&pp_params);
563 if (IS_ERR(rq->page_pool)) {
564 err = PTR_ERR(rq->page_pool);
565 rq->page_pool = NULL;
566 goto err_free_by_rq_type;
568 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
569 MEM_TYPE_PAGE_POOL, rq->page_pool);
572 goto err_free_by_rq_type;
574 for (i = 0; i < wq_sz; i++) {
575 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
576 struct mlx5e_rx_wqe_ll *wqe =
577 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
579 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
580 u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
582 wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
583 wqe->data[0].byte_count = cpu_to_be32(byte_count);
584 wqe->data[0].lkey = rq->mkey_be;
586 struct mlx5e_rx_wqe_cyc *wqe =
587 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
590 for (f = 0; f < rq->wqe.info.num_frags; f++) {
591 u32 frag_size = rq->wqe.info.arr[f].frag_size |
592 MLX5_HW_START_PADDING;
594 wqe->data[f].byte_count = cpu_to_be32(frag_size);
595 wqe->data[f].lkey = rq->mkey_be;
597 /* check if num_frags is not a pow of two */
598 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
599 wqe->data[f].byte_count = 0;
600 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
601 wqe->data[f].addr = 0;
606 INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
608 switch (params->rx_cq_moderation.cq_period_mode) {
609 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
610 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
612 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
614 rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
617 rq->page_cache.head = 0;
618 rq->page_cache.tail = 0;
623 switch (rq->wq_type) {
624 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
625 kvfree(rq->mpwqe.info);
627 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
629 mlx5e_free_mpwqe_rq_drop_page(rq);
631 default: /* MLX5_WQ_TYPE_CYCLIC */
632 mlx5e_free_di_list(rq);
634 kvfree(rq->wqe.frags);
637 mlx5_wq_destroy(&rq->wq_ctrl);
639 xdp_rxq_info_unreg(&rq->xdp_rxq);
641 if (params->xdp_prog)
642 bpf_prog_put(params->xdp_prog);
647 static void mlx5e_free_rq(struct mlx5e_rq *rq)
649 struct bpf_prog *old_prog;
652 old_prog = rcu_dereference_protected(rq->xdp_prog,
653 lockdep_is_held(&rq->priv->state_lock));
655 bpf_prog_put(old_prog);
657 switch (rq->wq_type) {
658 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
659 kvfree(rq->mpwqe.info);
660 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
661 mlx5e_free_mpwqe_rq_drop_page(rq);
663 default: /* MLX5_WQ_TYPE_CYCLIC */
664 kvfree(rq->wqe.frags);
665 mlx5e_free_di_list(rq);
668 for (i = rq->page_cache.head; i != rq->page_cache.tail;
669 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
670 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
672 /* With AF_XDP, page_cache is not used, so this loop is not
673 * entered, and it's safe to call mlx5e_page_release_dynamic
676 mlx5e_page_release_dynamic(rq, dma_info, false);
679 xdp_rxq_info_unreg(&rq->xdp_rxq);
680 page_pool_destroy(rq->page_pool);
681 mlx5_wq_destroy(&rq->wq_ctrl);
684 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
686 struct mlx5_core_dev *mdev = rq->mdev;
694 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
695 sizeof(u64) * rq->wq_ctrl.buf.npages;
696 in = kvzalloc(inlen, GFP_KERNEL);
700 ts_format = mlx5_is_real_time_rq(mdev) ?
701 MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME :
702 MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
703 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
704 wq = MLX5_ADDR_OF(rqc, rqc, wq);
706 memcpy(rqc, param->rqc, sizeof(param->rqc));
708 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
709 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
710 MLX5_SET(rqc, rqc, ts_format, ts_format);
711 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
712 MLX5_ADAPTER_PAGE_SHIFT);
713 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
715 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
716 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
718 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
725 int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
727 struct mlx5_core_dev *mdev = rq->mdev;
734 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
735 in = kvzalloc(inlen, GFP_KERNEL);
739 if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
740 mlx5e_rqwq_reset(rq);
742 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
744 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
745 MLX5_SET(rqc, rqc, state, next_state);
747 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
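/* Toggle FCS scatter on an RQ that is already in the RDY state, using the
 * SCATTER_FCS bit of the MODIFY_RQ bitmask.
 */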
754 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
756 struct mlx5_core_dev *mdev = rq->mdev;
763 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
764 in = kvzalloc(inlen, GFP_KERNEL);
768 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
770 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
771 MLX5_SET64(modify_rq_in, in, modify_bitmask,
772 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
773 MLX5_SET(rqc, rqc, scatter_fcs, enable);
774 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
776 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
783 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
785 struct mlx5_core_dev *mdev = rq->mdev;
791 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
792 in = kvzalloc(inlen, GFP_KERNEL);
796 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
798 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
799 MLX5_SET64(modify_rq_in, in, modify_bitmask,
800 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
801 MLX5_SET(rqc, rqc, vsd, vsd);
802 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
804 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
811 void mlx5e_destroy_rq(struct mlx5e_rq *rq)
813 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
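/* Poll until the RQ has been filled with at least the minimal number of
 * posted WQEs or the timeout expires; on timeout, warn and kick the RX health
 * reporter.
 */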
816 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
818 unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
820 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
823 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
827 } while (time_before(jiffies, exp_time));
829 netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
830 rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
832 mlx5e_reporter_rx_timeout(rq);
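/* Striding RQ only: release RX WQEs whose UMRs were posted but have not
 * completed yet (starting at wq->head), then reset the UMR bookkeeping
 * counters.
 */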
836 void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
838 struct mlx5_wq_ll *wq;
842 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
848 /* Outstanding UMR WQEs (in progress) start at wq->head */
849 for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
850 rq->dealloc_wqe(rq, head);
851 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
854 rq->mpwqe.actual_wq_head = wq->head;
855 rq->mpwqe.umr_in_progress = 0;
856 rq->mpwqe.umr_completed = 0;
859 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
864 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
865 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
867 mlx5e_free_rx_in_progress_descs(rq);
869 while (!mlx5_wq_ll_is_empty(wq)) {
870 struct mlx5e_rx_wqe_ll *wqe;
872 wqe_ix_be = *wq->tail_next;
873 wqe_ix = be16_to_cpu(wqe_ix_be);
874 wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
875 rq->dealloc_wqe(rq, wqe_ix);
876 mlx5_wq_ll_pop(wq, wqe_ix_be,
877 &wqe->next.next_wqe_index);
880 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
882 while (!mlx5_wq_cyc_is_empty(wq)) {
883 wqe_ix = mlx5_wq_cyc_get_tail(wq);
884 rq->dealloc_wqe(rq, wqe_ix);
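/* Open an RQ: allocate the SW context, create the HW object, move it from RST
 * to RDY, and set the RQ state bits (FPGA TLS, full CQE checksum, DIM,
 * csum_complete disable, HW stride index) according to device caps and params.
 */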
891 int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
892 struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
893 struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
897 err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
901 err = mlx5e_create_rq(rq, param);
905 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
909 if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
910 __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
912 if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
913 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
915 if (params->rx_dim_enabled)
916 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
918 /* We disable csum_complete when XDP is enabled since
919 * XDP programs might manipulate packets which will render
920 * skb->checksum incorrect.
922 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
923 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
925 /* For CQE compression on striding RQ, use stride index provided by
926 * HW if capability is supported.
928 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
929 MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
930 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
935 mlx5e_destroy_rq(rq);
942 void mlx5e_activate_rq(struct mlx5e_rq *rq)
944 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
945 mlx5e_trigger_irq(rq->icosq);
948 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
950 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
951 synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
954 void mlx5e_close_rq(struct mlx5e_rq *rq)
956 cancel_work_sync(&rq->dim.work);
957 cancel_work_sync(&rq->icosq->recover_work);
958 cancel_work_sync(&rq->recover_work);
959 mlx5e_destroy_rq(rq);
960 mlx5e_free_rx_descs(rq);
964 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
966 kvfree(sq->db.xdpi_fifo.xi);
967 kvfree(sq->db.wqe_info);
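/* The xdpi FIFO is sized for the worst case of one entry per data segment in
 * flight: the SQ length times the maximum number of DS per WQEBB.
 */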
970 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
972 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
973 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
974 int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
976 xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
981 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
982 xdpi_fifo->cc = &sq->xdpi_fifo_cc;
983 xdpi_fifo->mask = dsegs_per_wq - 1;
988 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
990 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
993 sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
995 if (!sq->db.wqe_info)
998 err = mlx5e_alloc_xdpsq_fifo(sq, numa);
1000 mlx5e_free_xdpsq_db(sq);
1007 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1008 struct mlx5e_params *params,
1009 struct xsk_buff_pool *xsk_pool,
1010 struct mlx5e_sq_param *param,
1011 struct mlx5e_xdpsq *sq,
1014 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1015 struct mlx5_core_dev *mdev = c->mdev;
1016 struct mlx5_wq_cyc *wq = &sq->wq;
1020 sq->mkey_be = c->mkey_be;
1022 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1023 sq->min_inline_mode = params->tx_min_inline_mode;
1024 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1025 sq->xsk_pool = xsk_pool;
1027 sq->stats = sq->xsk_pool ?
1028 &c->priv->channel_stats[c->ix].xsksq :
1030 &c->priv->channel_stats[c->ix].xdpsq :
1031 &c->priv->channel_stats[c->ix].rq_xdpsq;
1033 param->wq.db_numa_node = cpu_to_node(c->cpu);
1034 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1037 wq->db = &wq->db[MLX5_SND_DBR];
1039 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1041 goto err_sq_wq_destroy;
1046 mlx5_wq_destroy(&sq->wq_ctrl);
1051 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1053 mlx5e_free_xdpsq_db(sq);
1054 mlx5_wq_destroy(&sq->wq_ctrl);
1057 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1059 kvfree(sq->db.wqe_info);
1062 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1064 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1067 size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
1068 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1069 if (!sq->db.wqe_info)
1075 static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
1077 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1080 mlx5e_reporter_icosq_cqe_err(sq);
1083 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1084 struct mlx5e_sq_param *param,
1085 struct mlx5e_icosq *sq)
1087 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1088 struct mlx5_core_dev *mdev = c->mdev;
1089 struct mlx5_wq_cyc *wq = &sq->wq;
1093 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1095 param->wq.db_numa_node = cpu_to_node(c->cpu);
1096 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1099 wq->db = &wq->db[MLX5_SND_DBR];
1101 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1103 goto err_sq_wq_destroy;
1105 INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
1110 mlx5_wq_destroy(&sq->wq_ctrl);
1115 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1117 mlx5e_free_icosq_db(sq);
1118 mlx5_wq_destroy(&sq->wq_ctrl);
1121 void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1123 kvfree(sq->db.wqe_info);
1124 kvfree(sq->db.skb_fifo.fifo);
1125 kvfree(sq->db.dma_fifo);
1128 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1130 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1131 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1133 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1134 sizeof(*sq->db.dma_fifo)),
1136 sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
1137 sizeof(*sq->db.skb_fifo.fifo)),
1139 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1140 sizeof(*sq->db.wqe_info)),
1142 if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
1143 mlx5e_free_txqsq_db(sq);
1147 sq->dma_fifo_mask = df_sz - 1;
1149 sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
1150 sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
1151 sq->db.skb_fifo.mask = df_sz - 1;
1156 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1158 struct mlx5e_params *params,
1159 struct mlx5e_sq_param *param,
1160 struct mlx5e_txqsq *sq,
1163 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1164 struct mlx5_core_dev *mdev = c->mdev;
1165 struct mlx5_wq_cyc *wq = &sq->wq;
1169 sq->tstamp = c->tstamp;
1170 sq->clock = &mdev->clock;
1171 sq->mkey_be = c->mkey_be;
1172 sq->netdev = c->netdev;
1176 sq->txq_ix = txq_ix;
1177 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1178 sq->min_inline_mode = params->tx_min_inline_mode;
1179 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1180 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
1181 if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
1182 set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
1183 if (MLX5_IPSEC_DEV(c->priv->mdev))
1184 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1185 if (mlx5_accel_is_tls_device(c->priv->mdev))
1186 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1188 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
1189 sq->stop_room = param->stop_room;
1190 sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
1191 mlx5_real_time_cyc2time :
1192 mlx5_timecounter_cyc2time;
1194 param->wq.db_numa_node = cpu_to_node(c->cpu);
1195 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1198 wq->db = &wq->db[MLX5_SND_DBR];
1200 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1202 goto err_sq_wq_destroy;
1204 INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1205 sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1210 mlx5_wq_destroy(&sq->wq_ctrl);
1215 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1217 mlx5e_free_txqsq_db(sq);
1218 mlx5_wq_destroy(&sq->wq_ctrl);
1221 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1222 struct mlx5e_sq_param *param,
1223 struct mlx5e_create_sq_param *csp,
1233 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1234 sizeof(u64) * csp->wq_ctrl->buf.npages;
1235 in = kvzalloc(inlen, GFP_KERNEL);
1239 ts_format = mlx5_is_real_time_sq(mdev) ?
1240 MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME :
1241 MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
1242 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1243 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1245 memcpy(sqc, param->sqc, sizeof(param->sqc));
1246 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1247 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1248 MLX5_SET(sqc, sqc, cqn, csp->cqn);
1249 MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
1250 MLX5_SET(sqc, sqc, ts_format, ts_format);
1253 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1254 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
1256 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1257 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1259 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1260 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
1261 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
1262 MLX5_ADAPTER_PAGE_SHIFT);
1263 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
1265 mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1266 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1268 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1275 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1276 struct mlx5e_modify_sq_param *p)
1284 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1285 in = kvzalloc(inlen, GFP_KERNEL);
1289 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1291 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1292 MLX5_SET(sqc, sqc, state, p->next_state);
1293 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1295 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
1297 if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
1299 MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
1301 MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
1303 err = mlx5_core_modify_sq(mdev, sqn, in);
1310 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1312 mlx5_core_destroy_sq(mdev, sqn);
1315 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1316 struct mlx5e_sq_param *param,
1317 struct mlx5e_create_sq_param *csp,
1318 u16 qos_queue_group_id,
1321 struct mlx5e_modify_sq_param msp = {0};
1324 err = mlx5e_create_sq(mdev, param, csp, sqn);
1328 msp.curr_state = MLX5_SQC_STATE_RST;
1329 msp.next_state = MLX5_SQC_STATE_RDY;
1330 if (qos_queue_group_id) {
1331 msp.qos_update = true;
1332 msp.qos_queue_group_id = qos_queue_group_id;
1334 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1336 mlx5e_destroy_sq(mdev, *sqn);
1341 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1342 struct mlx5e_txqsq *sq, u32 rate);
1344 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
1345 struct mlx5e_params *params, struct mlx5e_sq_param *param,
1346 struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
1348 struct mlx5e_create_sq_param csp = {};
1352 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1356 if (qos_queue_group_id)
1357 sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
1359 sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
1363 csp.cqn = sq->cq.mcq.cqn;
1364 csp.wq_ctrl = &sq->wq_ctrl;
1365 csp.min_inline_mode = sq->min_inline_mode;
1366 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
1368 goto err_free_txqsq;
1370 tx_rate = c->priv->tx_rates[sq->txq_ix];
1372 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1374 if (params->tx_dim_enabled)
1375 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1380 mlx5e_free_txqsq(sq);
1385 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1387 sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
1388 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1389 netdev_tx_reset_queue(sq->txq);
1390 netif_tx_start_queue(sq->txq);
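/* Stop the txq under the TX queue lock (with BHs disabled) so the stop is
 * serialized against a concurrent transmitter.
 */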
1393 void mlx5e_tx_disable_queue(struct netdev_queue *txq)
1395 __netif_tx_lock_bh(txq);
1396 netif_tx_stop_queue(txq);
1397 __netif_tx_unlock_bh(txq);
1400 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1402 struct mlx5_wq_cyc *wq = &sq->wq;
1404 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1405 synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
1407 mlx5e_tx_disable_queue(sq->txq);
1409 /* last doorbell out, godspeed .. */
1410 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1411 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1412 struct mlx5e_tx_wqe *nop;
1414 sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
1418 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1419 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1423 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1425 struct mlx5_core_dev *mdev = sq->mdev;
1426 struct mlx5_rate_limit rl = {0};
1428 cancel_work_sync(&sq->dim.work);
1429 cancel_work_sync(&sq->recover_work);
1430 mlx5e_destroy_sq(mdev, sq->sqn);
1431 if (sq->rate_limit) {
1432 rl.rate = sq->rate_limit;
1433 mlx5_rl_remove_rate(mdev, &rl);
1435 mlx5e_free_txqsq_descs(sq);
1436 mlx5e_free_txqsq(sq);
1439 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
1441 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
1444 mlx5e_reporter_tx_err_cqe(sq);
1447 int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
1448 struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
1450 struct mlx5e_create_sq_param csp = {};
1453 err = mlx5e_alloc_icosq(c, param, sq);
1457 csp.cqn = sq->cq.mcq.cqn;
1458 csp.wq_ctrl = &sq->wq_ctrl;
1459 csp.min_inline_mode = params->tx_min_inline_mode;
1460 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1462 goto err_free_icosq;
1467 mlx5e_free_icosq(sq);
1472 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
1474 set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
1477 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
1479 clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
1480 synchronize_net(); /* Sync with NAPI. */
1483 void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1485 struct mlx5e_channel *c = sq->channel;
1487 mlx5e_destroy_sq(c->mdev, sq->sqn);
1488 mlx5e_free_icosq_descs(sq);
1489 mlx5e_free_icosq(sq);
1492 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
1493 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
1494 struct mlx5e_xdpsq *sq, bool is_redirect)
1496 struct mlx5e_create_sq_param csp = {};
1499 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
1504 csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
1505 csp.cqn = sq->cq.mcq.cqn;
1506 csp.wq_ctrl = &sq->wq_ctrl;
1507 csp.min_inline_mode = sq->min_inline_mode;
1508 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1509 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
1511 goto err_free_xdpsq;
1513 mlx5e_set_xmit_fp(sq, param->is_mpw);
1515 if (!param->is_mpw) {
1516 unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1517 unsigned int inline_hdr_sz = 0;
1520 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1521 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1525 /* Pre-initialize fixed WQE fields */
1526 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1527 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1528 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1529 struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
1530 struct mlx5_wqe_data_seg *dseg;
1532 sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
1537 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1538 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1540 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1541 dseg->lkey = sq->mkey_be;
1548 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1549 mlx5e_free_xdpsq(sq);
1554 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1556 struct mlx5e_channel *c = sq->channel;
1558 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1559 synchronize_net(); /* Sync with NAPI. */
1561 mlx5e_destroy_sq(c->mdev, sq->sqn);
1562 mlx5e_free_xdpsq_descs(sq);
1563 mlx5e_free_xdpsq(sq);
1566 static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
1567 struct mlx5e_cq_param *param,
1568 struct mlx5e_cq *cq)
1570 struct mlx5_core_dev *mdev = priv->mdev;
1571 struct mlx5_core_cq *mcq = &cq->mcq;
1577 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1581 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1587 mcq->set_ci_db = cq->wq_ctrl.db.db;
1588 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1589 *mcq->set_ci_db = 0;
1591 mcq->vector = param->eq_ix;
1592 mcq->comp = mlx5e_completion_event;
1593 mcq->event = mlx5e_cq_error_event;
1596 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1597 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1603 cq->netdev = priv->netdev;
1609 static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
1610 struct mlx5e_cq_param *param,
1611 struct mlx5e_create_cq_param *ccp,
1612 struct mlx5e_cq *cq)
1616 param->wq.buf_numa_node = ccp->node;
1617 param->wq.db_numa_node = ccp->node;
1618 param->eq_ix = ccp->ix;
1620 err = mlx5e_alloc_cq_common(priv, param, cq);
1622 cq->napi = ccp->napi;
1623 cq->ch_stats = ccp->ch_stats;
1628 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1630 mlx5_wq_destroy(&cq->wq_ctrl);
1633 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1635 u32 out[MLX5_ST_SZ_DW(create_cq_out)];
1636 struct mlx5_core_dev *mdev = cq->mdev;
1637 struct mlx5_core_cq *mcq = &cq->mcq;
1642 unsigned int irqn_not_used;
1646 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1650 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1651 sizeof(u64) * cq->wq_ctrl.buf.npages;
1652 in = kvzalloc(inlen, GFP_KERNEL);
1656 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1658 memcpy(cqc, param->cqc, sizeof(param->cqc));
1660 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1661 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1663 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1664 MLX5_SET(cqc, cqc, c_eqn, eqn);
1665 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
1666 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1667 MLX5_ADAPTER_PAGE_SHIFT);
1668 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1670 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
1682 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1684 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1687 int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
1688 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
1689 struct mlx5e_cq *cq)
1691 struct mlx5_core_dev *mdev = priv->mdev;
1694 err = mlx5e_alloc_cq(priv, param, ccp, cq);
1698 err = mlx5e_create_cq(cq, param);
1702 if (MLX5_CAP_GEN(mdev, cq_moderation))
1703 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1712 void mlx5e_close_cq(struct mlx5e_cq *cq)
1714 mlx5e_destroy_cq(cq);
1718 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1719 struct mlx5e_params *params,
1720 struct mlx5e_create_cq_param *ccp,
1721 struct mlx5e_channel_param *cparam)
1726 for (tc = 0; tc < c->num_tc; tc++) {
1727 err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
1728 ccp, &c->sq[tc].cq);
1730 goto err_close_tx_cqs;
1736 for (tc--; tc >= 0; tc--)
1737 mlx5e_close_cq(&c->sq[tc].cq);
1742 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1746 for (tc = 0; tc < c->num_tc; tc++)
1747 mlx5e_close_cq(&c->sq[tc].cq);
1750 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1751 struct mlx5e_params *params,
1752 struct mlx5e_channel_param *cparam)
1756 for (tc = 0; tc < params->num_tc; tc++) {
1757 int txq_ix = c->ix + tc * params->num_channels;
1759 err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
1760 params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
1768 for (tc--; tc >= 0; tc--)
1769 mlx5e_close_txqsq(&c->sq[tc]);
1774 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1778 for (tc = 0; tc < c->num_tc; tc++)
1779 mlx5e_close_txqsq(&c->sq[tc]);
1782 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1783 struct mlx5e_txqsq *sq, u32 rate)
1785 struct mlx5e_priv *priv = netdev_priv(dev);
1786 struct mlx5_core_dev *mdev = priv->mdev;
1787 struct mlx5e_modify_sq_param msp = {0};
1788 struct mlx5_rate_limit rl = {0};
1792 if (rate == sq->rate_limit)
1796 if (sq->rate_limit) {
1797 rl.rate = sq->rate_limit;
1798 /* remove the current rl index to free space for the next ones */
1799 mlx5_rl_remove_rate(mdev, &rl);
1806 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1808 netdev_err(dev, "Failed configuring rate %u: %d\n",
1814 msp.curr_state = MLX5_SQC_STATE_RDY;
1815 msp.next_state = MLX5_SQC_STATE_RDY;
1816 msp.rl_index = rl_index;
1817 msp.rl_update = true;
1818 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1820 netdev_err(dev, "Failed configuring rate %u: %d\n",
1822 /* remove the rate from the table */
1824 mlx5_rl_remove_rate(mdev, &rl);
1828 sq->rate_limit = rate;
1832 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1834 struct mlx5e_priv *priv = netdev_priv(dev);
1835 struct mlx5_core_dev *mdev = priv->mdev;
1836 struct mlx5e_txqsq *sq = priv->txq2sq[index];
1839 if (!mlx5_rl_is_supported(mdev)) {
1840 netdev_err(dev, "Rate limiting is not supported on this device\n");
1844 /* rate is given in Mb/sec, HW config is in Kb/sec */
1847 /* Check whether the rate is in a valid range; 0 is always valid */
1848 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1849 netdev_err(dev, "TX rate %u, is not in range\n", rate);
1853 mutex_lock(&priv->state_lock);
1854 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1855 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1857 priv->tx_rates[index] = rate;
1858 mutex_unlock(&priv->state_lock);
1863 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
1865 *ccp = (struct mlx5e_create_cq_param) {
1867 .ch_stats = c->stats,
1868 .node = cpu_to_node(c->cpu),
1873 static int mlx5e_open_queues(struct mlx5e_channel *c,
1874 struct mlx5e_params *params,
1875 struct mlx5e_channel_param *cparam)
1877 struct dim_cq_moder icocq_moder = {0, 0};
1878 struct mlx5e_create_cq_param ccp;
1881 mlx5e_build_create_cq_param(&ccp, c);
1883 err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
1884 &c->async_icosq.cq);
1888 err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
1891 goto err_close_async_icosq_cq;
1893 err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
1895 goto err_close_icosq_cq;
1897 err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
1900 goto err_close_tx_cqs;
1902 err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
1905 goto err_close_xdp_tx_cqs;
1907 err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
1908 &ccp, &c->rq_xdpsq.cq) : 0;
1910 goto err_close_rx_cq;
1912 spin_lock_init(&c->async_icosq_lock);
1914 err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
1916 goto err_close_xdpsq_cq;
1918 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1920 goto err_close_async_icosq;
1922 err = mlx5e_open_sqs(c, params, cparam);
1924 goto err_close_icosq;
1927 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
1928 &c->rq_xdpsq, false);
1933 err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
1935 goto err_close_xdp_sq;
1937 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
1944 mlx5e_close_rq(&c->rq);
1948 mlx5e_close_xdpsq(&c->rq_xdpsq);
1954 mlx5e_close_icosq(&c->icosq);
1956 err_close_async_icosq:
1957 mlx5e_close_icosq(&c->async_icosq);
1961 mlx5e_close_cq(&c->rq_xdpsq.cq);
1964 mlx5e_close_cq(&c->rq.cq);
1966 err_close_xdp_tx_cqs:
1967 mlx5e_close_cq(&c->xdpsq.cq);
1970 mlx5e_close_tx_cqs(c);
1973 mlx5e_close_cq(&c->icosq.cq);
1975 err_close_async_icosq_cq:
1976 mlx5e_close_cq(&c->async_icosq.cq);
1981 static void mlx5e_close_queues(struct mlx5e_channel *c)
1983 mlx5e_close_xdpsq(&c->xdpsq);
1984 mlx5e_close_rq(&c->rq);
1986 mlx5e_close_xdpsq(&c->rq_xdpsq);
1988 mlx5e_close_icosq(&c->icosq);
1989 mlx5e_close_icosq(&c->async_icosq);
1991 mlx5e_close_cq(&c->rq_xdpsq.cq);
1992 mlx5e_close_cq(&c->rq.cq);
1993 mlx5e_close_cq(&c->xdpsq.cq);
1994 mlx5e_close_tx_cqs(c);
1995 mlx5e_close_cq(&c->icosq.cq);
1996 mlx5e_close_cq(&c->async_icosq.cq);
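/* Distribute channels round-robin across the LAG ports; non-PF functions bias
 * the mapping by their vhca_id so they do not all start from the same port.
 */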
1999 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
2001 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
2003 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
2006 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
2007 struct mlx5e_params *params,
2008 struct mlx5e_channel_param *cparam,
2009 struct xsk_buff_pool *xsk_pool,
2010 struct mlx5e_channel **cp)
2012 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
2013 struct net_device *netdev = priv->netdev;
2014 struct mlx5e_xsk_param xsk;
2015 struct mlx5e_channel *c;
2020 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
2024 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
2029 c->mdev = priv->mdev;
2030 c->tstamp = &priv->tstamp;
2033 c->pdev = mlx5_core_dma_dev(priv->mdev);
2034 c->netdev = priv->netdev;
2035 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
2036 c->num_tc = params->num_tc;
2037 c->xdp = !!params->xdp_prog;
2038 c->stats = &priv->channel_stats[ix].ch;
2039 c->aff_mask = irq_get_effective_affinity_mask(irq);
2040 c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
2042 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
2044 err = mlx5e_open_queues(c, params, cparam);
2049 mlx5e_build_xsk_param(xsk_pool, &xsk);
2050 err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
2052 goto err_close_queues;
2060 mlx5e_close_queues(c);
2063 netif_napi_del(&c->napi);
2070 static void mlx5e_activate_channel(struct mlx5e_channel *c)
2074 napi_enable(&c->napi);
2076 for (tc = 0; tc < c->num_tc; tc++)
2077 mlx5e_activate_txqsq(&c->sq[tc]);
2078 mlx5e_activate_icosq(&c->icosq);
2079 mlx5e_activate_icosq(&c->async_icosq);
2080 mlx5e_activate_rq(&c->rq);
2082 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2083 mlx5e_activate_xsk(c);
2086 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2090 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2091 mlx5e_deactivate_xsk(c);
2093 mlx5e_deactivate_rq(&c->rq);
2094 mlx5e_deactivate_icosq(&c->async_icosq);
2095 mlx5e_deactivate_icosq(&c->icosq);
2096 for (tc = 0; tc < c->num_tc; tc++)
2097 mlx5e_deactivate_txqsq(&c->sq[tc]);
2098 mlx5e_qos_deactivate_queues(c);
2100 napi_disable(&c->napi);
2103 static void mlx5e_close_channel(struct mlx5e_channel *c)
2105 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2107 mlx5e_close_queues(c);
2108 mlx5e_qos_close_queues(c);
2109 netif_napi_del(&c->napi);
2114 #define DEFAULT_FRAG_SIZE (2048)
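/* Compute the per-WQE fragment layout of the legacy RQ. Linear-SKB mode uses a
 * single fragment whose stride is rounded up to a power of two; otherwise the
 * HW MTU is split across up to MLX5E_MAX_RX_FRAGS fragments of (by default)
 * DEFAULT_FRAG_SIZE bytes, each with a power-of-two stride.
 */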
2116 static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
2117 struct mlx5e_params *params,
2118 struct mlx5e_xsk_param *xsk,
2119 struct mlx5e_rq_frags_info *info)
2121 u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
2122 int frag_size_max = DEFAULT_FRAG_SIZE;
2126 if (mlx5_fpga_is_ipsec_device(mdev))
2127 byte_count += MLX5E_METADATA_ETHER_LEN;
2129 if (mlx5e_rx_is_linear_skb(params, xsk)) {
2132 frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
2133 frag_stride = roundup_pow_of_two(frag_stride);
2135 info->arr[0].frag_size = byte_count;
2136 info->arr[0].frag_stride = frag_stride;
2137 info->num_frags = 1;
2138 info->wqe_bulk = PAGE_SIZE / frag_stride;
2142 if (byte_count > PAGE_SIZE +
2143 (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2144 frag_size_max = PAGE_SIZE;
2147 while (buf_size < byte_count) {
2148 int frag_size = byte_count - buf_size;
2150 if (i < MLX5E_MAX_RX_FRAGS - 1)
2151 frag_size = min(frag_size, frag_size_max);
2153 info->arr[i].frag_size = frag_size;
2154 info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2156 buf_size += frag_size;
2159 info->num_frags = i;
2160 /* number of different wqes sharing a page */
2161 info->wqe_bulk = 1 + (info->num_frags % 2);
2164 info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2165 info->log_num_frags = order_base_2(info->num_frags);
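/* log2 of the RX WQE size: the fixed per-WQE-type header plus 'ndsegs' scatter
 * (data segment) entries.
 */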
2168 static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2170 int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2173 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2174 sz += sizeof(struct mlx5e_rx_wqe_ll);
2176 default: /* MLX5_WQ_TYPE_CYCLIC */
2177 sz += sizeof(struct mlx5e_rx_wqe_cyc);
2180 return order_base_2(sz);
2183 static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
2185 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2187 return MLX5_GET(wq, wq, log_wq_sz);
2190 void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2191 struct mlx5e_params *params,
2192 struct mlx5e_xsk_param *xsk,
2193 struct mlx5e_rq_param *param)
2195 struct mlx5_core_dev *mdev = priv->mdev;
2196 void *rqc = param->rqc;
2197 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2200 switch (params->rq_wq_type) {
2201 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2202 MLX5_SET(wq, wq, log_wqe_num_of_strides,
2203 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
2204 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
2205 MLX5_SET(wq, wq, log_wqe_stride_size,
2206 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
2207 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
2208 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
2210 default: /* MLX5_WQ_TYPE_CYCLIC */
2211 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
2212 mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
2213 ndsegs = param->frags_info.num_frags;
2216 MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
2217 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2218 MLX5_SET(wq, wq, log_wq_stride,
2219 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
2220 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
2221 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
2222 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
2223 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
2225 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
2226 mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
2229 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2230 struct mlx5e_rq_param *param)
2232 struct mlx5_core_dev *mdev = priv->mdev;
2233 void *rqc = param->rqc;
2234 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2236 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2237 MLX5_SET(wq, wq, log_wq_stride,
2238 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
2239 MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2241 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
2244 void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2245 struct mlx5e_sq_param *param)
2247 void *sqc = param->sqc;
2248 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2250 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2251 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
2253 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
2256 void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
2257 struct mlx5e_sq_param *param)
2259 void *sqc = param->sqc;
2260 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2263 allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
2264 !!MLX5_IPSEC_DEV(priv->mdev);
2265 mlx5e_build_sq_param_common(priv, param);
2266 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2267 MLX5_SET(sqc, sqc, allow_swp, allow_swp);
2268 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
2269 param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
2270 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
2273 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2274 struct mlx5e_cq_param *param)
2276 void *cqc = param->cqc;
2278 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
2279 if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
2280 MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
2283 void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2284 struct mlx5e_params *params,
2285 struct mlx5e_xsk_param *xsk,
2286 struct mlx5e_cq_param *param)
2288 struct mlx5_core_dev *mdev = priv->mdev;
2289 bool hw_stridx = false;
2290 void *cqc = param->cqc;
2293 switch (params->rq_wq_type) {
2294 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2295 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
2296 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
2297 hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
2299 default: /* MLX5_WQ_TYPE_CYCLIC */
2300 log_cq_size = params->log_rq_mtu_frames;
2303 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2304 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2305 MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
2306 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
2307 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2310 mlx5e_build_common_cq_param(priv, param);
2311 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2314 void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2315 struct mlx5e_params *params,
2316 struct mlx5e_cq_param *param)
2318 void *cqc = param->cqc;
2320 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2322 mlx5e_build_common_cq_param(priv, param);
2323 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2326 void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2328 struct mlx5e_cq_param *param)
2330 void *cqc = param->cqc;
2332 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2334 mlx5e_build_common_cq_param(priv, param);
2336 param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2339 void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2341 struct mlx5e_sq_param *param)
2343 void *sqc = param->sqc;
2344 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2346 mlx5e_build_sq_param_common(priv, param);
2348 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2349 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2350 mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
2353 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2354 struct mlx5e_params *params,
2355 struct mlx5e_sq_param *param)
2357 void *sqc = param->sqc;
2358 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2360 mlx5e_build_sq_param_common(priv, param);
2361 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2362 param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
2363 mlx5e_build_tx_cq_param(priv, params, &param->cqp);
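/* The ICOSQ must be able to hold one UMR WQE (MLX5E_UMR_WQEBBS WQEBBs) per
 * striding-RQ entry, hence RQ log size plus log2 of the UMR WQE size; a legacy
 * RQ only needs the minimal ICOSQ size.
 */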
2366 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
2367 struct mlx5e_rq_param *rqp)
2369 switch (params->rq_wq_type) {
2370 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2371 return order_base_2(MLX5E_UMR_WQEBBS) +
2372 mlx5e_get_rq_log_wq_sz(rqp->rqc);
2373 default: /* MLX5_WQ_TYPE_CYCLIC */
2374 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2378 static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
2380 if (netdev->hw_features & NETIF_F_HW_TLS_RX)
2381 return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2383 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2386 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2387 struct mlx5e_params *params,
2388 struct mlx5e_channel_param *cparam)
2390 u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
2392 mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
2394 icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
2395 async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
2397 mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
2398 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2399 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2400 mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
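/* Open all data channels: build the shared channel parameter template once,
 * open each channel (attaching an XSK buffer pool where one is registered),
 * then open the optional PTP port channel and the QoS SQs.
 */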
2403 int mlx5e_open_channels(struct mlx5e_priv *priv,
2404 struct mlx5e_channels *chs)
2406 struct mlx5e_channel_param *cparam;
2410 chs->num = chs->params.num_channels;
2412 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2413 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2414 if (!chs->c || !cparam)
2417 mlx5e_build_channel_param(priv, &chs->params, cparam);
2418 for (i = 0; i < chs->num; i++) {
2419 struct xsk_buff_pool *xsk_pool = NULL;
2421 if (chs->params.xdp_prog)
2422 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
2424 err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
2426 goto err_close_channels;
2429 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
2430 err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
2433 goto err_close_channels;
2436 err = mlx5e_qos_open_queues(priv, chs);
2440 mlx5e_health_channels_update(priv);
2446 mlx5e_port_ptp_close(chs->port_ptp);
2449 for (i--; i >= 0; i--)
2450 mlx5e_close_channel(chs->c[i]);
2459 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2463 for (i = 0; i < chs->num; i++)
2464 mlx5e_activate_channel(chs->c[i]);
2467 mlx5e_ptp_activate_channel(chs->port_ptp);
2470 #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
2472 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2477 for (i = 0; i < chs->num; i++) {
2478 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
2480 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
2482 /* Don't wait on the XSK RQ, because the newer xdpsock sample
2483 * doesn't provide any Fill Ring entries at the setup stage.
2487 return err ? -ETIMEDOUT : 0;
2490 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2495 mlx5e_ptp_deactivate_channel(chs->port_ptp);
2497 for (i = 0; i < chs->num; i++)
2498 mlx5e_deactivate_channel(chs->c[i]);
2501 void mlx5e_close_channels(struct mlx5e_channels *chs)
2506 mlx5e_port_ptp_close(chs->port_ptp);
2508 for (i = 0; i < chs->num; i++)
2509 mlx5e_close_channel(chs->c[i]);
2516 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2518 struct mlx5_core_dev *mdev = priv->mdev;
2525 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2526 in = kvzalloc(inlen, GFP_KERNEL);
2530 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2532 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2533 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2535 for (i = 0; i < sz; i++)
2536 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2538 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2540 rqt->enabled = true;
2546 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2548 rqt->enabled = false;
2549 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2552 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2554 struct mlx5e_rqt *rqt = &priv->indir_rqt;
2557 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2559 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2563 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2568 for (ix = 0; ix < priv->max_nch; ix++) {
2569 err = mlx5e_create_rqt(priv, 1 /* size */, &tirs[ix].rqt);
2571 goto err_destroy_rqts;
2577 mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
2578 for (ix--; ix >= 0; ix--)
2579 mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
2584 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
2588 for (i = 0; i < priv->max_nch; i++)
2589 mlx5e_destroy_rqt(priv, &tirs[i].rqt);
2592 static int mlx5e_rx_hash_fn(int hfunc)
2594 return (hfunc == ETH_RSS_HASH_TOP) ?
2595 MLX5_RX_HASH_FN_TOEPLITZ :
2596 MLX5_RX_HASH_FN_INVERTED_XOR8;
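/* Reverse the low 'size' bits of 'a'. Used below to permute indirection
 * table lookups when the XOR8 hash function is selected.
 */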
2599 int mlx5e_bits_invert(unsigned long a, int size)
2604 for (i = 0; i < size; i++)
2605 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2610 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2611 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2615 for (i = 0; i < sz; i++) {
2621 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2622 ix = mlx5e_bits_invert(i, ilog2(sz));
2624 ix = priv->rss_params.indirection_rqt[ix];
2625 rqn = rrp.rss.channels->c[ix]->rq.rqn;
2629 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2633 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2634 struct mlx5e_redirect_rqt_param rrp)
2636 struct mlx5_core_dev *mdev = priv->mdev;
2642 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2643 in = kvzalloc(inlen, GFP_KERNEL);
2647 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2649 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2650 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2651 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2652 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2658 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2659 struct mlx5e_redirect_rqt_param rrp)
2664 if (ix >= rrp.rss.channels->num)
2665 return priv->drop_rq.rqn;
2667 return rrp.rss.channels->c[ix]->rq.rqn;
2670 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2671 struct mlx5e_redirect_rqt_param rrp)
2676 if (priv->indir_rqt.enabled) {
2678 rqtn = priv->indir_rqt.rqtn;
2679 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2682 for (ix = 0; ix < priv->max_nch; ix++) {
2683 struct mlx5e_redirect_rqt_param direct_rrp = {
2686 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
2690 /* Direct RQ Tables */
2691 if (!priv->direct_tir[ix].rqt.enabled)
2694 rqtn = priv->direct_tir[ix].rqt.rqtn;
2695 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2699 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2700 struct mlx5e_channels *chs)
2702 struct mlx5e_redirect_rqt_param rrp = {
2707 .hfunc = priv->rss_params.hfunc,
2712 mlx5e_redirect_rqts(priv, rrp);
2715 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2717 struct mlx5e_redirect_rqt_param drop_rrp = {
2720 .rqn = priv->drop_rq.rqn,
2724 mlx5e_redirect_rqts(priv, drop_rrp);
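/* Default per-traffic-type TIR hash configuration: TCP/UDP flows hash on IP
 * addresses plus L4 ports, IPsec AH/ESP flows on IP addresses plus the SPI,
 * and plain IPv4/IPv6 on IP addresses only.
 */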
2727 static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
2728 [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2729 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2730 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2732 [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2733 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2734 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2736 [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2737 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2738 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2740 [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2741 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2742 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2744 [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2746 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2748 [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2750 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2752 [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2754 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2756 [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2758 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2760 [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2762 .rx_hash_fields = MLX5_HASH_IP,
2764 [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2766 .rx_hash_fields = MLX5_HASH_IP,
2770 struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
2772 return tirc_default_config[tt];
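/* Enable HW LRO in a TIR context. lro_max_ip_payload_size is programmed in
 * units of 256 bytes (hence the >> 8), after subtracting a rough upper
 * bound for the L2/L3 headers from the default LRO WQE size.
 */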
2775 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2777 if (!params->lro_en)
2780 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2782 MLX5_SET(tirc, tirc, lro_enable_mask,
2783 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2784 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2785 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2786 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2787 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2790 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
2791 const struct mlx5e_tirc_config *ttconfig,
2792 void *tirc, bool inner)
2794 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2795 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2797 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
2798 if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
2799 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2800 rx_hash_toeplitz_key);
2801 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2802 rx_hash_toeplitz_key);
2804 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2805 memcpy(rss_key, rss_params->toeplitz_hash_key, len);
2807 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2808 ttconfig->l3_prot_type);
2809 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2810 ttconfig->l4_prot_type);
2811 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2812 ttconfig->rx_hash_fields);
2815 static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
2816 enum mlx5e_traffic_types tt,
2819 *ttconfig = tirc_default_config[tt];
2820 ttconfig->rx_hash_fields = rx_hash_fields;
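/* Re-program the RX hash of all indirect TIRs (and of the inner indirect
 * TIRs used for tunneled traffic, when allocated) after an RSS change. The
 * same 'in' buffer is reused, so the TIR context is zeroed on each
 * iteration.
 */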
2823 void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
2825 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2826 struct mlx5e_rss_params *rss = &priv->rss_params;
2827 struct mlx5_core_dev *mdev = priv->mdev;
2828 int ctxlen = MLX5_ST_SZ_BYTES(tirc);
2829 struct mlx5e_tirc_config ttconfig;
2832 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
2834 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2835 memset(tirc, 0, ctxlen);
2836 mlx5e_update_rx_hash_fields(&ttconfig, tt,
2837 rss->rx_hash_fields[tt]);
2838 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
2839 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
2842 /* Verify that the inner TIR resources were allocated */
2843 if (!priv->inner_indir_tir[0].tirn)
2846 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2847 memset(tirc, 0, ctxlen);
2848 mlx5e_update_rx_hash_fields(&ttconfig, tt,
2849 rss->rx_hash_fields[tt]);
2850 mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
2851 mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
2855 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2857 struct mlx5_core_dev *mdev = priv->mdev;
2866 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2867 in = kvzalloc(inlen, GFP_KERNEL);
2871 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2872 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2874 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2876 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2877 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
2882 for (ix = 0; ix < priv->max_nch; ix++) {
2883 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
2894 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
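/* MTU helpers. The netdev (SW) MTU and the device (HW) MTU differ by the
 * L2 overhead (Ethernet/VLAN headers and FCS); the MLX5E_SW2HW_MTU and
 * MLX5E_HW2SW_MTU macros convert between the two representations.
 */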
2896 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2897 struct mlx5e_params *params, u16 mtu)
2899 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2902 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2906 /* Update vport context MTU */
2907 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2911 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2912 struct mlx5e_params *params, u16 *mtu)
2917 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2918 if (err || !hw_mtu) /* fallback to port oper mtu */
2919 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2921 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2924 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2926 struct mlx5e_params *params = &priv->channels.params;
2927 struct net_device *netdev = priv->netdev;
2928 struct mlx5_core_dev *mdev = priv->mdev;
2932 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2936 mlx5e_query_mtu(mdev, params, &mtu);
2937 if (mtu != params->sw_mtu)
2938 netdev_warn(netdev, "%s: VPort MTU %d is different from netdev mtu %d\n",
2939 __func__, mtu, params->sw_mtu);
2941 params->sw_mtu = mtu;
2945 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
2947 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
2949 struct mlx5e_params *params = &priv->channels.params;
2950 struct net_device *netdev = priv->netdev;
2951 struct mlx5_core_dev *mdev = priv->mdev;
2954 /* MTU range: 68 - hw-specific max */
2955 netdev->min_mtu = ETH_MIN_MTU;
2957 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2958 netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
2962 static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
2966 netdev_reset_tc(netdev);
2971 netdev_set_num_tc(netdev, ntc);
2973 /* Map netdev TCs to offset 0.
2974 * We have our own UP-to-TXQ mapping for QoS.
2976 for (tc = 0; tc < ntc; tc++)
2977 netdev_set_tc_queue(netdev, tc, nch, 0);
2980 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
2982 int qos_queues, nch, ntc, num_txqs, err;
2984 qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
2986 nch = priv->channels.params.num_channels;
2987 ntc = priv->channels.params.num_tc;
2988 num_txqs = nch * ntc + qos_queues;
2989 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
2992 mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
2993 err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
2995 netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
3000 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
3002 struct net_device *netdev = priv->netdev;
3003 int old_num_txqs, old_ntc;
3004 int num_rxqs, nch, ntc;
3007 old_num_txqs = netdev->real_num_tx_queues;
3008 old_ntc = netdev->num_tc;
3010 nch = priv->channels.params.num_channels;
3011 ntc = priv->channels.params.num_tc;
3012 num_rxqs = nch * priv->profile->rq_groups;
3014 mlx5e_netdev_set_tcs(netdev, nch, ntc);
3016 err = mlx5e_update_tx_netdev_queues(priv);
3019 err = netif_set_real_num_rx_queues(netdev, num_rxqs);
3021 netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
3028 /* netif_set_real_num_rx_queues could fail only when nch increased. Only
3029 * one of nch and ntc is changed in this function. That means the call
3030 * to netif_set_real_num_tx_queues below should not fail, because it
3031 * decreases the number of TX queues.
3033 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
3036 mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
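/* Build default XPS CPU masks: channel ix is mapped to the CPUs serving
 * every num_channels-th completion IRQ starting at vector ix.
 */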
3040 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
3041 struct mlx5e_params *params)
3043 struct mlx5_core_dev *mdev = priv->mdev;
3044 int num_comp_vectors, ix, irq;
3046 num_comp_vectors = mlx5_comp_vectors_count(mdev);
3048 for (ix = 0; ix < params->num_channels; ix++) {
3049 cpumask_clear(priv->scratchpad.cpumask);
3051 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
3052 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
3054 cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
3057 netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
3061 int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
3063 u16 count = priv->channels.params.num_channels;
3066 err = mlx5e_update_netdev_queues(priv);
3070 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
3072 if (!netif_is_rxfh_configured(priv->netdev))
3073 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
3074 MLX5E_INDIR_RQT_SIZE, count);
3079 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
3081 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
3083 int i, ch, tc, num_tc;
3085 ch = priv->channels.num;
3086 num_tc = priv->channels.params.num_tc;
3088 for (i = 0; i < ch; i++) {
3089 for (tc = 0; tc < num_tc; tc++) {
3090 struct mlx5e_channel *c = priv->channels.c[i];
3091 struct mlx5e_txqsq *sq = &c->sq[tc];
3093 priv->txq2sq[sq->txq_ix] = sq;
3094 priv->channel_tc2realtxq[i][tc] = i + tc * ch;
3098 if (!priv->channels.port_ptp)
3101 for (tc = 0; tc < num_tc; tc++) {
3102 struct mlx5e_port_ptp *c = priv->channels.port_ptp;
3103 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
3105 priv->txq2sq[sq->txq_ix] = sq;
3106 priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
3110 static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
3112 /* Sync with mlx5e_select_queue. */
3113 WRITE_ONCE(priv->num_tc_x_num_ch,
3114 priv->channels.params.num_tc * priv->channels.num);
3117 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
3119 mlx5e_update_num_tc_x_num_ch(priv);
3120 mlx5e_build_txq_maps(priv);
3121 mlx5e_activate_channels(&priv->channels);
3122 mlx5e_qos_activate_queues(priv);
3123 mlx5e_xdp_tx_enable(priv);
3124 netif_tx_start_all_queues(priv->netdev);
3126 if (mlx5e_is_vport_rep(priv))
3127 mlx5e_add_sqs_fwd_rules(priv);
3129 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
3130 mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
3132 mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
3135 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
3137 mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
3139 mlx5e_redirect_rqts_to_drop(priv);
3141 if (mlx5e_is_vport_rep(priv))
3142 mlx5e_remove_sqs_fwd_rules(priv);
3144 /* FIXME: This is a W/A only for a tx timeout watchdog false alarm when
3145 * polling for inactive tx queues.
3147 netif_tx_stop_all_queues(priv->netdev);
3148 netif_tx_disable(priv->netdev);
3149 mlx5e_xdp_tx_disable(priv);
3150 mlx5e_deactivate_channels(&priv->channels);
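/* Swap in a freshly opened set of channels: deactivate the old set, install
 * the new one, run the optional preactivate hook (rolling back on failure),
 * close the old channels and reactivate. The carrier is forced off for the
 * duration and restored afterwards if it was up.
 */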
3153 static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
3154 struct mlx5e_channels *new_chs,
3155 mlx5e_fp_preactivate preactivate,
3158 struct net_device *netdev = priv->netdev;
3159 struct mlx5e_channels old_chs;
3163 carrier_ok = netif_carrier_ok(netdev);
3164 netif_carrier_off(netdev);
3166 mlx5e_deactivate_priv_channels(priv);
3168 old_chs = priv->channels;
3169 priv->channels = *new_chs;
3171 /* New channels are ready to roll, call the preactivate hook if needed
3172 * to modify HW settings or update kernel parameters.
3175 err = preactivate(priv, context);
3177 priv->channels = old_chs;
3182 mlx5e_close_channels(&old_chs);
3183 priv->profile->update_rx(priv);
3186 mlx5e_activate_priv_channels(priv);
3188 /* restore the carrier if needed */
3190 netif_carrier_on(netdev);
3195 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
3196 struct mlx5e_channels *new_chs,
3197 mlx5e_fp_preactivate preactivate,
3202 err = mlx5e_open_channels(priv, new_chs);
3206 err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
3213 mlx5e_close_channels(new_chs);
3218 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
3220 struct mlx5e_channels new_channels = {};
3222 new_channels.params = priv->channels.params;
3223 return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
3226 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
3228 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
3229 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
3232 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
3233 enum mlx5_port_status state)
3235 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3236 int vport_admin_state;
3238 mlx5_set_port_admin_status(mdev, state);
3240 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
3241 !MLX5_CAP_GEN(mdev, uplink_follow))
3244 if (state == MLX5_PORT_UP)
3245 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
3247 vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3249 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
3252 int mlx5e_open_locked(struct net_device *netdev)
3254 struct mlx5e_priv *priv = netdev_priv(netdev);
3257 set_bit(MLX5E_STATE_OPENED, &priv->state);
3259 err = mlx5e_open_channels(priv, &priv->channels);
3261 goto err_clear_state_opened_flag;
3263 priv->profile->update_rx(priv);
3264 mlx5e_activate_priv_channels(priv);
3265 mlx5e_apply_traps(priv, true);
3266 if (priv->profile->update_carrier)
3267 priv->profile->update_carrier(priv);
3269 mlx5e_queue_update_stats(priv);
3272 err_clear_state_opened_flag:
3273 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3277 int mlx5e_open(struct net_device *netdev)
3279 struct mlx5e_priv *priv = netdev_priv(netdev);
3282 mutex_lock(&priv->state_lock);
3283 err = mlx5e_open_locked(netdev);
3285 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
3286 mutex_unlock(&priv->state_lock);
3291 int mlx5e_close_locked(struct net_device *netdev)
3293 struct mlx5e_priv *priv = netdev_priv(netdev);
3295 /* May already be CLOSED in case a previous configuration operation
3296 * (e.g. an RX/TX queue size change) involving close & open failed.
3298 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3301 mlx5e_apply_traps(priv, false);
3302 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3304 netif_carrier_off(priv->netdev);
3305 mlx5e_deactivate_priv_channels(priv);
3306 mlx5e_close_channels(&priv->channels);
3311 int mlx5e_close(struct net_device *netdev)
3313 struct mlx5e_priv *priv = netdev_priv(netdev);
3316 if (!netif_device_present(netdev))
3319 mutex_lock(&priv->state_lock);
3320 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
3321 err = mlx5e_close_locked(netdev);
3322 mutex_unlock(&priv->state_lock);
3327 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
3329 mlx5_wq_destroy(&rq->wq_ctrl);
3332 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3333 struct mlx5e_rq *rq,
3334 struct mlx5e_rq_param *param)
3336 void *rqc = param->rqc;
3337 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3340 param->wq.db_numa_node = param->wq.buf_numa_node;
3342 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3347 /* Mark as unused given "Drop-RQ" packets never reach XDP */
3348 xdp_rxq_info_unused(&rq->xdp_rxq);
3355 static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
3356 struct mlx5e_cq *cq,
3357 struct mlx5e_cq_param *param)
3359 struct mlx5_core_dev *mdev = priv->mdev;
3361 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3362 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3364 return mlx5e_alloc_cq_common(priv, param, cq);
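/* The drop RQ backs RQT entries whenever no channel RQ is available (e.g.
 * while the device is down); packets steered to it are dropped by HW, and
 * once the RQ reaches RDY state they show up in rx_if_down_packets.
 */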
3367 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3368 struct mlx5e_rq *drop_rq)
3370 struct mlx5_core_dev *mdev = priv->mdev;
3371 struct mlx5e_cq_param cq_param = {};
3372 struct mlx5e_rq_param rq_param = {};
3373 struct mlx5e_cq *cq = &drop_rq->cq;
3376 mlx5e_build_drop_rq_param(priv, &rq_param);
3378 err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
3382 err = mlx5e_create_cq(cq, &cq_param);
3386 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3388 goto err_destroy_cq;
3390 err = mlx5e_create_rq(drop_rq, &rq_param);
3394 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3396 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3401 mlx5e_free_drop_rq(drop_rq);
3404 mlx5e_destroy_cq(cq);
3412 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3414 mlx5e_destroy_rq(drop_rq);
3415 mlx5e_free_drop_rq(drop_rq);
3416 mlx5e_destroy_cq(&drop_rq->cq);
3417 mlx5e_free_cq(&drop_rq->cq);
3420 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
3422 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3424 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
3426 if (MLX5_GET(tisc, tisc, tls_en))
3427 MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
3429 if (mlx5_lag_is_lacp_owner(mdev))
3430 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3432 return mlx5_core_create_tis(mdev, in, tisn);
3435 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3437 mlx5_core_destroy_tis(mdev, tisn);
3440 void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3444 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3445 for (tc = 0; tc < priv->profile->max_tc; tc++)
3446 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3449 static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3451 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
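/* Create one TIS per (LAG port, TC) pair. The prio field carries the TC in
 * its upper bits (tc << 1), and when LAG TX port affinity should be
 * assigned each TIS is pinned to its port (1-based in the TIS context).
 */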
3454 int mlx5e_create_tises(struct mlx5e_priv *priv)
3459 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3460 for (tc = 0; tc < priv->profile->max_tc; tc++) {
3461 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3464 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3466 MLX5_SET(tisc, tisc, prio, tc << 1);
3468 if (mlx5e_lag_should_assign_affinity(priv->mdev))
3469 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
3471 err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
3473 goto err_close_tises;
3480 for (; i >= 0; i--) {
3481 for (tc--; tc >= 0; tc--)
3482 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3483 tc = priv->profile->max_tc;
3489 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3491 mlx5e_destroy_tises(priv);
3494 static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
3495 u32 rqtn, u32 *tirc)
3497 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3498 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3499 MLX5_SET(tirc, tirc, indirect_table, rqtn);
3500 MLX5_SET(tirc, tirc, tunneled_offload_en,
3501 priv->channels.params.tunneled_offload_en);
3503 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
3506 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
3507 enum mlx5e_traffic_types tt,
3510 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3511 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3512 &tirc_default_config[tt], tirc, false);
3515 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
3517 mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
3518 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
3521 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
3522 enum mlx5e_traffic_types tt,
3525 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3526 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3527 &tirc_default_config[tt], tirc, true);
3530 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3532 struct mlx5e_tir *tir;
3540 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3541 in = kvzalloc(inlen, GFP_KERNEL);
3545 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
3546 memset(in, 0, inlen);
3547 tir = &priv->indir_tir[tt];
3548 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3549 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
3550 err = mlx5e_create_tir(priv->mdev, tir, in);
3552 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
3553 goto err_destroy_inner_tirs;
3557 if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
3560 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
3561 memset(in, 0, inlen);
3562 tir = &priv->inner_indir_tir[i];
3563 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3564 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
3565 err = mlx5e_create_tir(priv->mdev, tir, in);
3567 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3568 goto err_destroy_inner_tirs;
3577 err_destroy_inner_tirs:
3578 for (i--; i >= 0; i--)
3579 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3581 for (tt--; tt >= 0; tt--)
3582 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3589 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3591 struct mlx5e_tir *tir;
3598 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3599 in = kvzalloc(inlen, GFP_KERNEL);
3603 for (ix = 0; ix < priv->max_nch; ix++) {
3604 memset(in, 0, inlen);
3606 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3607 mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
3608 err = mlx5e_create_tir(priv->mdev, tir, in);
3610 goto err_destroy_ch_tirs;
3615 err_destroy_ch_tirs:
3616 mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
3617 for (ix--; ix >= 0; ix--)
3618 mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
3626 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3630 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3631 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3633 /* Verify that the inner TIR resources were allocated */
3634 if (!priv->inner_indir_tir[0].tirn)
3637 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3638 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3641 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
3645 for (i = 0; i < priv->max_nch; i++)
3646 mlx5e_destroy_tir(priv->mdev, &tirs[i]);
3649 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3654 for (i = 0; i < chs->num; i++) {
3655 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3663 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3668 for (i = 0; i < chs->num; i++) {
3669 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3677 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
3678 struct tc_mqprio_qopt *mqprio)
3680 struct mlx5e_channels new_channels = {};
3681 u8 tc = mqprio->num_tc;
3684 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3686 if (tc && tc != MLX5E_MAX_NUM_TC)
3689 mutex_lock(&priv->state_lock);
3691 /* MQPRIO is another top-level qdisc that can't be attached
3692 * simultaneously with the offloaded HTB.
3694 if (WARN_ON(priv->htb.maj_id)) {
3699 new_channels.params = priv->channels.params;
3700 new_channels.params.num_tc = tc ? tc : 1;
3702 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3703 struct mlx5e_params old_params;
3705 old_params = priv->channels.params;
3706 priv->channels.params = new_channels.params;
3707 err = mlx5e_num_channels_changed(priv);
3709 priv->channels.params = old_params;
3714 err = mlx5e_safe_switch_channels(priv, &new_channels,
3715 mlx5e_num_channels_changed_ctx, NULL);
3718 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3719 priv->channels.params.num_tc);
3720 mutex_unlock(&priv->state_lock);
3724 static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
3728 switch (htb->command) {
3730 return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
3732 case TC_HTB_DESTROY:
3733 return mlx5e_htb_root_del(priv);
3734 case TC_HTB_LEAF_ALLOC_QUEUE:
3735 res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
3736 htb->rate, htb->ceil, htb->extack);
3741 case TC_HTB_LEAF_TO_INNER:
3742 return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
3743 htb->rate, htb->ceil, htb->extack);
3744 case TC_HTB_LEAF_DEL:
3745 return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
3747 case TC_HTB_LEAF_DEL_LAST:
3748 case TC_HTB_LEAF_DEL_LAST_FORCE:
3749 return mlx5e_htb_leaf_del_last(priv, htb->classid,
3750 htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
3752 case TC_HTB_NODE_MODIFY:
3753 return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
3755 case TC_HTB_LEAF_QUERY_QUEUE:
3756 res = mlx5e_get_txq_by_classid(priv, htb->classid);
3766 static LIST_HEAD(mlx5e_block_cb_list);
3768 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3771 struct mlx5e_priv *priv = netdev_priv(dev);
3772 bool tc_unbind = false;
3775 if (type == TC_SETUP_BLOCK &&
3776 ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
3779 if (!netif_device_present(dev) && !tc_unbind)
3783 case TC_SETUP_BLOCK: {
3784 struct flow_block_offload *f = type_data;
3786 f->unlocked_driver_cb = true;
3787 return flow_block_cb_setup_simple(type_data,
3788 &mlx5e_block_cb_list,
3789 mlx5e_setup_tc_block_cb,
3792 case TC_SETUP_QDISC_MQPRIO:
3793 return mlx5e_setup_tc_mqprio(priv, type_data);
3794 case TC_SETUP_QDISC_HTB:
3795 mutex_lock(&priv->state_lock);
3796 err = mlx5e_setup_tc_htb(priv, type_data);
3797 mutex_unlock(&priv->state_lock);
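/* Fold the per-channel SW counters (regular and XSK RQs, plus the SQs of
 * every TC that was ever opened) into one rtnl_link_stats64 snapshot.
 */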
3804 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3808 for (i = 0; i < priv->max_nch; i++) {
3809 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
3810 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
3811 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3814 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
3815 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
3816 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
3818 for (j = 0; j < priv->max_opened_tc; j++) {
3819 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3821 s->tx_packets += sq_stats->packets;
3822 s->tx_bytes += sq_stats->bytes;
3823 s->tx_dropped += sq_stats->dropped;
3829 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3831 struct mlx5e_priv *priv = netdev_priv(dev);
3832 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3834 if (!netif_device_present(dev))
3837 /* In switchdev mode, the monitor counters don't monitor the
3838 * rx/tx stats of 802_3. The update stats mechanism
3839 * should keep the 802_3 layout counters updated
3841 if (!mlx5e_monitor_counter_supported(priv) ||
3842 mlx5e_is_uplink_rep(priv)) {
3843 /* update HW stats in background for next time */
3844 mlx5e_queue_update_stats(priv);
3847 if (mlx5e_is_uplink_rep(priv)) {
3848 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3849 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3850 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3851 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3853 mlx5e_fold_sw_stats64(priv, stats);
3856 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3858 stats->rx_length_errors =
3859 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3860 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3861 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3862 stats->rx_crc_errors =
3863 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3864 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3865 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3866 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3867 stats->rx_frame_errors;
3868 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3871 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
3873 if (mlx5e_is_uplink_rep(priv))
3874 return; /* no rx mode for uplink rep */
3876 queue_work(priv->wq, &priv->set_rx_mode_work);
3879 static void mlx5e_set_rx_mode(struct net_device *dev)
3881 struct mlx5e_priv *priv = netdev_priv(dev);
3883 mlx5e_nic_set_rx_mode(priv);
3886 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3888 struct mlx5e_priv *priv = netdev_priv(netdev);
3889 struct sockaddr *saddr = addr;
3891 if (!is_valid_ether_addr(saddr->sa_data))
3892 return -EADDRNOTAVAIL;
3894 netif_addr_lock_bh(netdev);
3895 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3896 netif_addr_unlock_bh(netdev);
3898 mlx5e_nic_set_rx_mode(priv);
3903 #define MLX5E_SET_FEATURE(features, feature, enable) \
3904 do { \
3905 if (enable) \
3906 *features |= feature; \
3907 else \
3908 *features &= ~feature; \
3909 } while (0)
3911 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3913 static int set_feature_lro(struct net_device *netdev, bool enable)
3915 struct mlx5e_priv *priv = netdev_priv(netdev);
3916 struct mlx5_core_dev *mdev = priv->mdev;
3917 struct mlx5e_channels new_channels = {};
3918 struct mlx5e_params *cur_params;
3922 mutex_lock(&priv->state_lock);
3924 if (enable && priv->xsk.refcnt) {
3925 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
3931 cur_params = &priv->channels.params;
3932 if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3933 netdev_warn(netdev, "can't set LRO with legacy RQ\n");
3938 reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
3940 new_channels.params = *cur_params;
3941 new_channels.params.lro_en = enable;
3943 if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
3944 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
3945 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
3950 struct mlx5e_params old_params;
3952 old_params = *cur_params;
3953 *cur_params = new_channels.params;
3954 err = mlx5e_modify_tirs_lro(priv);
3956 *cur_params = old_params;
3960 err = mlx5e_safe_switch_channels(priv, &new_channels,
3961 mlx5e_modify_tirs_lro_ctx, NULL);
3963 mutex_unlock(&priv->state_lock);
3967 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3969 struct mlx5e_priv *priv = netdev_priv(netdev);
3972 mlx5e_enable_cvlan_filter(priv);
3974 mlx5e_disable_cvlan_filter(priv);
3979 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
3981 struct mlx5e_priv *priv = netdev_priv(netdev);
3983 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
3984 if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
3986 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3991 if (!enable && priv->htb.maj_id) {
3992 netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
3999 static int set_feature_rx_all(struct net_device *netdev, bool enable)
4001 struct mlx5e_priv *priv = netdev_priv(netdev);
4002 struct mlx5_core_dev *mdev = priv->mdev;
4004 return mlx5_set_port_fcs(mdev, !enable);
4007 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
4009 struct mlx5e_priv *priv = netdev_priv(netdev);
4012 mutex_lock(&priv->state_lock);
4014 priv->channels.params.scatter_fcs_en = enable;
4015 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
4017 priv->channels.params.scatter_fcs_en = !enable;
4019 mutex_unlock(&priv->state_lock);
4024 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
4026 struct mlx5e_priv *priv = netdev_priv(netdev);
4029 mutex_lock(&priv->state_lock);
4031 priv->channels.params.vlan_strip_disable = !enable;
4032 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4035 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
4037 priv->channels.params.vlan_strip_disable = enable;
4040 mutex_unlock(&priv->state_lock);
4045 #ifdef CONFIG_MLX5_EN_ARFS
4046 static int set_feature_arfs(struct net_device *netdev, bool enable)
4048 struct mlx5e_priv *priv = netdev_priv(netdev);
4052 err = mlx5e_arfs_enable(priv);
4054 err = mlx5e_arfs_disable(priv);
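/* Generic feature-toggle dispatcher: call the handler only when the bit
 * actually changes, and write the resulting state back into 'features' so
 * a failed toggle is reflected accurately to the stack.
 */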
4060 static int mlx5e_handle_feature(struct net_device *netdev,
4061 netdev_features_t *features,
4062 netdev_features_t wanted_features,
4063 netdev_features_t feature,
4064 mlx5e_feature_handler feature_handler)
4066 netdev_features_t changes = wanted_features ^ netdev->features;
4067 bool enable = !!(wanted_features & feature);
4070 if (!(changes & feature))
4073 err = feature_handler(netdev, enable);
4075 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
4076 enable ? "Enable" : "Disable", &feature, err);
4080 MLX5E_SET_FEATURE(features, feature, enable);
4084 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
4086 netdev_features_t oper_features = netdev->features;
4089 #define MLX5E_HANDLE_FEATURE(feature, handler) \
4090 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
4092 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
4093 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
4094 set_feature_cvlan_filter);
4095 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
4096 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
4097 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
4098 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
4099 #ifdef CONFIG_MLX5_EN_ARFS
4100 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
4102 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
4105 netdev->features = oper_features;
4112 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
4113 netdev_features_t features)
4115 struct mlx5e_priv *priv = netdev_priv(netdev);
4116 struct mlx5e_params *params;
4118 mutex_lock(&priv->state_lock);
4119 params = &priv->channels.params;
4120 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
4121 /* HW strips the outer C-tag header, this is a problem
4122 * for S-tag traffic.
4124 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4125 if (!params->vlan_strip_disable)
4126 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
4129 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
4130 if (features & NETIF_F_LRO) {
4131 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
4132 features &= ~NETIF_F_LRO;
4136 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
4137 features &= ~NETIF_F_RXHASH;
4138 if (netdev->features & NETIF_F_RXHASH)
4139 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
4142 mutex_unlock(&priv->state_lock);
4147 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
4148 struct mlx5e_channels *chs,
4149 struct mlx5e_params *new_params,
4150 struct mlx5_core_dev *mdev)
4154 for (ix = 0; ix < chs->params.num_channels; ix++) {
4155 struct xsk_buff_pool *xsk_pool =
4156 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
4157 struct mlx5e_xsk_param xsk;
4162 mlx5e_build_xsk_param(xsk_pool, &xsk);
4164 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
4165 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
4166 int max_mtu_frame, max_mtu_page, max_mtu;
4168 /* Two criteria must be met:
4169 * 1. HW MTU + all headrooms <= XSK frame size.
4170 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
4172 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
4173 max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
4174 max_mtu = min(max_mtu_frame, max_mtu_page);
4176 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
4177 new_params->sw_mtu, ix, max_mtu);
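/* MTU change flow: validate the new value against XDP linear-SKB and XSK
 * constraints, then either update sw_mtu in place when no channel reset is
 * required, or go through a full safe channel switch with the preactivate
 * hook.
 */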
4185 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
4186 mlx5e_fp_preactivate preactivate)
4188 struct mlx5e_priv *priv = netdev_priv(netdev);
4189 struct mlx5e_channels new_channels = {};
4190 struct mlx5e_params *params;
4194 mutex_lock(&priv->state_lock);
4196 params = &priv->channels.params;
4198 reset = !params->lro_en;
4199 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
4201 new_channels.params = *params;
4202 new_channels.params.sw_mtu = new_mtu;
4203 err = mlx5e_validate_params(priv, &new_channels.params);
4207 if (params->xdp_prog &&
4208 !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
4209 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP is enabled\n",
4210 new_mtu, mlx5e_xdp_max_mtu(params, NULL));
4215 if (priv->xsk.refcnt &&
4216 !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
4217 &new_channels.params, priv->mdev)) {
4222 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
4223 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
4224 &new_channels.params,
4226 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
4227 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
4229 /* If XSK is active, XSK RQs are linear. */
4230 is_linear |= priv->xsk.refcnt;
4232 /* Always reset in linear mode - hw_mtu is used in data path. */
4233 reset = reset && (is_linear || (ppw_old != ppw_new));
4237 unsigned int old_mtu = params->sw_mtu;
4239 params->sw_mtu = new_mtu;
4241 err = preactivate(priv, NULL);
4243 params->sw_mtu = old_mtu;
4247 netdev->mtu = params->sw_mtu;
4251 err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
4255 netdev->mtu = new_channels.params.sw_mtu;
4258 mutex_unlock(&priv->state_lock);
4262 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
4264 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
4267 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
4269 struct hwtstamp_config config;
4272 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
4273 (mlx5_clock_get_ptp_index(priv->mdev) == -1))
4276 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4279 /* TX HW timestamp */
4280 switch (config.tx_type) {
4281 case HWTSTAMP_TX_OFF:
4282 case HWTSTAMP_TX_ON:
4288 mutex_lock(&priv->state_lock);
4289 /* RX HW timestamp */
4290 switch (config.rx_filter) {
4291 case HWTSTAMP_FILTER_NONE:
4292 /* Reset CQE compression to Admin default */
4293 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
4295 case HWTSTAMP_FILTER_ALL:
4296 case HWTSTAMP_FILTER_SOME:
4297 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4298 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4299 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4300 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4301 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4302 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4303 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4304 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4305 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4306 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4307 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4308 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4309 case HWTSTAMP_FILTER_NTP_ALL:
4310 /* Disable CQE compression */
4311 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4312 netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
4313 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
4315 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
4316 mutex_unlock(&priv->state_lock);
4319 config.rx_filter = HWTSTAMP_FILTER_ALL;
4322 mutex_unlock(&priv->state_lock);
4326 memcpy(&priv->tstamp, &config, sizeof(config));
4327 mutex_unlock(&priv->state_lock);
4329 /* might need to fix some features */
4330 netdev_update_features(priv->netdev);
4332 return copy_to_user(ifr->ifr_data, &config,
4333 sizeof(config)) ? -EFAULT : 0;
4336 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
4338 struct hwtstamp_config *cfg = &priv->tstamp;
4340 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4343 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
4346 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4348 struct mlx5e_priv *priv = netdev_priv(dev);
4352 return mlx5e_hwstamp_set(priv, ifr);
4354 return mlx5e_hwstamp_get(priv, ifr);
4360 #ifdef CONFIG_MLX5_ESWITCH
4361 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
4363 struct mlx5e_priv *priv = netdev_priv(dev);
4364 struct mlx5_core_dev *mdev = priv->mdev;
4366 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4369 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4372 struct mlx5e_priv *priv = netdev_priv(dev);
4373 struct mlx5_core_dev *mdev = priv->mdev;
4375 if (vlan_proto != htons(ETH_P_8021Q))
4376 return -EPROTONOSUPPORT;
4378 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4382 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4384 struct mlx5e_priv *priv = netdev_priv(dev);
4385 struct mlx5_core_dev *mdev = priv->mdev;
4387 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4390 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4392 struct mlx5e_priv *priv = netdev_priv(dev);
4393 struct mlx5_core_dev *mdev = priv->mdev;
4395 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4398 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4401 struct mlx5e_priv *priv = netdev_priv(dev);
4402 struct mlx5_core_dev *mdev = priv->mdev;
4404 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4405 max_tx_rate, min_tx_rate);
4408 static int mlx5_vport_link2ifla(u8 esw_link)
4411 case MLX5_VPORT_ADMIN_STATE_DOWN:
4412 return IFLA_VF_LINK_STATE_DISABLE;
4413 case MLX5_VPORT_ADMIN_STATE_UP:
4414 return IFLA_VF_LINK_STATE_ENABLE;
4416 return IFLA_VF_LINK_STATE_AUTO;
4419 static int mlx5_ifla_link2vport(u8 ifla_link)
4421 switch (ifla_link) {
4422 case IFLA_VF_LINK_STATE_DISABLE:
4423 return MLX5_VPORT_ADMIN_STATE_DOWN;
4424 case IFLA_VF_LINK_STATE_ENABLE:
4425 return MLX5_VPORT_ADMIN_STATE_UP;
4427 return MLX5_VPORT_ADMIN_STATE_AUTO;
4430 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4433 struct mlx5e_priv *priv = netdev_priv(dev);
4434 struct mlx5_core_dev *mdev = priv->mdev;
4436 if (mlx5e_is_uplink_rep(priv))
4439 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4440 mlx5_ifla_link2vport(link_state));
4443 int mlx5e_get_vf_config(struct net_device *dev,
4444 int vf, struct ifla_vf_info *ivi)
4446 struct mlx5e_priv *priv = netdev_priv(dev);
4447 struct mlx5_core_dev *mdev = priv->mdev;
4450 if (!netif_device_present(dev))
4453 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4456 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
4460 int mlx5e_get_vf_stats(struct net_device *dev,
4461 int vf, struct ifla_vf_stats *vf_stats)
4463 struct mlx5e_priv *priv = netdev_priv(dev);
4464 struct mlx5_core_dev *mdev = priv->mdev;
4466 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4471 mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
4473 struct mlx5e_priv *priv = netdev_priv(dev);
4475 if (!netif_device_present(dev))
4478 if (!mlx5e_is_uplink_rep(priv))
4481 return mlx5e_rep_has_offload_stats(dev, attr_id);
4485 mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
4488 struct mlx5e_priv *priv = netdev_priv(dev);
4490 if (!mlx5e_is_uplink_rep(priv))
4493 return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
4497 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
4499 switch (proto_type) {
4501 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
4504 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
4505 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
4511 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
4512 struct sk_buff *skb)
4514 switch (skb->inner_protocol) {
4515 case htons(ETH_P_IP):
4516 case htons(ETH_P_IPV6):
4517 case htons(ETH_P_TEB):
4519 case htons(ETH_P_MPLS_UC):
4520 case htons(ETH_P_MPLS_MC):
4521 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
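/* Decide whether a tunneled packet keeps its CSUM/GSO offloads: GRE and
 * IP-in-IP are gated on device capabilities, while UDP tunnels require the
 * destination port to be offloaded (a programmed VXLAN port, or the default
 * Geneve port when Geneve TX is allowed).
 */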
4526 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4527 struct sk_buff *skb,
4528 netdev_features_t features)
4530 unsigned int offset = 0;
4531 struct udphdr *udph;
4535 switch (vlan_get_protocol(skb)) {
4536 case htons(ETH_P_IP):
4537 proto = ip_hdr(skb)->protocol;
4539 case htons(ETH_P_IPV6):
4540 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
4548 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
4553 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
4557 udph = udp_hdr(skb);
4558 port = be16_to_cpu(udph->dest);
4560 /* Verify if UDP port is being offloaded by HW */
4561 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
4564 #if IS_ENABLED(CONFIG_GENEVE)
4565 /* Support Geneve offload for default UDP port */
4566 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
4572 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
4573 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4576 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4577 struct net_device *netdev,
4578 netdev_features_t features)
4580 struct mlx5e_priv *priv = netdev_priv(netdev);
4582 features = vlan_features_check(skb, features);
4583 features = vxlan_features_check(skb, features);
4585 if (mlx5e_ipsec_feature_check(skb, netdev, features))
4588 /* Validate if the tunneled packet is being offloaded by HW */
4589 if (skb->encapsulation &&
4590 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
4591 return mlx5e_tunnel_features_check(priv, skb, features);
4596 static void mlx5e_tx_timeout_work(struct work_struct *work)
4598 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4600 struct net_device *netdev = priv->netdev;
4604 mutex_lock(&priv->state_lock);
4606 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4609 for (i = 0; i < netdev->real_num_tx_queues; i++) {
4610 struct netdev_queue *dev_queue =
4611 netdev_get_tx_queue(netdev, i);
4612 struct mlx5e_txqsq *sq = priv->txq2sq[i];
4614 if (!netif_xmit_stopped(dev_queue))
4617 if (mlx5e_reporter_tx_timeout(sq))
4618 /* break if we tried to reopen the channels */
4623 mutex_unlock(&priv->state_lock);
4627 static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
4629 struct mlx5e_priv *priv = netdev_priv(dev);
4631 netdev_err(dev, "TX timeout detected\n");
4632 queue_work(priv->wq, &priv->tx_timeout_work);
4635 static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
4637 struct net_device *netdev = priv->netdev;
4638 struct mlx5e_channels new_channels = {};
4640 if (priv->channels.params.lro_en) {
4641 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4645 if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
4647 "XDP is not available on Innova cards with IPsec support\n");
4651 new_channels.params = priv->channels.params;
4652 new_channels.params.xdp_prog = prog;
4654 /* No XSK params: AF_XDP can't be enabled yet at the point of setting
4657 if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
4658 netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
4659 new_channels.params.sw_mtu,
4660 mlx5e_xdp_max_mtu(&new_channels.params, NULL));
4667 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4669 struct bpf_prog *old_prog;
4671 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4672 lockdep_is_held(&rq->priv->state_lock));
4674 bpf_prog_put(old_prog);
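/* Install or remove an XDP program. A full channel reset is needed only
 * when toggling between "program" and "no program" (the RQ type changes);
 * replacing one program with another only swaps the per-RQ pointer and
 * adjusts the reference counts.
 */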
4677 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4679 struct mlx5e_priv *priv = netdev_priv(netdev);
4680 struct bpf_prog *old_prog;
4681 bool reset, was_opened;
4685 mutex_lock(&priv->state_lock);
4688 err = mlx5e_xdp_allowed(priv, prog);
4693 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4694 /* no need for full reset when exchanging programs */
4695 reset = (!priv->channels.params.xdp_prog || !prog);
4697 if (was_opened && !reset)
4698 /* num_channels is invariant here, so we can take the
4699 * batched reference right upfront.
4701 bpf_prog_add(prog, priv->channels.num);
4703 if (was_opened && reset) {
4704 struct mlx5e_channels new_channels = {};
4706 new_channels.params = priv->channels.params;
4707 new_channels.params.xdp_prog = prog;
4708 mlx5e_set_rq_type(priv->mdev, &new_channels.params);
4709 old_prog = priv->channels.params.xdp_prog;
4711 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
4715 /* exchange programs; keep the extra prog reference we got from the caller
4716 * as long as we don't fail from this point onwards.
4718 old_prog = xchg(&priv->channels.params.xdp_prog, prog);
4722 bpf_prog_put(old_prog);
4724 if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
4725 mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
4727 if (!was_opened || reset)
4730 /* exchanging programs w/o reset, we update ref counts on behalf
4731 * of the channels' RQs here.
4733 for (i = 0; i < priv->channels.num; i++) {
4734 struct mlx5e_channel *c = priv->channels.c[i];
4736 mlx5e_rq_replace_xdp_prog(&c->rq, prog);
4737 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
4738 mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
4742 mutex_unlock(&priv->state_lock);
4746 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4748 switch (xdp->command) {
4749 case XDP_SETUP_PROG:
4750 return mlx5e_xdp_set(dev, xdp->prog);
4751 case XDP_SETUP_XSK_POOL:
4752 return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
4759 #ifdef CONFIG_MLX5_ESWITCH
4760 static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4761 struct net_device *dev, u32 filter_mask,
4764 struct mlx5e_priv *priv = netdev_priv(dev);
4765 struct mlx5_core_dev *mdev = priv->mdev;
4769 err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
4772 mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
4773 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4775 0, 0, nlflags, filter_mask, NULL);
4778 static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4779 u16 flags, struct netlink_ext_ack *extack)
4781 struct mlx5e_priv *priv = netdev_priv(dev);
4782 struct mlx5_core_dev *mdev = priv->mdev;
4783 struct nlattr *attr, *br_spec;
4784 u16 mode = BRIDGE_MODE_UNDEF;
4788 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4792 nla_for_each_nested(attr, br_spec, rem) {
4793 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4796 if (nla_len(attr) < sizeof(mode))
4799 mode = nla_get_u16(attr);
4800 if (mode > BRIDGE_MODE_VEPA)
4806 if (mode == BRIDGE_MODE_UNDEF)
4809 setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
4810 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
4814 const struct net_device_ops mlx5e_netdev_ops = {
4815 .ndo_open = mlx5e_open,
4816 .ndo_stop = mlx5e_close,
4817 .ndo_start_xmit = mlx5e_xmit,
4818 .ndo_setup_tc = mlx5e_setup_tc,
4819 .ndo_select_queue = mlx5e_select_queue,
4820 .ndo_get_stats64 = mlx5e_get_stats,
4821 .ndo_set_rx_mode = mlx5e_set_rx_mode,
4822 .ndo_set_mac_address = mlx5e_set_mac,
4823 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
4824 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
4825 .ndo_set_features = mlx5e_set_features,
4826 .ndo_fix_features = mlx5e_fix_features,
4827 .ndo_change_mtu = mlx5e_change_nic_mtu,
4828 .ndo_do_ioctl = mlx5e_ioctl,
4829 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
4830 .ndo_features_check = mlx5e_features_check,
4831 .ndo_tx_timeout = mlx5e_tx_timeout,
4832 .ndo_bpf = mlx5e_xdp,
4833 .ndo_xdp_xmit = mlx5e_xdp_xmit,
4834 .ndo_xsk_wakeup = mlx5e_xsk_wakeup,
4835 #ifdef CONFIG_MLX5_EN_ARFS
4836 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4838 #ifdef CONFIG_MLX5_ESWITCH
4839 .ndo_bridge_setlink = mlx5e_bridge_setlink,
4840 .ndo_bridge_getlink = mlx5e_bridge_getlink,
4842 /* SRIOV E-Switch NDOs */
4843 .ndo_set_vf_mac = mlx5e_set_vf_mac,
4844 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
4845 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
4846 .ndo_set_vf_trust = mlx5e_set_vf_trust,
4847 .ndo_set_vf_rate = mlx5e_set_vf_rate,
4848 .ndo_get_vf_config = mlx5e_get_vf_config,
4849 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
4850 .ndo_get_vf_stats = mlx5e_get_vf_stats,
4851 .ndo_has_offload_stats = mlx5e_has_offload_stats,
4852 .ndo_get_offload_stats = mlx5e_get_offload_stats,
4854 .ndo_get_devlink_port = mlx5e_get_devlink_port,
4857 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
4862 for (i = 0; i < len; i++)
4863 indirection_rqt[i] = i % num_channels;
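/*
 * Worked example (illustrative): with num_channels = 3 and len = 8, the
 * default indirection table built above is
 *
 *     indirection_rqt[] = { 0, 1, 2, 0, 1, 2, 0, 1 };
 *
 * i.e. RX flows are spread round-robin across the available channels.
 */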
4866 static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
4871 mlx5e_port_max_linkspeed(mdev, &link_speed);
4872 pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
4873 mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4874 link_speed, pci_bw);
4876 #define MLX5E_SLOW_PCI_RATIO (2)
4878 return link_speed && pci_bw &&
4879 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
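/*
 * Worked example (illustrative, rough numbers): a 100000 Mbps (100 GbE) port
 * behind a PCIe Gen3 x4 link (~31500 Mbps available) satisfies
 *
 *     link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw    (100000 > 2 * 31500)
 *
 * so the PCI link is treated as the bottleneck, while a Gen3 x16 slot
 * (~126000 Mbps) would not trigger the heuristic.
 */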
4882 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
4884 struct dim_cq_moder moder;
4886 moder.cq_period_mode = cq_period_mode;
4887 moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4888 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4889 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4890 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4895 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4897 struct dim_cq_moder moder;
4899 moder.cq_period_mode = cq_period_mode;
4900 moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4901 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
4902 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4903 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4908 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4910 return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
4911 DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4912 DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4915 void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
4917 if (params->tx_dim_enabled) {
4918 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4920 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4922 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4926 void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
4928 if (params->rx_dim_enabled) {
4929 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4931 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4933 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
4937 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4939 mlx5e_reset_tx_moderation(params, cq_period_mode);
4940 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4941 params->tx_cq_moderation.cq_period_mode ==
4942 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4945 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4947 mlx5e_reset_rx_moderation(params, cq_period_mode);
4948 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4949 params->rx_cq_moderation.cq_period_mode ==
4950 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4953 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4957 /* The supported periods are organized in ascending order */
4958 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4959 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4962 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
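/*
 * Illustrative sketch (hypothetical capability values): if the device reports
 * supported LRO timer periods of { 8, 16, 32, 64 } usec, a wanted timeout of
 * 32 stops the loop at the first period >= 32 and returns 32, while a wanted
 * timeout of 100 falls through and returns the largest supported period, 64.
 */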
4965 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
4966 struct mlx5e_params *params)
4968 /* Prefer Striding RQ, unless any of the following holds:
4969 * - Striding RQ configuration is not possible/supported.
4970 * - Slow PCI heuristic.
4971 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
4973 * No XSK params: checking the availability of striding RQ in general.
4975 if (!slow_pci_heuristic(mdev) &&
4976 mlx5e_striding_rq_possible(mdev, params) &&
4977 (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
4978 !mlx5e_rx_is_linear_skb(params, NULL)))
4979 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
4980 mlx5e_set_rq_type(mdev, params);
4981 mlx5e_init_rq_type_params(mdev, params);
4984 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
4987 enum mlx5e_traffic_types tt;
4989 rss_params->hfunc = ETH_RSS_HASH_TOP;
4990 netdev_rss_key_fill(rss_params->toeplitz_hash_key,
4991 sizeof(rss_params->toeplitz_hash_key));
4992 mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
4993 MLX5E_INDIR_RQT_SIZE, num_channels);
4994 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
4995 rss_params->rx_hash_fields[tt] =
4996 tirc_default_config[tt].rx_hash_fields;
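/*
 * Note (illustrative): ETH_RSS_HASH_TOP selects the Toeplitz hash, and the
 * key is filled from the kernel's boot-time random RSS key, so the
 * flow-to-channel mapping changes across reboots unless userspace overrides
 * it, e.g. with "ethtool -X eth0 hkey <key>" (eth0 being a placeholder).
 */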
4999 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
5001 struct mlx5e_rss_params *rss_params = &priv->rss_params;
5002 struct mlx5e_params *params = &priv->channels.params;
5003 struct mlx5_core_dev *mdev = priv->mdev;
5004 u8 rx_cq_period_mode;
5006 priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
5008 params->sw_mtu = mtu;
5009 params->hard_mtu = MLX5E_ETH_HARD_MTU;
5010 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
5015 params->log_sq_size = is_kdump_kernel() ?
5016 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
5017 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
5018 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
5019 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
5022 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
5023 MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
5025 /* set CQE compression */
5026 params->rx_cqe_compress_def = false;
5027 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
5028 MLX5_CAP_GEN(mdev, vport_group_manager))
5029 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
5031 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
5032 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
5035 mlx5e_build_rq_params(mdev, params);
5038 if (MLX5_CAP_ETH(mdev, lro_cap) &&
5039 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
5040 /* No XSK params: checking the availability of striding RQ in general. */
5041 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
5042 params->lro_en = !slow_pci_heuristic(mdev);
5044 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
5046 /* CQ moderation params */
5047 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
5048 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
5049 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
5050 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
5051 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
5052 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
5053 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
5056 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
5059 mlx5e_build_rss_params(rss_params, params->num_channels);
5060 params->tunneled_offload_en =
5061 mlx5e_tunnel_inner_ft_supported(mdev);
5066 /* Do not update netdev->features directly here;
5067 * mlx5e_update_features() is called from mlx5e_attach_netdev().
5068 * To change netdev->features, modify mlx5e_fix_features() instead.
5072 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
5074 struct mlx5e_priv *priv = netdev_priv(netdev);
5076 mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
5077 if (is_zero_ether_addr(netdev->dev_addr) &&
5078 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
5079 eth_hw_addr_random(netdev);
5080 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
5084 static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
5085 unsigned int entry, struct udp_tunnel_info *ti)
5087 struct mlx5e_priv *priv = netdev_priv(netdev);
5089 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
5092 static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
5093 unsigned int entry, struct udp_tunnel_info *ti)
5095 struct mlx5e_priv *priv = netdev_priv(netdev);
5097 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
5100 void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
5102 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
5105 priv->nic_info.set_port = mlx5e_vxlan_set_port;
5106 priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
5107 priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
5108 UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
5109 priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
5110 /* Don't count the space hard-coded to the IANA port */
5111 priv->nic_info.tables[0].n_entries =
5112 mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
5114 priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
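/*
 * Note (illustrative): UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN tells the
 * udp_tunnel_nic core that the IANA-assigned VXLAN port (4789) is offloaded
 * unconditionally, which is why one entry is subtracted from the table size
 * above.
 */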
5117 static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
5121 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
5122 if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt)))
5125 return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
5128 static void mlx5e_build_nic_netdev(struct net_device *netdev)
5130 struct mlx5e_priv *priv = netdev_priv(netdev);
5131 struct mlx5_core_dev *mdev = priv->mdev;
5135 SET_NETDEV_DEV(netdev, mdev->device);
5137 netdev->netdev_ops = &mlx5e_netdev_ops;
5139 mlx5e_dcbnl_build_netdev(netdev);
5141 netdev->watchdog_timeo = 15 * HZ;
5143 netdev->ethtool_ops = &mlx5e_ethtool_ops;
5145 netdev->vlan_features |= NETIF_F_SG;
5146 netdev->vlan_features |= NETIF_F_HW_CSUM;
5147 netdev->vlan_features |= NETIF_F_GRO;
5148 netdev->vlan_features |= NETIF_F_TSO;
5149 netdev->vlan_features |= NETIF_F_TSO6;
5150 netdev->vlan_features |= NETIF_F_RXCSUM;
5151 netdev->vlan_features |= NETIF_F_RXHASH;
5153 netdev->mpls_features |= NETIF_F_SG;
5154 netdev->mpls_features |= NETIF_F_HW_CSUM;
5155 netdev->mpls_features |= NETIF_F_TSO;
5156 netdev->mpls_features |= NETIF_F_TSO6;
5158 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
5159 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
5161 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
5162 mlx5e_check_fragmented_striding_rq_cap(mdev))
5163 netdev->vlan_features |= NETIF_F_LRO;
5165 netdev->hw_features = netdev->vlan_features;
5166 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
5167 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
5168 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5169 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
5171 if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
5172 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
5173 netdev->hw_enc_features |= NETIF_F_TSO;
5174 netdev->hw_enc_features |= NETIF_F_TSO6;
5175 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
5178 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
5179 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
5180 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5181 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
5182 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5183 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
5184 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
5185 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5188 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
5189 netdev->hw_features |= NETIF_F_GSO_GRE |
5190 NETIF_F_GSO_GRE_CSUM;
5191 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
5192 NETIF_F_GSO_GRE_CSUM;
5193 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
5194 NETIF_F_GSO_GRE_CSUM;
5197 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
5198 netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
5200 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
5202 netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
5206 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
5207 netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
5208 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
5209 netdev->features |= NETIF_F_GSO_UDP_L4;
5211 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
5214 netdev->hw_features |= NETIF_F_RXALL;
5216 if (MLX5_CAP_ETH(mdev, scatter_fcs))
5217 netdev->hw_features |= NETIF_F_RXFCS;
5219 netdev->features = netdev->hw_features;
5223 netdev->features &= ~NETIF_F_RXALL;
5224 netdev->features &= ~NETIF_F_LRO;
5225 netdev->features &= ~NETIF_F_RXFCS;
5227 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
5228 if (FT_CAP(flow_modify_en) &&
5229 FT_CAP(modify_root) &&
5230 FT_CAP(identified_miss_table_mode) &&
5231 FT_CAP(flow_table_modify)) {
5232 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
5233 netdev->hw_features |= NETIF_F_HW_TC;
5235 #ifdef CONFIG_MLX5_EN_ARFS
5236 netdev->hw_features |= NETIF_F_NTUPLE;
5239 if (mlx5_qos_is_supported(mdev))
5240 netdev->features |= NETIF_F_HW_TC;
5242 netdev->features |= NETIF_F_HIGHDMA;
5243 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5245 netdev->priv_flags |= IFF_UNICAST_FLT;
5247 mlx5e_set_netdev_dev_addr(netdev);
5248 mlx5e_ipsec_build_netdev(priv);
5249 mlx5e_tls_build_netdev(priv);
5252 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
5254 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
5255 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
5256 struct mlx5_core_dev *mdev = priv->mdev;
5259 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
5260 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5263 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5265 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5267 priv->drop_rq_q_counter =
5268 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5271 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
5273 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
5275 MLX5_SET(dealloc_q_counter_in, in, opcode,
5276 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
5277 if (priv->q_counter) {
5278 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5280 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5283 if (priv->drop_rq_q_counter) {
5284 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5285 priv->drop_rq_q_counter);
5286 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5290 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
5291 struct net_device *netdev)
5293 struct mlx5e_priv *priv = netdev_priv(netdev);
5296 mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
5297 mlx5e_vxlan_set_netdev_info(priv);
5299 mlx5e_timestamp_init(priv);
5301 err = mlx5e_ipsec_init(priv);
5303 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
5305 err = mlx5e_tls_init(priv);
5307 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
5309 mlx5e_health_create_reporters(priv);
5314 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
5316 mlx5e_health_destroy_reporters(priv);
5317 mlx5e_tls_cleanup(priv);
5318 mlx5e_ipsec_cleanup(priv);
5321 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
5323 struct mlx5_core_dev *mdev = priv->mdev;
5326 mlx5e_create_q_counters(priv);
5328 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
5330 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
5331 goto err_destroy_q_counters;
5334 err = mlx5e_create_indirect_rqt(priv);
5336 goto err_close_drop_rq;
5338 err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
5340 goto err_destroy_indirect_rqts;
5342 err = mlx5e_create_indirect_tirs(priv, true);
5344 goto err_destroy_direct_rqts;
5346 err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
5348 goto err_destroy_indirect_tirs;
5350 err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
5352 goto err_destroy_direct_tirs;
5354 err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
5356 goto err_destroy_xsk_rqts;
5358 err = mlx5e_create_flow_steering(priv);
5360 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
5361 goto err_destroy_xsk_tirs;
5364 err = mlx5e_tc_nic_init(priv);
5366 goto err_destroy_flow_steering;
5368 err = mlx5e_accel_init_rx(priv);
5370 goto err_tc_nic_cleanup;
5372 #ifdef CONFIG_MLX5_EN_ARFS
5373 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
5379 mlx5e_tc_nic_cleanup(priv);
5380 err_destroy_flow_steering:
5381 mlx5e_destroy_flow_steering(priv);
5382 err_destroy_xsk_tirs:
5383 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
5384 err_destroy_xsk_rqts:
5385 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
5386 err_destroy_direct_tirs:
5387 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
5388 err_destroy_indirect_tirs:
5389 mlx5e_destroy_indirect_tirs(priv);
5390 err_destroy_direct_rqts:
5391 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
5392 err_destroy_indirect_rqts:
5393 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
5395 mlx5e_close_drop_rq(&priv->drop_rq);
5396 err_destroy_q_counters:
5397 mlx5e_destroy_q_counters(priv);
5401 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
5403 mlx5e_accel_cleanup_rx(priv);
5404 mlx5e_tc_nic_cleanup(priv);
5405 mlx5e_destroy_flow_steering(priv);
5406 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
5407 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
5408 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
5409 mlx5e_destroy_indirect_tirs(priv);
5410 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
5411 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
5412 mlx5e_close_drop_rq(&priv->drop_rq);
5413 mlx5e_destroy_q_counters(priv);
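/*
 * Note: the teardown above mirrors mlx5e_init_nic_rx() in reverse order,
 * matching the error-unwind labels of that function.
 */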
5416 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
5420 err = mlx5e_create_tises(priv);
5422 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
5426 mlx5e_dcbnl_initialize(priv);
5430 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
5432 struct net_device *netdev = priv->netdev;
5433 struct mlx5_core_dev *mdev = priv->mdev;
5435 mlx5e_init_l2_addr(priv);
5437 /* Mark the link as currently not needed by the driver */
5438 if (!netif_running(netdev))
5439 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
5441 mlx5e_set_netdev_mtu_boundaries(priv);
5442 mlx5e_set_dev_port_mtu(priv);
5444 mlx5_lag_add(mdev, netdev);
5446 mlx5e_enable_async_events(priv);
5447 mlx5e_enable_blocking_events(priv);
5448 if (mlx5e_monitor_counter_supported(priv))
5449 mlx5e_monitor_counter_init(priv);
5451 mlx5e_hv_vhca_stats_create(priv);
5452 if (netdev->reg_state != NETREG_REGISTERED)
5454 mlx5e_dcbnl_init_app(priv);
5456 mlx5e_nic_set_rx_mode(priv);
5459 if (netif_running(netdev))
5461 udp_tunnel_nic_reset_ntf(priv->netdev);
5462 netif_device_attach(netdev);
5466 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
5468 struct mlx5_core_dev *mdev = priv->mdev;
5470 if (priv->netdev->reg_state == NETREG_REGISTERED)
5471 mlx5e_dcbnl_delete_app(priv);
5474 if (netif_running(priv->netdev))
5475 mlx5e_close(priv->netdev);
5476 netif_device_detach(priv->netdev);
5479 mlx5e_nic_set_rx_mode(priv);
5481 mlx5e_hv_vhca_stats_destroy(priv);
5482 if (mlx5e_monitor_counter_supported(priv))
5483 mlx5e_monitor_counter_cleanup(priv);
5485 mlx5e_disable_blocking_events(priv);
5486 if (priv->en_trap) {
5487 mlx5e_deactivate_trap(priv);
5488 mlx5e_close_trap(priv->en_trap);
5489 priv->en_trap = NULL;
5491 mlx5e_disable_async_events(priv);
5492 mlx5_lag_remove(mdev);
5493 mlx5_vxlan_reset_to_default(mdev->vxlan);
5496 int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
5498 return mlx5e_refresh_tirs(priv, false, false);
5501 static const struct mlx5e_profile mlx5e_nic_profile = {
5502 .init = mlx5e_nic_init,
5503 .cleanup = mlx5e_nic_cleanup,
5504 .init_rx = mlx5e_init_nic_rx,
5505 .cleanup_rx = mlx5e_cleanup_nic_rx,
5506 .init_tx = mlx5e_init_nic_tx,
5507 .cleanup_tx = mlx5e_cleanup_nic_tx,
5508 .enable = mlx5e_nic_enable,
5509 .disable = mlx5e_nic_disable,
5510 .update_rx = mlx5e_update_nic_rx,
5511 .update_stats = mlx5e_stats_update_ndo_stats,
5512 .update_carrier = mlx5e_update_carrier,
5513 .rx_handlers = &mlx5e_rx_handlers_nic,
5514 .max_tc = MLX5E_MAX_NUM_TC,
5515 .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
5516 .stats_grps = mlx5e_nic_stats_grps,
5517 .stats_grps_num = mlx5e_nic_stats_grps_num,
5520 /* mlx5e generic netdev management API (move to en_common.c) */
5521 int mlx5e_priv_init(struct mlx5e_priv *priv,
5522 struct net_device *netdev,
5523 struct mlx5_core_dev *mdev)
5525 memset(priv, 0, sizeof(*priv));
5529 priv->netdev = netdev;
5530 priv->msglevel = MLX5E_MSG_LEVEL;
5531 priv->max_opened_tc = 1;
5533 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
5536 mutex_init(&priv->state_lock);
5537 hash_init(priv->htb.qos_tc2node);
5538 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
5539 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
5540 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
5541 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
5543 priv->wq = create_singlethread_workqueue("mlx5e");
5545 goto err_free_cpumask;
5550 free_cpumask_var(priv->scratchpad.cpumask);
5555 void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
5559 destroy_workqueue(priv->wq);
5560 free_cpumask_var(priv->scratchpad.cpumask);
5562 for (i = 0; i < priv->htb.max_qos_sqs; i++)
5563 kfree(priv->htb.qos_sq_stats[i]);
5564 kvfree(priv->htb.qos_sq_stats);
5568 mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
5570 struct net_device *netdev;
5573 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
5575 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5579 err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
5581 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
5582 goto err_free_netdev;
5585 netif_carrier_off(netdev);
5586 dev_net_set(netdev, mlx5_core_net(mdev));
5591 free_netdev(netdev);
5596 static void mlx5e_update_features(struct net_device *netdev)
5598 if (netdev->reg_state != NETREG_REGISTERED)
5599 return; /* features will be updated on netdev registration */
5602 netdev_update_features(netdev);
5606 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
5608 const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
5609 const struct mlx5e_profile *profile = priv->profile;
5613 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
5615 /* max number of channels may have changed */
5616 max_nch = mlx5e_get_max_num_channels(priv->mdev);
5617 if (priv->channels.params.num_channels > max_nch) {
5618 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5619 /* Reducing the number of channels - RXFH has to be reset, and
5620 * mlx5e_num_channels_changed below will build the RQT.
5622 priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
5623 priv->channels.params.num_channels = max_nch;
5625 /* 1. Set the real number of queues in the kernel the first time.
5626 * 2. Set our default XPS cpumask.
5629 * rtnl_lock is required by netif_set_real_num_*_queues in case the
5630 * netdev has been registered by this point (if this function was called
5631 * in the reload or resume flow).
5635 err = mlx5e_num_channels_changed(priv);
5641 err = profile->init_tx(priv);
5645 err = profile->init_rx(priv);
5647 goto err_cleanup_tx;
5649 if (profile->enable)
5650 profile->enable(priv);
5652 mlx5e_update_features(priv->netdev);
5657 profile->cleanup_tx(priv);
5660 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
5661 cancel_work_sync(&priv->update_stats_work);
5665 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
5667 const struct mlx5e_profile *profile = priv->profile;
5669 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
5671 if (profile->disable)
5672 profile->disable(priv);
5673 flush_workqueue(priv->wq);
5675 profile->cleanup_rx(priv);
5676 profile->cleanup_tx(priv);
5677 cancel_work_sync(&priv->update_stats_work);
5681 mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
5682 const struct mlx5e_profile *new_profile, void *new_ppriv)
5684 struct net_device *netdev = priv->netdev;
5685 struct mlx5_core_dev *mdev = priv->mdev;
5688 err = mlx5e_priv_init(priv, netdev, mdev);
5690 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
5693 netif_carrier_off(netdev);
5694 priv->profile = new_profile;
5695 priv->ppriv = new_ppriv;
5696 err = new_profile->init(priv->mdev, priv->netdev);
5699 err = mlx5e_attach_netdev(priv);
5701 new_profile->cleanup(priv);
5705 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
5706 const struct mlx5e_profile *new_profile, void *new_ppriv)
5708 unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
5709 const struct mlx5e_profile *orig_profile = priv->profile;
5710 void *orig_ppriv = priv->ppriv;
5711 int err, rollback_err;
5714 if (new_max_nch != priv->max_nch) {
5715 netdev_warn(priv->netdev,
5716 "%s: Replacing profile with different max channels\n",
5721 /* cleanup old profile */
5722 mlx5e_detach_netdev(priv);
5723 priv->profile->cleanup(priv);
5724 mlx5e_priv_cleanup(priv);
5726 err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
5727 if (err) { /* roll back to original profile */
5728 netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
5736 rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
5738 netdev_err(priv->netdev,
5739 "%s: failed to rollback to orig profile, %d\n",
5740 __func__, rollback_err);
5745 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
5747 struct net_device *netdev = priv->netdev;
5749 mlx5e_priv_cleanup(priv);
5750 free_netdev(netdev);
5753 static int mlx5e_resume(struct auxiliary_device *adev)
5755 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
5756 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
5757 struct net_device *netdev = priv->netdev;
5758 struct mlx5_core_dev *mdev = edev->mdev;
5761 if (netif_device_present(netdev))
5764 err = mlx5e_create_mdev_resources(mdev);
5768 err = mlx5e_attach_netdev(priv);
5770 mlx5e_destroy_mdev_resources(mdev);
5777 static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
5779 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
5780 struct net_device *netdev = priv->netdev;
5781 struct mlx5_core_dev *mdev = priv->mdev;
5783 if (!netif_device_present(netdev))
5786 mlx5e_detach_netdev(priv);
5787 mlx5e_destroy_mdev_resources(mdev);
5791 static int mlx5e_probe(struct auxiliary_device *adev,
5792 const struct auxiliary_device_id *id)
5794 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
5795 const struct mlx5e_profile *profile = &mlx5e_nic_profile;
5796 struct mlx5_core_dev *mdev = edev->mdev;
5797 struct net_device *netdev;
5798 pm_message_t state = {};
5799 unsigned int txqs, rxqs, ptp_txqs = 0;
5800 struct mlx5e_priv *priv;
5805 if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
5806 ptp_txqs = profile->max_tc;
5808 if (mlx5_qos_is_supported(mdev))
5809 qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
5811 nch = mlx5e_get_max_num_channels(mdev);
5812 txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
5813 rxqs = nch * profile->rq_groups;
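/*
 * Illustrative sizing example (assuming the NIC profile's max_tc is
 * MLX5E_MAX_NUM_TC = 8 and rq_groups is 2, regular + XSK): with nch = 32 and
 * no PTP or QoS SQs, txqs = 32 * 8 = 256 and rxqs = 32 * 2 = 64.
 */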
5814 netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
5816 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
5820 mlx5e_build_nic_netdev(netdev);
5822 priv = netdev_priv(netdev);
5823 dev_set_drvdata(&adev->dev, priv);
5825 priv->profile = profile;
5828 err = mlx5e_devlink_port_register(priv);
5830 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
5831 goto err_destroy_netdev;
5834 err = profile->init(mdev, netdev);
5836 mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
5837 goto err_devlink_cleanup;
5840 err = mlx5e_resume(adev);
5842 mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
5843 goto err_profile_cleanup;
5846 err = register_netdev(netdev);
5848 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
5852 mlx5e_devlink_port_type_eth_set(priv);
5854 mlx5e_dcbnl_init_app(priv);
5858 mlx5e_suspend(adev, state);
5859 err_profile_cleanup:
5860 profile->cleanup(priv);
5861 err_devlink_cleanup:
5862 mlx5e_devlink_port_unregister(priv);
5864 mlx5e_destroy_netdev(priv);
5868 static void mlx5e_remove(struct auxiliary_device *adev)
5870 struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
5871 pm_message_t state = {};
5873 mlx5e_dcbnl_delete_app(priv);
5874 unregister_netdev(priv->netdev);
5875 mlx5e_suspend(adev, state);
5876 priv->profile->cleanup(priv);
5877 mlx5e_devlink_port_unregister(priv);
5878 mlx5e_destroy_netdev(priv);
5881 static const struct auxiliary_device_id mlx5e_id_table[] = {
5882 { .name = MLX5_ADEV_NAME ".eth", },
5886 MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
5888 static struct auxiliary_driver mlx5e_driver = {
5890 .probe = mlx5e_probe,
5891 .remove = mlx5e_remove,
5892 .suspend = mlx5e_suspend,
5893 .resume = mlx5e_resume,
5894 .id_table = mlx5e_id_table,
5897 int mlx5e_init(void)
5901 mlx5e_ipsec_build_inverse_table();
5902 mlx5e_build_ptys2ethtool_map();
5903 ret = mlx5e_rep_init();
5907 ret = auxiliary_driver_register(&mlx5e_driver);
5909 mlx5e_rep_cleanup();
5913 void mlx5e_cleanup(void)
5915 auxiliary_driver_unregister(&mlx5e_driver);
5916 mlx5e_rep_cleanup();