/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
					    enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u16 umr_wqebbs, max_wqebbs;
	bool striding_rq_umr;

	striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
			  MLX5_CAP_ETH(mdev, reg_umr_sq);
	if (!striding_rq_umr)
		return false;

	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
	max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
	/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
	 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
	 */
	if (WARN_ON(umr_wqebbs > max_wqebbs))
		return false;

	return true;
}
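
/* Striding RQ (MPWRQ) background: each RX WQE covers several pages and is
 * split by HW into fixed-size strides, so a single WQE can absorb many
 * packets. The pages behind a WQE are mapped through a UMR (user-mode memory
 * registration) WQE posted on the ICOSQ, which is why the capability check
 * above requires striding_rq, umr_ptr_rlky and reg_umr_sq together.
 */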
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;
	bool up;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	up = port_state == VPORT_STATE_UP;
	if (up == netif_carrier_ok(priv->netdev))
		netif_carrier_event(priv->netdev);
	if (up) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}
void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}
static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
	struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
	int err;

	switch (event) {
	case MLX5_DRIVER_EVENT_TYPE_TRAP:
		err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
		if (err) {
			trap_event_ctx->err = err;
			return NOTIFY_BAD;
		}
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
{
	priv->blocking_events_nb.notifier_call = blocking_event;
	mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
}

static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
{
	mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
{
	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
	u32 sz;

	sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);

	return sz / MLX5_OCTWORD;
}
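
/* Worked example, assuming the aligned UMR mode with 8-byte MTT entries and
 * MLX5_OCTWORD == 16: for 64 entries, sz = ALIGN(64 * 8, MLX5_UMR_FLEX_ALIGNMENT)
 * = 512 bytes, i.e. 512 / 16 = 32 octowords. The ALIGN() keeps the inline
 * translation list at a size the UMR WQE layout accepts.
 */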
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u16 octowords;
	u8 ds_cnt;

	ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
						     rq->mpwqe.umr_mode),
			      MLX5_SEND_WQE_DS);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->umr_mkey = rq->mpwqe.umr_mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
	ucseg->xlt_octowords = cpu_to_be16(octowords);
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
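
/* The control segment above carries the SQ number and the WQE size in data
 * segments (ds_cnt); the UMR control segment describes the inline
 * translation list (xlt_octowords) and, via mkey_mask, lets the UMR update
 * the mkey "free" flag when the WQE executes.
 */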
static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
{
	rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
					 GFP_KERNEL, node);
	if (!rq->mpwqe.shampo)
		return -ENOMEM;

	return 0;
}

static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
{
	kvfree(rq->mpwqe.shampo);
}
static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
{
	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;

	shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
					    node);
	shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
						sizeof(*shampo->info)),
				     GFP_KERNEL, node);
	shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
						 sizeof(*shampo->pages)),
				      GFP_KERNEL, node);
	if (!shampo->bitmap || !shampo->info || !shampo->pages)
		goto err_nomem;

	return 0;

err_nomem:
	kvfree(shampo->info);
	kvfree(shampo->bitmap);
	kvfree(shampo->pages);

	return -ENOMEM;
}
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
	kvfree(rq->mpwqe.shampo->bitmap);
	kvfree(rq->mpwqe.shampo->info);
	kvfree(rq->mpwqe.shampo->pages);
}
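
/* SHAMPO (Split Headers And Merge Payload Offload, the HW GRO engine) keeps
 * a per-WQ header buffer: a bitmap tracking which header slots are in use,
 * per-slot metadata (info) and the pages backing the headers. All three
 * arrays are sized by hd_per_wq and allocated/freed together above.
 */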
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	size_t alloc_size;

	alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info,
						   alloc_units.frag_pages,
						   rq->mpwqe.pages_per_wqe));

	rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
	if (!rq->mpwqe.info)
		return -ENOMEM;

	/* For deferred page release (release right before alloc), make sure
	 * that on first round release is not called.
	 */
	for (int i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);

		bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
	}

	mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}
static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
{
	switch (umr_mode) {
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		return MLX5_MKC_ACCESS_MODE_MTT;
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		return MLX5_MKC_ACCESS_MODE_KSM;
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		return MLX5_MKC_ACCESS_MODE_KLMS;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		return MLX5_MKC_ACCESS_MODE_KSM;
	}
	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
	return 0;
}
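
/* Translation entry kinds, as selected above: MTT is a plain page-address
 * entry (aligned mode); KSM adds a per-entry mkey, so entries may point at
 * unaligned addresses; KLM additionally carries a byte count, which the
 * oversized mode uses to describe XSK chunks smaller than a page. The
 * TRIPLE mode describes each page with four quarter-page KSM entries (note
 * log_page_size being set to page_shift - 2 in mlx5e_create_umr_mkey()).
 */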
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u32 npages, u8 page_shift, u32 *umr_mkey,
				 dma_addr_t filler_addr,
				 enum mlx5e_mpwrq_umr_mode umr_mode,
				 u32 xsk_chunk_size)
{
	struct mlx5_mtt *mtt;
	struct mlx5_ksm *ksm;
	struct mlx5_klm *klm;
	u32 octwords;
	int inlen;
	void *mkc;
	u32 *in;
	int err;
	int i;

	if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
	     umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
	    !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
		mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
		return -EINVAL;
	}

	octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);

	inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
				    MLX5_OCTWORD, octwords);
	if (inlen < 0)
		return inlen;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size, octwords);
	if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
		MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
	else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
		MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);

	/* Initialize the mkey with all MTTs pointing to a default
	 * page (filler_addr). When the channels are activated, UMR
	 * WQEs will redirect the RX WQEs to the actual memory from
	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
	 * to the default page.
	 */
	switch (umr_mode) {
	case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
		klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
		for (i = 0; i < npages; i++) {
			klm[i << 1] = (struct mlx5_klm) {
				.va = cpu_to_be64(filler_addr),
				.bcount = cpu_to_be32(xsk_chunk_size),
				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
			};
			klm[(i << 1) + 1] = (struct mlx5_klm) {
				.va = cpu_to_be64(filler_addr),
				.bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
			};
		}
		break;
	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
		ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
		for (i = 0; i < npages; i++)
			ksm[i] = (struct mlx5_ksm) {
				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
				.va = cpu_to_be64(filler_addr),
			};
		break;
	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
		mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
		for (i = 0; i < npages; i++)
			mtt[i] = (struct mlx5_mtt) {
				.ptag = cpu_to_be64(filler_addr),
			};
		break;
	case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
		ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
		for (i = 0; i < npages * 4; i++) {
			ksm[i] = (struct mlx5_ksm) {
				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
				.va = cpu_to_be64(filler_addr),
			};
		}
		break;
	}

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
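
/* Note that the mkey is created in the "free" state (MLX5_SET(mkc, free, 1)
 * above) and only gets populated with real RX pages later, when UMR WQEs
 * from the ICOSQ rewrite the translation entries. Until then every entry
 * points at the shared filler page, so a stray HW access through the mkey
 * cannot scribble over unrelated memory.
 */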
static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
				     u64 nentries,
				     u32 *umr_mkey)
{
	int inlen;
	void *mkc;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(mkc, mkc, translations_octword_size, nentries);
	MLX5_SET(mkc, mkc, length64, 1);
	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
	u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	u32 num_entries, max_num_entries;
	u32 umr_mkey;
	int err;

	max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);

	/* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
	if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
					    &num_entries) ||
			 num_entries > max_num_entries))
		mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
			      __func__, wq_size, rq->mpwqe.mtts_per_wqe,
			      max_num_entries);

	err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
				    &umr_mkey, rq->wqe_overflow.addr,
				    rq->mpwqe.umr_mode, xsk_chunk_size);
	rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
	return err;
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
				       struct mlx5e_rq *rq)
{
	u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));

	if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
		mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
			      max_klm_size, rq->mpwqe.shampo->hd_per_wq);
		return -EINVAL;
	}
	return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
					 &rq->mpwqe.shampo->mkey);
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag = {};
	struct mlx5e_wqe_frag_info *prev = NULL;
	int i;

	WARN_ON(rq->xsk_pool);

	next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];

	/* Skip first release due to deferred release. */
	next_frag.flags = BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				/* Pages are assigned at runtime. */
				next_frag.frag_page++;
				next_frag.offset = 0;
				if (prev)
					prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
			}

			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
}
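
/* Illustration of the partition above with hypothetical sizes, num_frags == 3
 * and frag_stride == 1536 on 4K pages: frags 0 and 1 of WQE 0 land on page 0
 * at offsets 0 and 1536; frag 2 would need offset 3072 + 1536 > 4096, so it
 * starts page 1 at offset 0 and frag 1 is flagged LAST_IN_PAGE. Only the
 * layout is computed here; the pages themselves are assigned at runtime.
 */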
static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
{
	int i;

	/* Assumptions used by XSK batched allocator. */
	WARN_ON(rq->wqe.info.num_frags != 1);
	WARN_ON(rq->wqe.info.log_num_frags != 0);
	WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);

	/* Considering the above assumptions a fragment maps to a single
	 * xsk_buff.
	 */
	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];

		/* Skip first release due to deferred release as WQES are
		 * not allocated yet.
		 */
		rq->wqe.frags[i].flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
	}
}
static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
{
	int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
	int len = wq_sz << rq->wqe.info.log_num_frags;
	struct mlx5e_wqe_frag_info *frags;
	union mlx5e_alloc_units *aus;
	int aus_sz;

	if (rq->xsk_pool)
		aus_sz = sizeof(*aus->xsk_buffs);
	else
		aus_sz = sizeof(*aus->frag_pages);

	aus = kvzalloc_node(array_size(len, aus_sz), GFP_KERNEL, node);
	if (!aus)
		return -ENOMEM;

	frags = kvzalloc_node(array_size(len, sizeof(*frags)), GFP_KERNEL, node);
	if (!frags) {
		kvfree(aus);
		return -ENOMEM;
	}

	rq->wqe.alloc_units = aus;
	rq->wqe.frags = frags;

	if (rq->xsk_pool)
		mlx5e_init_xsk_buffs(rq);
	else
		mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.frags);
	kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);

	mlx5e_reporter_rq_cqe_err(rq);
}
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
	rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
	if (!rq->wqe_overflow.page)
		return -ENOMEM;

	rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
					     PAGE_SIZE, rq->buff.map_dir);
	if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
		__free_page(rq->wqe_overflow.page);
		return -ENOMEM;
	}

	return 0;
}

static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
	dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
		       rq->buff.map_dir);
	__free_page(rq->wqe_overflow.page);
}
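
/* The page above backs rq->wqe_overflow: the translation table of a striding
 * RQ WQE is padded beyond the pages it really uses, and the padded tail stays
 * mapped to this single shared page (it is the filler_addr passed down from
 * mlx5e_create_rq_umr_mkey()), so an overrunning stride lands somewhere
 * harmless instead of in unrelated memory.
 */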
static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->priv    = c->priv;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->icosq   = &c->icosq;
	rq->ix      = c->ix;
	rq->channel = c;
	rq->mdev    = mdev;
	rq->hw_mtu =
		MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
	rq->xdpsq   = &c->rq_xdpsq;
	rq->stats   = &c->priv->channel_stats[c->ix]->rq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, NULL);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params,
				struct mlx5e_rq_param *rqp,
				struct mlx5e_rq *rq,
				u32 *pool_size,
				int node)
{
	void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
	int wq_size;
	int err;

	if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
		return 0;
	err = mlx5e_rq_shampo_hd_alloc(rq, node);
	if (err)
		goto out;
	rq->mpwqe.shampo->hd_per_wq =
		mlx5e_shampo_hd_per_wq(mdev, params, rqp);
	err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
	if (err)
		goto err_shampo_hd;
	err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
	if (err)
		goto err_shampo_info;
	rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
	if (!rq->hw_gro_data) {
		err = -ENOMEM;
		goto err_hw_gro_data;
	}
	rq->mpwqe.shampo->key =
		cpu_to_be32(rq->mpwqe.shampo->mkey);
	rq->mpwqe.shampo->hd_per_wqe =
		mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
	wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	*pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
		     MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
	return 0;

err_hw_gro_data:
	mlx5e_rq_shampo_hd_info_free(rq);
err_shampo_info:
	mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
err_shampo_hd:
	mlx5e_rq_shampo_hd_free(rq);
out:
	return err;
}
static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
{
	if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
		return;

	kvfree(rq->hw_gro_data);
	mlx5e_rq_shampo_hd_info_free(rq);
	mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
	mlx5e_rq_shampo_hd_free(rq);
}
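
/* mlx5e_alloc_rq() below is the common allocation path for both RQ flavors:
 * MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ (multi-packet WQEs backed by a UMR
 * mkey, plus the optional SHAMPO header buffer) and the cyclic legacy RQ
 * (per-fragment data segments). It also binds the buffer source: the XSK
 * pool when xsk is set, otherwise a freshly created page_pool.
 */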
static int mlx5e_alloc_rq(struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct mlx5e_rq_param *rqp,
			  int node, struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = node;
	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);

	if (params->xdp_prog)
		bpf_prog_inc(params->xdp_prog);
	RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);

	rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
	pool_size = 1 << params->log_rq_mtu_frames;

	rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			goto err_rq_xdp_prog;

		err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
		if (err)
			goto err_rq_wq_destroy;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
		rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
		rq->mpwqe.pages_per_wqe =
			mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
						  rq->mpwqe.umr_mode);
		rq->mpwqe.umr_wqebbs =
			mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
					       rq->mpwqe.umr_mode);
		rq->mpwqe.mtts_per_wqe =
			mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
						 rq->mpwqe.umr_mode);

		pool_size = rq->mpwqe.pages_per_wqe <<
			mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);

		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
			pool_size *= 2; /* additional page per packet for the linear part */

		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		rq->mpwqe.num_strides =
			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
		rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);

		rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_drop_page;

		err = mlx5e_rq_alloc_mpwqe_info(rq, node);
		if (err)
			goto err_rq_mkey;

		err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
		if (err)
			goto err_free_mpwqe_info;

		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			goto err_rq_xdp_prog;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		rq->wqe.info = rqp->frags_info;
		rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;

		err = mlx5e_init_wqe_alloc_info(rq, node);
		if (err)
			goto err_rq_wq_destroy;
	}

	if (xsk) {
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL, NULL);
		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
	} else {
		/* Create a page_pool and register it with rxq */
		struct page_pool_params pp_params = { 0 };

		pp_params.order = 0;
		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
		pp_params.pool_size = pool_size;
		pp_params.nid = node;
		pp_params.dev = rq->pdev;
		pp_params.napi = rq->cq.napi;
		pp_params.dma_dir = rq->buff.map_dir;
		pp_params.max_len = PAGE_SIZE;

		/* page_pool can be used even when there is no rq->xdp_prog,
		 * given page_pool does not handle DMA mapping there is no
		 * required state to clear. And page_pool gracefully handle
		 * elevated refcnt.
		 */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			goto err_free_by_rq_type;
		}
		if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_POOL, rq->page_pool);
	}
	if (err)
		goto err_destroy_page_pool;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
				rq->mpwqe.page_shift;
			u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
				       0 : rq->buff.headroom;

			wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* check if num_frags is not a pow of two */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = params->terminate_lkey_be;
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	return 0;

err_destroy_page_pool:
	page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_shampo(rq);
err_free_mpwqe_info:
		kvfree(rq->mpwqe.info);
err_rq_mkey:
		mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
		mlx5e_free_mpwqe_rq_drop_page(rq);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		mlx5e_free_wqe_alloc_info(rq);
	}
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_rq_xdp_prog:
	if (params->xdp_prog)
		bpf_prog_put(params->xdp_prog);

	return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	struct bpf_prog *old_prog;

	if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
		old_prog = rcu_dereference_protected(rq->xdp_prog,
						     lockdep_is_held(&rq->priv->state_lock));
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
		mlx5e_free_mpwqe_rq_drop_page(rq);
		mlx5e_rq_free_shampo(rq);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		mlx5e_free_wqe_alloc_info(rq);
	}

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	u8 ts_format;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	ts_format = mlx5_is_real_time_rq(mdev) ?
			    MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			    MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, ts_format, ts_format);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
		MLX5_SET(wq, wq, log_headers_buffer_entry_num,
			 order_base_2(rq->mpwqe.shampo->hd_per_wq));
		MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
	}

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
		mlx5e_rqwq_reset(rq);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in);

	kvfree(in);

	return err;
}
static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
{
	struct net_device *dev = rq->netdev;
	int err;

	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
	if (err) {
		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
		return err;
	}
	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err) {
		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
		return err;
	}

	return 0;
}

int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
{
	mlx5e_free_rx_descs(rq);

	return mlx5e_rq_to_ready(rq, curr_state);
}
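
/* RQ objects follow the firmware state machine RST -> RDY (-> ERR). A flush,
 * used by the recovery flow, drains the posted descriptors on the host side
 * and then bounces the RQ through RST back to RDY via mlx5e_rq_to_ready()
 * above, giving HW a clean queue to restart from.
 */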
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in);

	kvfree(in);

	return err;
}
void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	mlx5e_reporter_rx_timeout(rq);
	return -ETIMEDOUT;
}
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq;
	u16 head;
	int i;

	if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return;

	wq = &rq->mpwqe.wq;
	head = wq->head;

	/* Release WQEs that are in missing state: they have been
	 * popped from the list after completion but were not freed
	 * due to deferred release.
	 * Also free the linked-list reserved entry, hence the "+ 1".
	 */
	for (i = 0; i < mlx5_wq_ll_missing(wq) + 1; i++) {
		rq->dealloc_wqe(rq, head);
		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
	}

	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
		u16 len;

		len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
		      (rq->mpwqe.shampo->hd_per_wq - 1);
		mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
		rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
	}

	rq->mpwqe.actual_wq_head = wq->head;
	rq->mpwqe.umr_in_progress = 0;
	rq->mpwqe.umr_completed = 0;
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

		mlx5e_free_rx_missing_descs(rq);

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;
			__be16 wqe_ix_be;

			wqe_ix_be = *wq->tail_next;
			wqe_ix = be16_to_cpu(wqe_ix_be);
			wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}

		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
			mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
						0, true);
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;
		u16 missing = mlx5_wq_cyc_missing(wq);
		u16 head = mlx5_wq_cyc_get_head(wq);

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
		/* Missing slots might also contain unreleased pages due to
		 * deferred release.
		 */
		while (missing--) {
			wqe_ix = mlx5_wq_cyc_ctr2ix(wq, head++);
			rq->dealloc_wqe(rq, wqe_ix);
		}
	}
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
		  struct mlx5e_xsk_param *xsk, int node,
		  struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->mdev;
	int err;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);

	err = mlx5e_alloc_rq(params, xsk, param, node, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);

	/* For CQE compression on striding RQ, use stride index provided by
	 * HW if capability is supported.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
	    MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
		__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);

	/* For enhanced CQE compression packet processing, decompress the
	 * session according to the enhanced layout.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
	    MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
		__set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
}

void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	cancel_work_sync(&rq->recover_work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
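
/* Teardown order in mlx5e_close_rq() matters: pending work is cancelled and
 * the RQ object destroyed in FW first, so HW stops consuming descriptors;
 * only then are the outstanding RX descriptors and host memory released.
 */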
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of
							   * entries of all xmit_modes.
							   */
	size_t size;

	size = array_size(sizeof(*xdpi_fifo->xi), entries);
	xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = entries - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	size_t size;
	int err;

	size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
	sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xsk_buff_pool *xsk_pool,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
	sq->xsk_pool  = xsk_pool;

	sq->stats = sq->xsk_pool ?
		&c->priv->channel_stats[c->ix]->xsksq :
		is_redirect ?
		&c->priv->channel_stats[c->ix]->xdpsq :
		&c->priv->channel_stats[c->ix]->rq_xdpsq;
	sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
					mlx5e_stop_room_for_max_wqe(mdev);
	sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	size_t size;

	size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
	sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	return 0;
}

static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);

	mlx5e_reporter_icosq_cqe_err(sq);
}

static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
					      recover_work);

	/* Not implemented yet. */

	netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq,
			     work_func_t recover_work_func)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel       = c;
	sq->uar_map       = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->reserved_room = param->stop_room;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->recover_work, recover_work_func);

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.skb_fifo.fifo);
	kvfree(sq->db.dma_fifo);
}

int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
							sizeof(*sq->db.skb_fifo.fifo)),
					     GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	sq->db.skb_fifo.pc   = &sq->skb_fifo_pc;
	sq->db.skb_fifo.cc   = &sq->skb_fifo_cc;
	sq->db.skb_fifo.mask = df_sz - 1;

	return 0;
}
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->mdev      = c->mdev;
	sq->channel   = c;
	sq->priv      = c->priv;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	if (mlx5_ipsec_device_caps(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (param->is_mpw)
		set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	u8 ts_format;
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	ts_format = mlx5_is_real_time_sq(mdev) ?
			    MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			    MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc, sqc, cqn, csp->cqn);
	MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	u64 bitmask = 0;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		bitmask |= 1;
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}
	if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
		bitmask |= 1 << 2;
		MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
	}
	MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u16 qos_queue_group_id,
			u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	if (qos_queue_group_id) {
		msp.qos_update = true;
		msp.qos_queue_group_id = qos_queue_group_id;
	}
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
		     struct mlx5e_sq_stats *sq_stats)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	sq->stats = sq_stats;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_DIM);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(sq);

	return err;
}
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
			.num_wqebbs = 1,
		};

		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}
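
/* Roughly, the trailing NOP above (posted only if the SQ has room) forces one
 * last doorbell, so HW keeps generating completions for any WQEs still in
 * flight and the CQ can drain before the SQ is destroyed.
 */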
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5_core_dev *mdev = sq->mdev;
	struct mlx5_rate_limit rl = {0};

	cancel_work_sync(&sq->dim.work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
					      recover_work);

	mlx5e_reporter_tx_err_cqe(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
			    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
			    work_func_t recover_work_func)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
	if (err)
		goto err_free_icosq;

	if (param->is_tls) {
		sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
		if (IS_ERR(sq->ktls_resync)) {
			err = PTR_ERR(sq->ktls_resync);
			goto err_destroy_icosq;
		}
	}
	return 0;

err_destroy_icosq:
	mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_icosq:
	mlx5e_free_icosq(sq);

	return err;
}

void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
	set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
}

void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
	synchronize_net(); /* Sync with NAPI. */
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	if (sq->ktls_resync)
		mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq_descs(sq);
	mlx5e_free_icosq(sq);
}
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);

	if (param->is_xdp_mb)
		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);

	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
		unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
			ds_cnt++;
		}

		/* Pre initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg *eseg = &wqe->eth;

			sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts   = 1,
			};

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	synchronize_net(); /* Sync with NAPI. */

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
		cqe->validity_iteration_count = 0xff;
	}

	cq->mdev = mdev;
	cq->netdev = priv->netdev;
	cq->priv = priv;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_create_cq_param *ccp,
			  struct mlx5e_cq *cq)
{
	int err;

	param->wq.buf_numa_node = ccp->node;
	param->wq.db_numa_node  = ccp->node;
	param->eq_ix            = ccp->ix;

	err = mlx5e_alloc_cq_common(priv, param, cq);

	cq->napi     = ccp->napi;
	cq->ch_stats = ccp->ch_stats;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_alloc_cq(priv, param, ccp, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_create_cq_param *ccp,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
				    ccp, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
{
	int tc;

	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
		if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
			return tc;

	WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
	return -ENOENT;
}

static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
					u32 *hw_id)
{
	int tc;

	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
		*hw_id = 0;
		return 0;
	}

	tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
	if (tc < 0)
		return tc;

	if (tc >= params->mqprio.num_tc) {
		WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
		     tc, params->mqprio.num_tc);
		return -EINVAL;
	}

	*hw_id = params->mqprio.channel.hw_id[tc];
	return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err, tc;

	for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
		int txq_ix = c->ix + tc * params->num_channels;
		u32 qos_queue_group_id;

		err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
		if (err)
			goto err_close_sqs;

		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
				       params, &cparam->txq_sq, &c->sq[tc], tc,
				       qos_queue_group_id,
				       &c->priv->channel_stats[c->ix]->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_params)
{
	int err;

	err = mlx5e_init_rxq_rq(c, params, &c->rq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	struct dim_cq_moder icocq_moder = {0, 0};
	struct mlx5e_create_cq_param ccp;
	int err;

	mlx5e_build_create_cq_param(&ccp, c);

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
			    &c->async_icosq.cq);
	if (err)
		return err;

	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
			    &c->icosq.cq);
	if (err)
		goto err_close_async_icosq_cq;

	err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
			    &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
			    &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
				     &ccp, &c->rq_xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	spin_lock_init(&c->async_icosq_lock);

	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
			       mlx5e_async_icosq_err_cqe_work);
	if (err)
		goto err_close_xdpsq_cq;

	mutex_init(&c->icosq_recovery_lock);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
			       mlx5e_icosq_err_cqe_work);
	if (err)
		goto err_close_async_icosq;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
	if (err)
		goto err_close_sqs;

	if (c->xdp) {
		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
				       &c->rq_xdpsq, false);
		if (err)
			goto err_close_rq;
	}

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
	if (err)
		goto err_close_xdp_sq;

	return 0;

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
err_close_rq:
	mlx5e_close_rq(&c->rq);
err_close_sqs:
	mlx5e_close_sqs(c);
err_close_icosq:
	mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
	mlx5e_close_icosq(&c->async_icosq);
err_close_xdpsq_cq:
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);
err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);
err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);
err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);
err_close_async_icosq_cq:
	mlx5e_close_cq(&c->async_icosq.cq);

	return err;
}
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
	cancel_work_sync(&c->icosq.recover_work);
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	mutex_destroy(&c->icosq_recovery_lock);
	mlx5e_close_icosq(&c->async_icosq);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	mlx5e_close_cq(&c->async_icosq.cq);
}
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
{
	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);

	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
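
/* Example: on a 2-port LAG with port_aff_bias == 0, channels alternate ports
 * 0,1,0,1,... by channel index. For VFs the sequence is biased by vhca_id,
 * apparently so that different functions don't all start their channels on
 * port 0.
 */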
2390 static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
2392 if (ix > priv->stats_nch) {
2393 netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
2398 if (priv->channel_stats[ix])
2401 /* Asymmetric dynamic memory allocation.
2402 * Freed in mlx5e_priv_arrays_free, not on channel closure.
2404 mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
2405 priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
2406 GFP_KERNEL, cpu_to_node(cpu));
	if (!priv->channel_stats[ix])
		return -ENOMEM;

	return 0;
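/* Forcing NAPI to run: the helper below posts a NOP on the async ICOSQ (via
 * mlx5e_trigger_irq()), which raises a completion interrupt and schedules the
 * channel's NAPI without waiting for real traffic; used right after channel
 * activation so the RQs get their first refill promptly.
 */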
2414 void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
2416 spin_lock_bh(&c->async_icosq_lock);
2417 mlx5e_trigger_irq(&c->async_icosq);
2418 spin_unlock_bh(&c->async_icosq_lock);
2421 void mlx5e_trigger_napi_sched(struct napi_struct *napi)
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
2428 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
2429 struct mlx5e_params *params,
2430 struct mlx5e_channel_param *cparam,
2431 struct xsk_buff_pool *xsk_pool,
2432 struct mlx5e_channel **cp)
2434 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
2435 struct net_device *netdev = priv->netdev;
2436 struct mlx5e_xsk_param xsk;
2437 struct mlx5e_channel *c;
	err = mlx5_vector2irqn(priv->mdev, ix, &irq);
	if (err)
		return err;

	err = mlx5e_channel_stats_alloc(priv, ix, cpu);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;
2454 c->mdev = priv->mdev;
2455 c->tstamp = &priv->tstamp;
2458 c->pdev = mlx5_core_dma_dev(priv->mdev);
2459 c->netdev = priv->netdev;
2460 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
2461 c->num_tc = mlx5e_get_dcb_num_tc(params);
2462 c->xdp = !!params->xdp_prog;
2463 c->stats = &priv->channel_stats[ix]->ch;
2464 c->aff_mask = irq_get_effective_affinity_mask(irq);
2465 c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
2467 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
	err = mlx5e_open_queues(c, params, cparam);
	if (unlikely(err))
		goto err_napi_del;

	if (xsk_pool) {
		mlx5e_build_xsk_param(xsk_pool, &xsk);
		err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
		if (unlikely(err))
			goto err_close_queues;
	}

	*cp = c;

	return 0;

err_close_queues:
	mlx5e_close_queues(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kvfree(c);

	return err;
2495 static void mlx5e_activate_channel(struct mlx5e_channel *c)
2499 napi_enable(&c->napi);
2501 for (tc = 0; tc < c->num_tc; tc++)
2502 mlx5e_activate_txqsq(&c->sq[tc]);
2503 mlx5e_activate_icosq(&c->icosq);
2504 mlx5e_activate_icosq(&c->async_icosq);
2506 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2507 mlx5e_activate_xsk(c);
2509 mlx5e_activate_rq(&c->rq);
2512 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2516 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2517 mlx5e_deactivate_xsk(c);
2519 mlx5e_deactivate_rq(&c->rq);
2521 mlx5e_deactivate_icosq(&c->async_icosq);
2522 mlx5e_deactivate_icosq(&c->icosq);
2523 for (tc = 0; tc < c->num_tc; tc++)
2524 mlx5e_deactivate_txqsq(&c->sq[tc]);
2525 mlx5e_qos_deactivate_queues(c);
2527 napi_disable(&c->napi);
2530 static void mlx5e_close_channel(struct mlx5e_channel *c)
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_close_xsk(c);
2534 mlx5e_close_queues(c);
2535 mlx5e_qos_close_queues(c);
	netif_napi_del(&c->napi);
	kvfree(c);
2541 int mlx5e_open_channels(struct mlx5e_priv *priv,
2542 struct mlx5e_channels *chs)
2544 struct mlx5e_channel_param *cparam;
2548 chs->num = chs->params.num_channels;
2550 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2551 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
	if (err)
		goto err_free;
2559 for (i = 0; i < chs->num; i++) {
2560 struct xsk_buff_pool *xsk_pool = NULL;
2562 if (chs->params.xdp_prog)
2563 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}
2570 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
		err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
		if (err)
			goto err_close_channels;
	}
	err = mlx5e_qos_open_queues(priv, chs);
	if (err)
		goto err_close_ptp;

	mlx5e_health_channels_update(priv);
	kvfree(cparam);
	return 0;

err_close_ptp:
	if (chs->ptp)
		mlx5e_ptp_close(chs->ptp);

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
2601 static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
2605 for (i = 0; i < chs->num; i++)
2606 mlx5e_activate_channel(chs->c[i]);
2609 mlx5e_qos_activate_queues(priv);
2611 for (i = 0; i < chs->num; i++)
2612 mlx5e_trigger_napi_icosq(chs->c[i]);
	if (chs->ptp)
		mlx5e_ptp_activate_channel(chs->ptp);
2618 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2623 for (i = 0; i < chs->num; i++) {
2624 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
2625 struct mlx5e_channel *c = chs->c[i];
		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;
2630 err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
2632 /* Don't wait on the XSK RQ, because the newer xdpsock sample
2633 * doesn't provide any Fill Ring entries at the setup stage.
2637 return err ? -ETIMEDOUT : 0;
2640 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
	if (chs->ptp)
		mlx5e_ptp_deactivate_channel(chs->ptp);
2647 for (i = 0; i < chs->num; i++)
2648 mlx5e_deactivate_channel(chs->c[i]);
2651 void mlx5e_close_channels(struct mlx5e_channels *chs)
	if (chs->ptp) {
		mlx5e_ptp_close(chs->ptp);
		chs->ptp = NULL;
	}
2659 for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
2666 static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
2668 struct mlx5e_rx_res *res = priv->rx_res;
2670 return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
2673 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
2675 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2676 struct mlx5e_params *params, u16 mtu)
2678 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
2690 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2691 struct mlx5e_params *params, u16 *mtu)
2696 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2697 if (err || !hw_mtu) /* fallback to port oper mtu */
2698 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2700 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2703 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2705 struct mlx5e_params *params = &priv->channels.params;
2706 struct net_device *netdev = priv->netdev;
2707 struct mlx5_core_dev *mdev = priv->mdev;
	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;
2715 mlx5e_query_mtu(mdev, params, &mtu);
2716 if (mtu != params->sw_mtu)
2717 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2718 __func__, mtu, params->sw_mtu);
	params->sw_mtu = mtu;
	return 0;
2724 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
2726 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
2728 struct mlx5e_params *params = &priv->channels.params;
2729 struct net_device *netdev = priv->netdev;
2730 struct mlx5_core_dev *mdev = priv->mdev;
2733 /* MTU range: 68 - hw-specific max */
2734 netdev->min_mtu = ETH_MIN_MTU;
2736 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
				ETH_MAX_MTU);
2741 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
2742 struct netdev_tc_txq *tc_to_txq)
2746 netdev_reset_tc(netdev);
	err = netdev_set_num_tc(netdev, ntc);
	if (err) {
		netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
		return err;
	}
2757 for (tc = 0; tc < ntc; tc++) {
2760 count = tc_to_txq[tc].count;
2761 offset = tc_to_txq[tc].offset;
		netdev_set_tc_queue(netdev, tc, count, offset);
	}

	return 0;
2768 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
2770 int nch, ntc, num_txqs, err;
	int qos_queues = 0;

	if (priv->htb)
		qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
2776 nch = priv->channels.params.num_channels;
2777 ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
2778 num_txqs = nch * ntc + qos_queues;
	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
		num_txqs += ntc;
2782 mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
	err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
	if (err)
		netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);

	return err;
2790 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
2792 struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
2793 struct net_device *netdev = priv->netdev;
2794 int old_num_txqs, old_ntc;
2799 old_num_txqs = netdev->real_num_tx_queues;
2800 old_ntc = netdev->num_tc ? : 1;
2801 for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
2802 old_tc_to_txq[i] = netdev->tc_to_txq[i];
2804 nch = priv->channels.params.num_channels;
2805 ntc = priv->channels.params.mqprio.num_tc;
2806 tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
	err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
	if (err)
		goto err_out;

	err = mlx5e_update_tx_netdev_queues(priv);
	if (err)
		goto err_tcs;

	err = netif_set_real_num_rx_queues(netdev, nch);
	if (err) {
		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
		goto err_txqs;
	}

	return 0;

err_txqs:
2823 /* netif_set_real_num_rx_queues could fail only when nch increased. Only
2824 * one of nch and ntc is changed in this function. That means, the call
2825 * to netif_set_real_num_tx_queues below should not fail, because it
2826 * decreases the number of TX queues.
2828 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
err_tcs:
	WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
					  old_tc_to_txq));
err_out:
	return err;
2837 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
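/* Build default XPS hints: channel ix claims the CPU of every completion
 * vector whose index is congruent to ix modulo the channel count, so the
 * vectors are spread evenly when there are more IRQs than channels.
 */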
2839 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
2840 struct mlx5e_params *params)
2842 struct mlx5_core_dev *mdev = priv->mdev;
2843 int num_comp_vectors, ix, irq;
2845 num_comp_vectors = mlx5_comp_vectors_count(mdev);
2847 for (ix = 0; ix < params->num_channels; ix++) {
2848 cpumask_clear(priv->scratchpad.cpumask);
2850 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
2851 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
2853 cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
		}

		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
	}
2860 static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
2862 u16 count = priv->channels.params.num_channels;
	err = mlx5e_update_netdev_queues(priv);
	if (err)
		return err;
2869 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
2871 /* This function may be called on attach, before priv->rx_res is created. */
2872 if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
		mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);

	return 0;
2878 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
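/* Rebuild the txq_ix -> SQ lookup table used on the TX fast path, covering
 * the per-channel SQs and, when the PTP channel has TX enabled, its port
 * timestamping SQs as well.
 */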
2880 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
2882 int i, ch, tc, num_tc;
2884 ch = priv->channels.num;
2885 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
2887 for (i = 0; i < ch; i++) {
2888 for (tc = 0; tc < num_tc; tc++) {
2889 struct mlx5e_channel *c = priv->channels.c[i];
2890 struct mlx5e_txqsq *sq = &c->sq[tc];
2892 priv->txq2sq[sq->txq_ix] = sq;
		}
	}

	if (!priv->channels.ptp)
		goto out;

	if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
		goto out;
2902 for (tc = 0; tc < num_tc; tc++) {
2903 struct mlx5e_ptp *c = priv->channels.ptp;
2904 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
2906 priv->txq2sq[sq->txq_ix] = sq;
	}

out:
	/* Make the change to txq2sq visible before the queue is started.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();
2917 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2919 mlx5e_build_txq_maps(priv);
2920 mlx5e_activate_channels(priv, &priv->channels);
2921 mlx5e_xdp_tx_enable(priv);
2923 /* dev_watchdog() wants all TX queues to be started when the carrier is
2924 * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
2925 * Make it happy to avoid TX timeout false alarms.
2927 netif_tx_start_all_queues(priv->netdev);
2929 if (mlx5e_is_vport_rep(priv))
2930 mlx5e_rep_activate_channels(priv);
2932 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	if (priv->rx_res)
		mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
2938 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
	if (priv->rx_res)
		mlx5e_rx_res_channels_deactivate(priv->rx_res);
2943 if (mlx5e_is_vport_rep(priv))
2944 mlx5e_rep_deactivate_channels(priv);
2946 /* The results of ndo_select_queue are unreliable, while netdev config
2947 * is being changed (real_num_tx_queues, num_tc). Stop all queues to
2948 * prevent ndo_start_xmit from being called, so that it can assume that
2949 * the selected queue is always valid.
2951 netif_tx_disable(priv->netdev);
2953 mlx5e_xdp_tx_disable(priv);
2954 mlx5e_deactivate_channels(&priv->channels);
2957 static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
2958 struct mlx5e_params *new_params,
2959 mlx5e_fp_preactivate preactivate,
2962 struct mlx5e_params old_params;
2964 old_params = priv->channels.params;
2965 priv->channels.params = *new_params;
	if (preactivate) {
		int err;

		err = preactivate(priv, context);
		if (err) {
			priv->channels.params = old_params;
			return err;
		}
	}

	return 0;
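/* Swap a fully opened channel set into priv->channels while the carrier is
 * forced down: deactivate the old channels, publish the new ones, run the
 * preactivate hook (rolling back on failure), then activate. From user
 * space's point of view the switch appears atomic.
 */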
2980 static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2981 struct mlx5e_channels *new_chs,
2982 mlx5e_fp_preactivate preactivate,
2985 struct net_device *netdev = priv->netdev;
2986 struct mlx5e_channels old_chs;
2990 carrier_ok = netif_carrier_ok(netdev);
2991 netif_carrier_off(netdev);
2993 mlx5e_deactivate_priv_channels(priv);
2995 old_chs = priv->channels;
2996 priv->channels = *new_chs;
2998 /* New channels are ready to roll, call the preactivate hook if needed
2999 * to modify HW settings or update kernel parameters.
	if (preactivate) {
		err = preactivate(priv, context);
		if (err) {
			priv->channels = old_chs;
			goto out;
		}
	}
3009 mlx5e_close_channels(&old_chs);
3010 priv->profile->update_rx(priv);
out:
	mlx5e_selq_apply(&priv->selq);
3014 mlx5e_activate_priv_channels(priv);
3016 /* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	return err;
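/* Entry point used by most reconfiguration paths (MTU, mqprio, XDP, ...).
 * Typical usage, as in mlx5e_change_mtu() below: copy the current params,
 * modify the copy, then
 *
 *	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, ctx, reset);
 *
 * With reset false (or the netdev closed) only the parameters are swapped;
 * otherwise a new channel set is opened first and switched in.
 */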
3023 int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
3024 struct mlx5e_params *params,
3025 mlx5e_fp_preactivate preactivate,
3026 void *context, bool reset)
3028 struct mlx5e_channels *new_chs;
	reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (!reset)
		return mlx5e_switch_priv_params(priv, params, preactivate, context);

	new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
	if (!new_chs)
		return -ENOMEM;
3038 new_chs->params = *params;
3040 mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
3042 err = mlx5e_open_channels(priv, new_chs);
	if (err)
		goto err_cancel_selq;

	err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
	if (err)
		goto err_close;

	kfree(new_chs);
	return 0;

err_close:
3054 mlx5e_close_channels(new_chs);
err_cancel_selq:
	mlx5e_selq_cancel(&priv->selq);
	kfree(new_chs);
	return err;
3062 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
3064 return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
3067 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
3069 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
3070 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
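/* In legacy eswitch mode on devices with the uplink_follow capability, the
 * uplink vport admin state is mirrored from the port administrative state
 * (AUTO on up, DOWN on down) so that vports configured to follow the uplink
 * track it.
 */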
3073 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
3074 enum mlx5_port_status state)
3076 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3077 int vport_admin_state;
3079 mlx5_set_port_admin_status(mdev, state);
3081 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_GEN(mdev, uplink_follow))
		return;
3085 if (state == MLX5_PORT_UP)
3086 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	else
		vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3090 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
3093 int mlx5e_open_locked(struct net_device *netdev)
3095 struct mlx5e_priv *priv = netdev_priv(netdev);
3098 mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
3100 set_bit(MLX5E_STATE_OPENED, &priv->state);
	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	err = priv->profile->update_rx(priv);
	if (err)
		goto err_close_channels;
3110 mlx5e_selq_apply(&priv->selq);
3111 mlx5e_activate_priv_channels(priv);
3112 mlx5e_apply_traps(priv, true);
3113 if (priv->profile->update_carrier)
3114 priv->profile->update_carrier(priv);
	mlx5e_queue_update_stats(priv);
	return 0;

err_close_channels:
	mlx5e_close_channels(&priv->channels);
3121 err_clear_state_opened_flag:
3122 clear_bit(MLX5E_STATE_OPENED, &priv->state);
	mlx5e_selq_cancel(&priv->selq);
	return err;
3127 int mlx5e_open(struct net_device *netdev)
3129 struct mlx5e_priv *priv = netdev_priv(netdev);
3132 mutex_lock(&priv->state_lock);
3133 err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	return err;
3141 int mlx5e_close_locked(struct net_device *netdev)
3143 struct mlx5e_priv *priv = netdev_priv(netdev);
	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;
3151 mlx5e_apply_traps(priv, false);
3152 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3154 netif_carrier_off(priv->netdev);
3155 mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
3161 int mlx5e_close(struct net_device *netdev)
3163 struct mlx5e_priv *priv = netdev_priv(netdev);
	if (!netif_device_present(netdev))
		return -ENODEV;
3169 mutex_lock(&priv->state_lock);
3170 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
3171 err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
3177 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
3179 mlx5_wq_destroy(&rq->wq_ctrl);
3182 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3183 struct mlx5e_rq *rq,
3184 struct mlx5e_rq_param *param)
3186 void *rqc = param->rqc;
3187 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3190 param->wq.db_numa_node = param->wq.buf_numa_node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;
3197 /* Mark as unused given "Drop-RQ" packets never reach XDP */
3198 xdp_rxq_info_unused(&rq->xdp_rxq);
3205 static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
3206 struct mlx5e_cq *cq,
3207 struct mlx5e_cq_param *param)
3209 struct mlx5_core_dev *mdev = priv->mdev;
3211 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3212 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3214 return mlx5e_alloc_cq_common(priv, param, cq);
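/* The drop RQ is a minimal RQ that never delivers packets to the stack: it
 * backs the device while the real channels are down, so stray RX is counted
 * against the drop RQ queue counter instead of being delivered anywhere.
 */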
3217 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3218 struct mlx5e_rq *drop_rq)
3220 struct mlx5_core_dev *mdev = priv->mdev;
3221 struct mlx5e_cq_param cq_param = {};
3222 struct mlx5e_rq_param rq_param = {};
3223 struct mlx5e_cq *cq = &drop_rq->cq;
3226 mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
	err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;
	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_drop_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
3262 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3264 mlx5e_destroy_rq(drop_rq);
3265 mlx5e_free_drop_rq(drop_rq);
3266 mlx5e_destroy_cq(&drop_rq->cq);
3267 mlx5e_free_cq(&drop_rq->cq);
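/* A TIS (transport interface send) carries the transport domain and priority
 * that SQs inherit; one is created per LAG port and per TC, and LAG TX port
 * affinity is encoded into the TIS when the device supports it.
 */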
3270 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
3272 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3274 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
3276 if (MLX5_GET(tisc, tisc, tls_en))
3277 MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
3279 if (mlx5_lag_is_lacp_owner(mdev))
3280 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3282 return mlx5_core_create_tis(mdev, in, tisn);
3285 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3287 mlx5_core_destroy_tis(mdev, tisn);
3290 void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3294 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3295 for (tc = 0; tc < priv->profile->max_tc; tc++)
3296 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3299 static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3301 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
3304 int mlx5e_create_tises(struct mlx5e_priv *priv)
3309 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3310 for (tc = 0; tc < priv->profile->max_tc; tc++) {
3311 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3314 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3316 MLX5_SET(tisc, tisc, prio, tc << 1);
3318 if (mlx5e_lag_should_assign_affinity(priv->mdev))
3319 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
			if (err)
				goto err_close_tises;
		}
	}

	return 0;

err_close_tises:
	for (; i >= 0; i--) {
3331 for (tc--; tc >= 0; tc--)
3332 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
		tc = priv->profile->max_tc;
	}

	return err;
3339 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3341 if (priv->mqprio_rl) {
3342 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3343 mlx5e_mqprio_rl_free(priv->mqprio_rl);
		priv->mqprio_rl = NULL;
	}
3346 mlx5e_accel_cleanup_tx(priv);
3347 mlx5e_destroy_tises(priv);
3350 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3355 for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}
3360 if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
		return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);

	return 0;
static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
						 int ntc, int nch)
3371 memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
3373 /* Map netdev TCs to offset 0.
3374 * We have our own UP to TXQ mapping for DCB mode of QoS
3376 for (tc = 0; tc < ntc; tc++) {
		tc_to_txq[tc] = (struct netdev_tc_txq) {
			.count = nch,
			.offset = 0,
		};
	}
3384 static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
3385 struct tc_mqprio_qopt *qopt)
3389 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
3390 tc_to_txq[tc] = (struct netdev_tc_txq) {
3391 .count = qopt->count[tc],
			.offset = qopt->offset[tc],
		};
	}
3397 static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
3399 params->mqprio.mode = TC_MQPRIO_MODE_DCB;
3400 params->mqprio.num_tc = num_tc;
3401 mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
3402 params->num_channels);
3405 static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
3406 struct mlx5e_mqprio_rl *rl)
3410 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
		u32 hw_id = 0;

		if (rl)
			mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
3415 params->mqprio.channel.hw_id[tc] = hw_id;
3419 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
3420 struct tc_mqprio_qopt_offload *mqprio,
3421 struct mlx5e_mqprio_rl *rl)
3425 params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
3426 params->mqprio.num_tc = mqprio->qopt.num_tc;
3428 for (tc = 0; tc < TC_MAX_QUEUE; tc++)
3429 params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
3431 mlx5e_mqprio_rl_update_params(params, rl);
3432 mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
3435 static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
3437 mlx5e_params_mqprio_dcb_set(params, 1);
3440 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
3441 struct tc_mqprio_qopt *mqprio)
3443 struct mlx5e_params new_params;
3444 u8 tc = mqprio->num_tc;
3447 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;
3452 new_params = priv->channels.params;
3453 mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
3455 err = mlx5e_safe_switch_params(priv, &new_params,
3456 mlx5e_num_channels_changed_ctx, NULL, true);
3458 if (!err && priv->mqprio_rl) {
3459 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3460 mlx5e_mqprio_rl_free(priv->mqprio_rl);
		priv->mqprio_rl = NULL;
	}
3464 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    mlx5e_get_dcb_num_tc(&priv->channels.params));
	return err;
3469 static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
3470 struct tc_mqprio_qopt_offload *mqprio)
3472 struct net_device *netdev = priv->netdev;
3473 struct mlx5e_ptp *ptp_channel;
3477 ptp_channel = priv->channels.ptp;
3478 if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
3480 "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
3484 if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
	    mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
		return -EINVAL;
3488 for (i = 0; i < mqprio->qopt.num_tc; i++) {
3489 if (!mqprio->qopt.count[i]) {
			netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i);
			return -EINVAL;
		}
3493 if (mqprio->min_rate[i]) {
			netdev_err(netdev, "Min tx rate is not supported\n");
			return -EINVAL;
		}
3498 if (mqprio->max_rate[i]) {
			err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
			if (err)
				return err;
		}
3506 if (mqprio->qopt.offset[i] != agg_count) {
			netdev_err(netdev, "Discontinuous queues config is not supported\n");
			return -EINVAL;
		}
		agg_count += mqprio->qopt.count[i];
	}
3513 if (priv->channels.params.num_channels != agg_count) {
3514 netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
			   agg_count, priv->channels.params.num_channels);
		return -EINVAL;
	}

	return 0;
3522 static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
	for (tc = 0; tc < num_tc; tc++)
		if (max_rate[tc])
			return true;

	return false;
3532 static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
3533 u8 num_tc, u64 max_rate[])
3535 struct mlx5e_mqprio_rl *rl;
	if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
		return NULL;
3541 rl = mlx5e_mqprio_rl_alloc();
	if (!rl)
		return ERR_PTR(-ENOMEM);
	err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
	if (err) {
		mlx5e_mqprio_rl_free(rl);
		return ERR_PTR(err);
	}

	return rl;
3554 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
3555 struct tc_mqprio_qopt_offload *mqprio)
3557 mlx5e_fp_preactivate preactivate;
3558 struct mlx5e_params new_params;
3559 struct mlx5e_mqprio_rl *rl;
	err = mlx5e_mqprio_channel_validate(priv, mqprio);
	if (err)
		return err;

	rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
	if (IS_ERR(rl))
		return PTR_ERR(rl);
3571 new_params = priv->channels.params;
3572 mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
3574 nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
3575 preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
3576 mlx5e_update_netdev_queues_ctx;
	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
	if (err) {
		if (rl) {
			mlx5e_mqprio_rl_cleanup(rl);
			mlx5e_mqprio_rl_free(rl);
		}
		return err;
	}
3586 if (priv->mqprio_rl) {
3587 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3588 mlx5e_mqprio_rl_free(priv->mqprio_rl);
	}
	priv->mqprio_rl = rl;

	return 0;
3595 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
3596 struct tc_mqprio_qopt_offload *mqprio)
3598 /* MQPRIO is another toplevel qdisc that can't be attached
3599 * simultaneously with the offloaded HTB.
	if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
		return -EINVAL;
3604 switch (mqprio->mode) {
3605 case TC_MQPRIO_MODE_DCB:
3606 return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt);
3607 case TC_MQPRIO_MODE_CHANNEL:
		return mlx5e_setup_tc_mqprio_channel(priv, mqprio);
	default:
		return -EOPNOTSUPP;
	}
3614 static LIST_HEAD(mlx5e_block_cb_list);
3616 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3619 struct mlx5e_priv *priv = netdev_priv(dev);
3620 bool tc_unbind = false;
3623 if (type == TC_SETUP_BLOCK &&
	    ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
		tc_unbind = true;

	if (!netif_device_present(dev) && !tc_unbind)
		return -ENODEV;

	switch (type) {
	case TC_SETUP_BLOCK: {
3632 struct flow_block_offload *f = type_data;
3634 f->unlocked_driver_cb = true;
3635 return flow_block_cb_setup_simple(type_data,
3636 &mlx5e_block_cb_list,
						  mlx5e_setup_tc_block_cb,
						  priv, priv, true);
	}
3640 case TC_SETUP_QDISC_MQPRIO:
3641 mutex_lock(&priv->state_lock);
3642 err = mlx5e_setup_tc_mqprio(priv, type_data);
		mutex_unlock(&priv->state_lock);
		return err;
3645 case TC_SETUP_QDISC_HTB:
3646 mutex_lock(&priv->state_lock);
3647 err = mlx5e_htb_setup_tc(priv, type_data);
		mutex_unlock(&priv->state_lock);
		return err;
	default:
		return -EOPNOTSUPP;
	}
3655 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3659 for (i = 0; i < priv->stats_nch; i++) {
3660 struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
3661 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
3662 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3665 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
3666 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
3667 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
3669 for (j = 0; j < priv->max_opened_tc; j++) {
3670 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3672 s->tx_packets += sq_stats->packets;
3673 s->tx_bytes += sq_stats->bytes;
			s->tx_dropped += sq_stats->dropped;
		}
	}
3677 if (priv->tx_ptp_opened) {
3678 for (i = 0; i < priv->max_opened_tc; i++) {
3679 struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
3681 s->tx_packets += sq_stats->packets;
3682 s->tx_bytes += sq_stats->bytes;
			s->tx_dropped += sq_stats->dropped;
		}
	}
3686 if (priv->rx_ptp_opened) {
3687 struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
3689 s->rx_packets += rq_stats->packets;
3690 s->rx_bytes += rq_stats->bytes;
3691 s->multicast += rq_stats->mcast_packets;
3696 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3698 struct mlx5e_priv *priv = netdev_priv(dev);
3699 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	if (!netif_device_present(dev))
		return;
	/* In switchdev mode, monitor counters don't monitor
	 * rx/tx stats of 802_3. The update stats mechanism
	 * should keep the 802_3 layout counters updated.
	 */
3708 if (!mlx5e_monitor_counter_supported(priv) ||
3709 mlx5e_is_uplink_rep(priv)) {
3710 /* update HW stats in background for next time */
3711 mlx5e_queue_update_stats(priv);
3714 if (mlx5e_is_uplink_rep(priv)) {
3715 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3717 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3718 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3719 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3720 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3722 /* vport multicast also counts packets that are dropped due to steering
3723 * or rx out of buffer
3725 stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
	} else {
		mlx5e_fold_sw_stats64(priv, stats);
	}
3730 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3732 stats->rx_length_errors =
3733 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3734 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3735 PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
3736 VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
3737 stats->rx_crc_errors =
3738 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3739 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3740 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3741 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3742 stats->rx_frame_errors;
3743 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3746 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
3748 if (mlx5e_is_uplink_rep(priv))
3749 return; /* no rx mode for uplink rep */
3751 queue_work(priv->wq, &priv->set_rx_mode_work);
3754 static void mlx5e_set_rx_mode(struct net_device *dev)
3756 struct mlx5e_priv *priv = netdev_priv(dev);
3758 mlx5e_nic_set_rx_mode(priv);
3761 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3763 struct mlx5e_priv *priv = netdev_priv(netdev);
3764 struct sockaddr *saddr = addr;
3766 if (!is_valid_ether_addr(saddr->sa_data))
3767 return -EADDRNOTAVAIL;
3769 netif_addr_lock_bh(netdev);
3770 eth_hw_addr_set(netdev, saddr->sa_data);
3771 netif_addr_unlock_bh(netdev);
	mlx5e_nic_set_rx_mode(priv);

	return 0;
#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)
3786 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3788 static int set_feature_lro(struct net_device *netdev, bool enable)
3790 struct mlx5e_priv *priv = netdev_priv(netdev);
3791 struct mlx5_core_dev *mdev = priv->mdev;
3792 struct mlx5e_params *cur_params;
3793 struct mlx5e_params new_params;
3797 mutex_lock(&priv->state_lock);
3799 cur_params = &priv->channels.params;
3800 new_params = *cur_params;
	if (enable)
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
3804 else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
3805 new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
3809 if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
3810 new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
3811 if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3812 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
			    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
				reset = false;
		}
	}
3818 err = mlx5e_safe_switch_params(priv, &new_params,
3819 mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
	mutex_unlock(&priv->state_lock);
	return err;
3825 static int set_feature_hw_gro(struct net_device *netdev, bool enable)
3827 struct mlx5e_priv *priv = netdev_priv(netdev);
3828 struct mlx5e_params new_params;
3832 mutex_lock(&priv->state_lock);
3833 new_params = priv->channels.params;
	if (enable) {
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
3837 new_params.packet_merge.shampo.match_criteria_type =
3838 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
3839 new_params.packet_merge.shampo.alignment_granularity =
3840 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
3841 } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
	} else {
		goto out;
	}
3847 err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
	mutex_unlock(&priv->state_lock);
	return err;
3853 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3855 struct mlx5e_priv *priv = netdev_priv(netdev);
	if (enable)
		mlx5e_enable_cvlan_filter(priv->fs,
					  !!(priv->netdev->flags & IFF_PROMISC));
	else
		mlx5e_disable_cvlan_filter(priv->fs,
					   !!(priv->netdev->flags & IFF_PROMISC));

	return 0;
3867 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
3869 struct mlx5e_priv *priv = netdev_priv(netdev);
3872 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
3873 int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
3874 MLX5_TC_FLAG(NIC_OFFLOAD);
3875 if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
3877 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3882 mutex_lock(&priv->state_lock);
3883 if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
		netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
		err = -EINVAL;
	}
	mutex_unlock(&priv->state_lock);

	return err;
3892 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3894 struct mlx5e_priv *priv = netdev_priv(netdev);
3895 struct mlx5_core_dev *mdev = priv->mdev;
3897 return mlx5_set_port_fcs(mdev, !enable);
3900 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
3902 u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
3903 bool supported, curr_state;
	if (!MLX5_CAP_GEN(mdev, ports_check))
		return 0;

	err = mlx5_query_ports_check(mdev, in, sizeof(in));
	if (err)
		return err;
3913 supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
3914 curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
	if (!supported || enable == curr_state)
		return 0;
3919 MLX5_SET(pcmr_reg, in, local_port, 1);
3920 MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
3922 return mlx5_set_ports_check(mdev, in, sizeof(in));
3925 static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
3927 struct mlx5_core_dev *mdev = priv->mdev;
3928 bool enable = *(bool *)ctx;
3930 return mlx5e_set_rx_port_ts(mdev, enable);
3933 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3935 struct mlx5e_priv *priv = netdev_priv(netdev);
3936 struct mlx5e_channels *chs = &priv->channels;
3937 struct mlx5e_params new_params;
3940 mutex_lock(&priv->state_lock);
3942 new_params = chs->params;
3943 new_params.scatter_fcs_en = enable;
3944 err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
3945 &new_params.scatter_fcs_en, true);
	mutex_unlock(&priv->state_lock);

	return err;
3950 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3952 struct mlx5e_priv *priv = netdev_priv(netdev);
3955 mutex_lock(&priv->state_lock);
3957 mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
3958 priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err) {
3965 mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
3966 priv->channels.params.vlan_strip_disable = enable;
	}

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
3974 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3976 struct mlx5e_priv *priv = netdev_priv(dev);
3977 struct mlx5e_flow_steering *fs = priv->fs;
3979 if (mlx5e_is_uplink_rep(priv))
3980 return 0; /* no vlan table for uplink rep */
3982 return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
3985 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3987 struct mlx5e_priv *priv = netdev_priv(dev);
3988 struct mlx5e_flow_steering *fs = priv->fs;
3990 if (mlx5e_is_uplink_rep(priv))
3991 return 0; /* no vlan table for uplink rep */
3993 return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
3996 #ifdef CONFIG_MLX5_EN_ARFS
3997 static int set_feature_arfs(struct net_device *netdev, bool enable)
3999 struct mlx5e_priv *priv = netdev_priv(netdev);
	if (enable)
		err = mlx5e_arfs_enable(priv->fs);
	else
		err = mlx5e_arfs_disable(priv->fs);

	return err;
}
#endif
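/* Run one feature handler; on failure flip the bit in *features back so the
 * stack sees the feature in its real state after netdev_update_features().
 */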
4011 static int mlx5e_handle_feature(struct net_device *netdev,
4012 netdev_features_t *features,
4013 netdev_features_t feature,
4014 mlx5e_feature_handler feature_handler)
4016 netdev_features_t changes = *features ^ netdev->features;
4017 bool enable = !!(*features & feature);
	if (!(changes & feature))
		return 0;
4023 err = feature_handler(netdev, enable);
	if (err) {
		MLX5E_SET_FEATURE(features, feature, !enable);
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	return 0;
4034 void mlx5e_set_xdp_feature(struct net_device *netdev)
4036 struct mlx5e_priv *priv = netdev_priv(netdev);
4037 struct mlx5e_params *params = &priv->channels.params;
4040 if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
		xdp_clear_features_flag(netdev);
		return;
	}
4045 val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4046 NETDEV_XDP_ACT_XSK_ZEROCOPY |
4047 NETDEV_XDP_ACT_RX_SG |
4048 NETDEV_XDP_ACT_NDO_XMIT |
4049 NETDEV_XDP_ACT_NDO_XMIT_SG;
4050 xdp_set_features_flag(netdev, val);
4053 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
	netdev_features_t oper_features = features;
	int err = 0;
4058 #define MLX5E_HANDLE_FEATURE(feature, handler) \
4059 mlx5e_handle_feature(netdev, &oper_features, feature, handler)
4061 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
4062 err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
4063 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
4064 set_feature_cvlan_filter);
4065 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
4066 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
4067 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
4068 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
4069 #ifdef CONFIG_MLX5_EN_ARFS
4070 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}
	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);

	return 0;
4085 static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
4086 netdev_features_t features)
4088 features &= ~NETIF_F_HW_TLS_RX;
4089 if (netdev->features & NETIF_F_HW_TLS_RX)
4090 netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
4092 features &= ~NETIF_F_HW_TLS_TX;
4093 if (netdev->features & NETIF_F_HW_TLS_TX)
4094 netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
4096 features &= ~NETIF_F_NTUPLE;
4097 if (netdev->features & NETIF_F_NTUPLE)
4098 netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
4100 features &= ~NETIF_F_GRO_HW;
4101 if (netdev->features & NETIF_F_GRO_HW)
4102 netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
4104 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4105 if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");

	return features;
4111 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
4112 netdev_features_t features)
4114 struct mlx5e_priv *priv = netdev_priv(netdev);
4115 struct mlx5e_vlan_table *vlan;
4116 struct mlx5e_params *params;
	if (!netif_device_present(netdev))
		return features;
4121 vlan = mlx5e_fs_get_vlan(priv->fs);
4122 mutex_lock(&priv->state_lock);
4123 params = &priv->channels.params;
	if (!vlan ||
	    !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
4126 /* HW strips the outer C-tag header, this is a problem
4127 * for S-tag traffic.
4129 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4130 if (!params->vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
4134 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
4135 if (features & NETIF_F_LRO) {
4136 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
			features &= ~NETIF_F_LRO;
		}
4139 if (features & NETIF_F_GRO_HW) {
4140 netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}
4145 if (params->xdp_prog) {
4146 if (features & NETIF_F_LRO) {
4147 netdev_warn(netdev, "LRO is incompatible with XDP\n");
			features &= ~NETIF_F_LRO;
		}
4150 if (features & NETIF_F_GRO_HW) {
4151 netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}
4156 if (priv->xsk.refcnt) {
4157 if (features & NETIF_F_LRO) {
4158 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
				    priv->xsk.refcnt);
			features &= ~NETIF_F_LRO;
		}
4162 if (features & NETIF_F_GRO_HW) {
4163 netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
				    priv->xsk.refcnt);
			features &= ~NETIF_F_GRO_HW;
		}
	}
4169 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
4170 features &= ~NETIF_F_RXHASH;
4171 if (netdev->features & NETIF_F_RXHASH)
4172 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
4174 if (features & NETIF_F_GRO_HW) {
4175 netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}
4180 if (mlx5e_is_uplink_rep(priv)) {
4181 features = mlx5e_fix_uplink_rep_features(netdev, features);
4182 features |= NETIF_F_NETNS_LOCAL;
	} else {
		features &= ~NETIF_F_NETNS_LOCAL;
	}

	mutex_unlock(&priv->state_lock);

	return features;
4192 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
4193 struct mlx5e_channels *chs,
4194 struct mlx5e_params *new_params,
4195 struct mlx5_core_dev *mdev)
4199 for (ix = 0; ix < chs->params.num_channels; ix++) {
4200 struct xsk_buff_pool *xsk_pool =
4201 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
4202 struct mlx5e_xsk_param xsk;
		if (!xsk_pool)
			continue;

		mlx5e_build_xsk_param(xsk_pool, &xsk);
4209 max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
4211 /* Validate XSK params and XDP MTU in advance */
4212 if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
4213 new_params->sw_mtu > max_xdp_mtu) {
4214 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
4215 int max_mtu_frame, max_mtu_page, max_mtu;
4217 /* Two criteria must be met:
4218 * 1. HW MTU + all headrooms <= XSK frame size.
4219 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
4221 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
4222 max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
4223 max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
4225 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
				   new_params->sw_mtu, ix, max_mtu);
			return false;
		}
	}

	return true;
4234 static bool mlx5e_params_validate_xdp(struct net_device *netdev,
4235 struct mlx5_core_dev *mdev,
4236 struct mlx5e_params *params)
	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
	 * the XDP program.
	 */
4243 is_linear = params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC ?
4244 mlx5e_rx_is_linear_skb(mdev, params, NULL) :
4245 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
	if (!is_linear) {
		if (!params->xdp_prog->aux->xdp_has_frags) {
4249 netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
				    mlx5e_xdp_max_mtu(params, NULL));
			return false;
		}
4254 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
4255 !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
4256 netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
				    mlx5e_xdp_max_mtu(params, NULL));
			return false;
		}
	}

	return true;
4266 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
4267 mlx5e_fp_preactivate preactivate)
4269 struct mlx5e_priv *priv = netdev_priv(netdev);
4270 struct mlx5e_params new_params;
4271 struct mlx5e_params *params;
4275 mutex_lock(&priv->state_lock);
4277 params = &priv->channels.params;
4279 new_params = *params;
4280 new_params.sw_mtu = new_mtu;
	err = mlx5e_validate_params(priv->mdev, &new_params);
	if (err)
		goto out;

	if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
							      &new_params)) {
		err = -EINVAL;
		goto out;
	}

	if (priv->xsk.refcnt &&
	    !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
				    &new_params, priv->mdev)) {
		err = -EINVAL;
		goto out;
	}
	if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
		reset = false;
4301 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
4302 params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
4303 bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
4304 bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
4306 u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
4307 u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
4309 /* Always reset in linear mode - hw_mtu is used in data path.
4310 * Check that the mode was non-linear and didn't change.
4311 * If XSK is active, XSK RQs are linear.
4312 * Reset if the RQ size changed, even if it's non-linear.
		if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
		    sz_old == sz_new)
			reset = false;
	}
4319 err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
	netdev->mtu = params->sw_mtu;
	mutex_unlock(&priv->state_lock);
	return err;
4327 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
4329 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
4332 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
4334 bool set = *(bool *)ctx;
4336 return mlx5e_ptp_rx_manage_fs(priv, set);
4339 static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
4341 bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
	if (!rx_filter)
		/* Reset CQE compression to Admin default */
		return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);

	if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
		return 0;
4351 /* Disable CQE compression */
4352 netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
4353 err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
	if (err)
		netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);

	return err;
4360 static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
4362 struct mlx5e_params new_params;
	if (ptp_rx == priv->channels.params.ptp_rx)
		return 0;
4367 new_params = priv->channels.params;
4368 new_params.ptp_rx = ptp_rx;
4369 return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
4370 &new_params.ptp_rx, true);
4373 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
4375 struct hwtstamp_config config;
4376 bool rx_cqe_compress_def;
4380 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
4387 /* TX HW timestamp */
4388 switch (config.tx_type) {
4389 case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}
4396 mutex_lock(&priv->state_lock);
4397 rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
4399 /* RX HW timestamp */
4400 switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ptp_rx = false;
		break;
4404 case HWTSTAMP_FILTER_ALL:
4405 case HWTSTAMP_FILTER_SOME:
4406 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4407 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4408 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4409 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4410 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4411 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4412 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4413 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4414 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4415 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4416 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4417 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4418 case HWTSTAMP_FILTER_NTP_ALL:
4419 config.rx_filter = HWTSTAMP_FILTER_ALL;
4420 /* ptp_rx is set if both HW TS is set and CQE
4421 * compression is set
		ptp_rx = rx_cqe_compress_def;
		break;
	default:
		err = -ERANGE;
		goto err_unlock;
	}
4430 if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
4431 err = mlx5e_hwstamp_config_no_ptp_rx(priv,
4432 config.rx_filter != HWTSTAMP_FILTER_NONE);
	else
		err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
	if (err)
		goto err_unlock;
4438 memcpy(&priv->tstamp, &config, sizeof(config));
4439 mutex_unlock(&priv->state_lock);
4441 /* might need to fix some features */
4442 netdev_update_features(priv->netdev);
4444 return copy_to_user(ifr->ifr_data, &config,
4445 sizeof(config)) ? -EFAULT : 0;
err_unlock:
	mutex_unlock(&priv->state_lock);
	return err;
4451 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
4453 struct hwtstamp_config *cfg = &priv->tstamp;
	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;
4458 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
4461 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4463 struct mlx5e_priv *priv = netdev_priv(dev);
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
4475 #ifdef CONFIG_MLX5_ESWITCH
4476 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
4478 struct mlx5e_priv *priv = netdev_priv(dev);
4479 struct mlx5_core_dev *mdev = priv->mdev;
4481 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4484 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4487 struct mlx5e_priv *priv = netdev_priv(dev);
4488 struct mlx5_core_dev *mdev = priv->mdev;
4490 if (vlan_proto != htons(ETH_P_8021Q))
4491 return -EPROTONOSUPPORT;
	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
4497 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4499 struct mlx5e_priv *priv = netdev_priv(dev);
4500 struct mlx5_core_dev *mdev = priv->mdev;
4502 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4505 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4507 struct mlx5e_priv *priv = netdev_priv(dev);
4508 struct mlx5_core_dev *mdev = priv->mdev;
4510 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4513 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4516 struct mlx5e_priv *priv = netdev_priv(dev);
4517 struct mlx5_core_dev *mdev = priv->mdev;
4519 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4520 max_tx_rate, min_tx_rate);
4523 static int mlx5_vport_link2ifla(u8 esw_link)
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
4527 return IFLA_VF_LINK_STATE_DISABLE;
4528 case MLX5_VPORT_ADMIN_STATE_UP:
4529 return IFLA_VF_LINK_STATE_ENABLE;
4531 return IFLA_VF_LINK_STATE_AUTO;
4534 static int mlx5_ifla_link2vport(u8 ifla_link)
4536 switch (ifla_link) {
4537 case IFLA_VF_LINK_STATE_DISABLE:
4538 return MLX5_VPORT_ADMIN_STATE_DOWN;
4539 case IFLA_VF_LINK_STATE_ENABLE:
4540 return MLX5_VPORT_ADMIN_STATE_UP;
4542 return MLX5_VPORT_ADMIN_STATE_AUTO;
4545 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4548 struct mlx5e_priv *priv = netdev_priv(dev);
4549 struct mlx5_core_dev *mdev = priv->mdev;
	if (mlx5e_is_uplink_rep(priv))
		return -EOPNOTSUPP;
4554 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4555 mlx5_ifla_link2vport(link_state));
4558 int mlx5e_get_vf_config(struct net_device *dev,
4559 int vf, struct ifla_vf_info *ivi)
4561 struct mlx5e_priv *priv = netdev_priv(dev);
4562 struct mlx5_core_dev *mdev = priv->mdev;
	if (!netif_device_present(dev))
		return -EOPNOTSUPP;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;

	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
4575 int mlx5e_get_vf_stats(struct net_device *dev,
4576 int vf, struct ifla_vf_stats *vf_stats)
4578 struct mlx5e_priv *priv = netdev_priv(dev);
4579 struct mlx5_core_dev *mdev = priv->mdev;
	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
4586 mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
4588 struct mlx5e_priv *priv = netdev_priv(dev);
	if (!netif_device_present(dev))
		return false;

	if (!mlx5e_is_uplink_rep(priv))
		return false;
4596 return mlx5e_rep_has_offload_stats(dev, attr_id);
mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			void *sp)
4603 struct mlx5e_priv *priv = netdev_priv(dev);
	if (!mlx5e_is_uplink_rep(priv))
		return -EOPNOTSUPP;
4608 return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
4612 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
4614 switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
	default:
		return false;
	}
4626 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
4627 struct sk_buff *skb)
4629 switch (skb->inner_protocol) {
4630 case htons(ETH_P_IP):
4631 case htons(ETH_P_IPV6):
	case htons(ETH_P_TEB):
		return true;
4634 case htons(ETH_P_MPLS_UC):
4635 case htons(ETH_P_MPLS_MC):
		return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
	default:
		return false;
	}
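/* Decide whether a tunnelled skb may keep its checksum/GSO offloads: GRE and
 * IP-in-IP are gated on device capabilities, UDP tunnels on a VXLAN port
 * lookup (or the default GENEVE port); anything unrecognized drops back to
 * the software fallback at the end.
 */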
4641 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4642 struct sk_buff *skb,
4643 netdev_features_t features)
4645 unsigned int offset = 0;
4646 struct udphdr *udph;
4650 switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
4654 case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}
	switch (proto) {
	case IPPROTO_GRE:
		if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
			return features;
		break;
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
			return features;
		break;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
4675 /* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;
4679 #if IS_ENABLED(CONFIG_GENEVE)
4680 /* Support Geneve offload for default UDP port */
		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
			return features;
#endif
		break;
4685 #ifdef CONFIG_MLX5_EN_IPSEC
	case IPPROTO_ESP:
		return mlx5e_ipsec_feature_check(skb, features);
#endif
	}

out:
4692 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
4693 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4696 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
4697 struct net_device *netdev,
4698 netdev_features_t features)
4700 struct mlx5e_priv *priv = netdev_priv(netdev);
4702 features = vlan_features_check(skb, features);
4703 features = vxlan_features_check(skb, features);
4705 /* Validate if the tunneled packet is being offloaded by HW */
4706 if (skb->encapsulation &&
4707 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
4713 static void mlx5e_tx_timeout_work(struct work_struct *work)
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
4717 struct net_device *netdev = priv->netdev;
4721 mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
4726 for (i = 0; i < netdev->real_num_tx_queues; i++) {
4727 struct netdev_queue *dev_queue =
4728 netdev_get_tx_queue(netdev, i);
4729 struct mlx5e_txqsq *sq = priv->txq2sq[i];
		if (!netif_xmit_stopped(dev_queue))
			continue;
4734 if (mlx5e_reporter_tx_timeout(sq))
			/* break if we tried to reopen the channels */
			break;
	}

unlock:
	mutex_unlock(&priv->state_lock);
4744 static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
4746 struct mlx5e_priv *priv = netdev_priv(dev);
4748 netdev_err(dev, "TX timeout detected\n");
4749 queue_work(priv->wq, &priv->tx_timeout_work);
4752 static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
4753 struct mlx5e_params *params)
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
		netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
		return -EINVAL;
	}

	if (!mlx5e_params_validate_xdp(netdev, mdev, params))
		return -EINVAL;

	return 0;
4766 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
4768 struct bpf_prog *old_prog;
4770 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
4771 lockdep_is_held(&rq->priv->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
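/* Installing or removing an XDP program requires a full channel reset;
 * swapping one program for another is done in place on each RQ under the
 * state lock, with the program reference counts adjusted per channel.
 */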
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params new_params;
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset;
	int i;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	new_params.xdp_prog = prog;

	if (prog) {
		err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
		if (err)
			goto unlock;
	}

	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	old_prog = priv->channels.params.xdp_prog;

	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
	if (err)
		goto unlock;

	if (old_prog)
		bpf_prog_put(old_prog);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels' RQs here.
	 */
	bpf_prog_add(prog, priv->channels.num);
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
			bpf_prog_inc(prog);
			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
		}
	}

unlock:
	mutex_unlock(&priv->state_lock);

	/* Need to fix some features. */
	if (!err)
		netdev_update_features(netdev);

	return err;
}

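/* Reference-count bookkeeping in the no-reset path above, as a worked
 * example (numbers illustrative): with 4 channels of which 1 has an
 * active XSK queue, exchanging programs takes bpf_prog_add(prog, 4) for
 * the regular RQs plus one bpf_prog_inc() for the XSK RQ, i.e. 5
 * references held on behalf of the datapath; each one is dropped again
 * through mlx5e_rq_replace_xdp_prog() when the next program (or NULL)
 * replaces it.
 */
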
static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_SETUP_XSK_POOL:
		return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
					    xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				struct net_device *dev, u32 filter_mask,
				int nlflags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 mode, setting;
	int err;

	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
	if (err)
		return err;
	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       mode,
				       0, 0, nlflags, filter_mask, NULL);
}

static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				u16 flags, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct nlattr *attr, *br_spec;
	u16 mode = BRIDGE_MODE_UNDEF;
	u8 setting;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode > BRIDGE_MODE_VEPA)
			return -EINVAL;

		break;
	}

	if (mode == BRIDGE_MODE_UNDEF)
		return -EINVAL;

	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif

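/* The VEPA/VEB setting programmed here is driven from userspace through
 * the standard bridge netlink API, e.g. (interface name illustrative):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 *
 * In VEPA mode VF-to-VF traffic is hairpinned through the adjacent
 * external switch instead of being forwarded by the embedded switch.
 */
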
const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_eth_ioctl           = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
	.ndo_xsk_wakeup          = mlx5e_xsk_wakeup,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
	.ndo_bridge_getlink      = mlx5e_bridge_getlink,

	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};

static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}

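/* Example: if the firmware reported the supported periods 8, 16, 32 and
 * 1024 usecs (values illustrative; the real array comes from the
 * lro_timer_supported_periods capability), a wanted_timeout of 20 selects
 * index 2 (32 usecs) - the first period >= the requested one - while a
 * request larger than every entry falls back to the last, largest period.
 */
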
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
				     priv->max_nch);
	mlx5e_params_mqprio_reset(params);

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));

	/* XDP SQ */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);

	params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* AF_XDP */
	params->xsk = xsk;

	/* Do not update netdev->features directly in here:
	 * mlx5e_update_features() is called from mlx5e_attach_netdev().
	 * To change netdev->features, modify mlx5e_fix_features() instead.
	 */
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u8 addr[ETH_ALEN];

	mlx5_query_mac_address(priv->mdev, addr);
	if (is_zero_ether_addr(addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
		return;
	}

	eth_hw_addr_set(netdev, addr);
}

static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
				unsigned int entry, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
}

static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
				  unsigned int entry, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
}

void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
{
	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	priv->nic_info.set_port = mlx5e_vxlan_set_port;
	priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
	priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			       UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
	priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
	/* Don't count the space hard-coded to the IANA port */
	priv->nic_info.tables[0].n_entries =
		mlx5_vxlan_max_udp_ports(priv->mdev) - 1;

	priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
}

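/* With this info registered, the udp_tunnel_nic core keeps the device's
 * VXLAN port table in sync automatically. For example, creating a VXLAN
 * device on a non-default port (command illustrative):
 *
 *	ip link add vxlan1 type vxlan id 42 dev eth0 dstport 4790
 *
 * ends up calling mlx5e_vxlan_set_port() for port 4790, while the IANA
 * port 4789 is covered by the static entry excluded from n_entries above.
 */
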
static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
			return true;
	}
	return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, mdev->device);

	netdev->netdev_ops = &mlx5e_netdev_ops;
	netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;

	mlx5e_dcbnl_build_netdev(netdev);

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_HW_MACSEC;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;
	netdev->vlan_features |= NETIF_F_GSO_PARTIAL;

	netdev->mpls_features |= NETIF_F_SG;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->mpls_features |= NETIF_F_TSO;
	netdev->mpls_features |= NETIF_F_TSO6;

	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* Tunneled LRO is not supported in the driver, and the same RQs are
	 * shared between inner and outer TIRs, so the driver can't disable LRO
	 * for inner TIRs while having it enabled for outer TIRs. Due to this,
	 * block LRO altogether if the firmware declares tunneled LRO support.
	 */
	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
	    !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
						   MLX5E_MPWRQ_UMR_MODE_ALIGNED))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
		netdev->hw_features |= NETIF_F_GSO_GRE |
				       NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
		netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
				       NETIF_F_GSO_IPXIP6;
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
					   NETIF_F_GSO_IPXIP6;
		netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
						NETIF_F_GSO_IPXIP6;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features |= NETIF_F_GSO_UDP_L4;
	netdev->features |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	if (mlx5_qos_is_supported(mdev))
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->features = netdev->hw_features;

	/* Defaults */
	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;
	netdev->features &= ~NETIF_F_LRO;
	netdev->features &= ~NETIF_F_GRO_HW;
	netdev->features &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
		netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
	mlx5e_set_xdp_feature(netdev);
	mlx5e_set_netdev_dev_addr(netdev);
	mlx5e_macsec_build_netdev(priv);
	mlx5e_ipsec_build_netdev(priv);
	mlx5e_ktls_build_netdev(priv);
}

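/* Note on the flag hierarchy built above: hw_features advertises what the
 * device can do and is what ethtool -K may toggle, while features is the
 * currently enabled set (LRO, HW-GRO, RXFCS and RXALL are capable but off
 * by default here). For example, enabling LRO at runtime (interface name
 * illustrative):
 *
 *	ethtool -K eth0 lro on
 *
 * only succeeds when NETIF_F_LRO was kept in hw_features by the striding
 * RQ / tunneled-LRO checks earlier in this function.
 */
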
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
	if (!err)
		priv->q_counter =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);

	err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
	if (!err)
		priv->drop_rq_q_counter =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}

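/* The two queue counters allocated above back the out-of-buffer
 * diagnostics: q_counter is attached to the regular RQs and
 * drop_rq_q_counter to the drop RQ, and their out_of_buffer readings are
 * exposed through the driver's stats (e.g. the rx_out_of_buffer counter
 * reported via ethtool -S). A counter id of zero is treated as "not
 * allocated" by mlx5e_destroy_q_counters() below.
 */
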
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	if (priv->q_counter) {
		MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
	}
}

static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_flow_steering *fs;
	int err;

	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
	mlx5e_vxlan_set_netdev_info(priv);

	mlx5e_timestamp_init(priv);

	priv->dfs_root = debugfs_create_dir("nic",
					    mlx5_debugfs_get_dev_root(mdev));

	fs = mlx5e_fs_init(priv->profile, mdev,
			   !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
			   priv->dfs_root);
	if (!fs) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
		debugfs_remove_recursive(priv->dfs_root);
		return err;
	}
	priv->fs = fs;

	err = mlx5e_ktls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);

	mlx5e_health_create_reporters(priv);

	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);

	return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_health_destroy_reporters(priv);
	mlx5e_ktls_cleanup(priv);
	mlx5e_fs_cleanup(priv->fs);
	debugfs_remove_recursive(priv->dfs_root);
	priv->fs = NULL;
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	enum mlx5e_rx_res_features features;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res)
		return -ENOMEM;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	features = MLX5E_RX_RES_FEATURE_PTP;
	if (mlx5_tunnel_inner_ft_supported(mdev))
		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
					 priv->netdev);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_rx_res;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	err = mlx5e_accel_init_rx(priv);
	if (err)
		goto err_tc_nic_cleanup;

#ifdef CONFIG_MLX5_EN_ARFS
	priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
#endif

	return 0;

err_tc_nic_cleanup:
	mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
				    priv->profile);
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;

	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_accel_cleanup_rx(priv);
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
				    priv->profile);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}

static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params;
	struct mlx5e_mqprio_rl *rl;

	params = &priv->channels.params;
	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
		return;

	rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
				    params->mqprio.channel.max_rate);
	if (IS_ERR(rl))
		return;

	priv->mqprio_rl = rl;
	mlx5e_mqprio_rl_update_params(params, rl);
}

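/* The rate limiter created here backs mqprio "channel" mode with per-TC
 * max_rate shapers, configured from userspace roughly as follows (values
 * and interface name illustrative):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *		queues 2@0 2@2 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * mlx5e_mqprio_rl_create() then programs those per-TC rates into the
 * device.
 */
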
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	err = mlx5e_accel_init_tx(priv);
	if (err)
		goto err_destroy_tises;

	mlx5e_set_mqprio_rl(priv);
	mlx5e_dcbnl_initialize(priv);
	return 0;

err_destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_fs_init_l2_addr(priv->fs, netdev);
	mlx5e_ipsec_init(priv);

	err = mlx5e_macsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);

	/* Mark the link as currently not needed by the driver */
	if (!netif_running(netdev))
		mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add_netdev(mdev, netdev);

	mlx5e_enable_async_events(priv);
	mlx5e_enable_blocking_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	mlx5e_hv_vhca_stats_create(priv);
	if (netdev->reg_state != NETREG_REGISTERED)
		return;
	mlx5e_dcbnl_init_app(priv);

	mlx5e_nic_set_rx_mode(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_nic_set_rx_mode(priv);

	mlx5e_hv_vhca_stats_destroy(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_blocking_events(priv);
	if (priv->en_trap) {
		mlx5e_deactivate_trap(priv);
		mlx5e_close_trap(priv->en_trap);
		priv->en_trap = NULL;
	}
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	mlx5_vxlan_reset_to_default(mdev->vxlan);
	mlx5e_macsec_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
}

int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
	return mlx5e_refresh_tirs(priv, false, false);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_rx	   = mlx5e_update_nic_rx,
	.update_stats	   = mlx5e_stats_update_ndo_stats,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers       = &mlx5e_rx_handlers_nic,
	.max_tc		   = MLX5E_MAX_NUM_TC,
	.stats_grps	   = mlx5e_nic_stats_grps,
	.stats_grps_num	   = mlx5e_nic_stats_grps_num,
	.features          = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
		BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
		BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
		BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
		BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};

static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
					  const struct mlx5e_profile *profile)
{
	int nch;

	nch = mlx5e_get_max_num_channels(mdev);

	if (profile->max_nch_limit)
		nch = min_t(int, nch, profile->max_nch_limit(mdev));
	return nch;
}

static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
		   const struct mlx5e_profile *profile)
{
	unsigned int max_nch, tmp;

	/* core resources */
	max_nch = mlx5e_profile_max_num_channels(mdev, profile);

	/* netdev rx queues */
	max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);

	/* netdev tx queues */
	tmp = netdev->num_tx_queues;
	if (mlx5_qos_is_supported(mdev))
		tmp -= mlx5e_qos_max_leaf_nodes(mdev);
	if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
		tmp -= profile->max_tc;
	tmp = tmp / profile->max_tc;
	max_nch = min_t(unsigned int, max_nch, tmp);

	return max_nch;
}

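/* Worked example with illustrative numbers: a profile limit of 32
 * channels, a netdev allocated with 64 RX and 128 TX queues, max_tc = 8,
 * QOS unsupported and a PTP TX port present (ts_cqe_to_dest_cqn set)
 * gives tmp = (128 - 8) / 8 = 15, so max_nch = min(32, 64, 15) = 15.
 */
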
int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
{
	/* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
	 * and 1 set of direct TIRS
	 */
	return 2 * MLX5E_NUM_INDIR_TIRS
		+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}

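/* Illustrative arithmetic: if MLX5E_NUM_INDIR_TIRS were 10 and the NIC
 * profile's channel limit 32, this would report 2 * 10 + 32 = 62 TIRs -
 * one indirect TIR per traffic type for both the inner and outer TTC
 * tables, plus one direct TIR per channel - for callers budgeting the
 * PF's TIR resources.
 */
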
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
}

/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
		    const struct mlx5e_profile *profile,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev)
{
	int nch, num_txqs, node;
	int err;

	num_txqs = netdev->num_tx_queues;
	nch = mlx5e_calc_max_nch(mdev, netdev, profile);
	node = dev_to_node(mlx5_core_dma_dev(mdev));

	/* priv init */
	priv->mdev        = mdev;
	priv->netdev      = netdev;
	priv->msglevel    = MLX5E_MSG_LEVEL;
	priv->max_nch     = nch;
	priv->max_opened_tc = 1;

	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
		return -ENOMEM;

	mutex_init(&priv->state_lock);

	err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
	if (err)
		goto err_free_cpumask;

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_free_selq;

	priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
	if (!priv->txq2sq)
		goto err_destroy_workqueue;

	priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
	if (!priv->tx_rates)
		goto err_free_txq2sq;

	priv->channel_stats =
		kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
	if (!priv->channel_stats)
		goto err_free_tx_rates;

	return 0;

err_free_tx_rates:
	kfree(priv->tx_rates);
err_free_txq2sq:
	kfree(priv->txq2sq);
err_destroy_workqueue:
	destroy_workqueue(priv->wq);
err_free_selq:
	mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
	free_cpumask_var(priv->scratchpad.cpumask);
	return -ENOMEM;
}

void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
	int i;

	/* bail if the profile change failed and the rollback failed too */
	if (!priv->mdev)
		return;

	for (i = 0; i < priv->stats_nch; i++)
		kvfree(priv->channel_stats[i]);
	kfree(priv->channel_stats);
	kfree(priv->tx_rates);
	kfree(priv->txq2sq);
	destroy_workqueue(priv->wq);
	mutex_lock(&priv->state_lock);
	mlx5e_selq_cleanup(&priv->selq);
	mutex_unlock(&priv->state_lock);
	free_cpumask_var(priv->scratchpad.cpumask);

	for (i = 0; i < priv->htb_max_qos_sqs; i++)
		kfree(priv->htb_qos_sq_stats[i]);
	kvfree(priv->htb_qos_sq_stats);

	memset(priv, 0, sizeof(*priv));
}

static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
					   const struct mlx5e_profile *profile)
{
	unsigned int nch, ptp_txqs, qos_txqs;

	nch = mlx5e_profile_max_num_channels(mdev, profile);

	ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
		mlx5e_profile_feature_cap(profile, PTP_TX) ?
		profile->max_tc : 0;

	qos_txqs = mlx5_qos_is_supported(mdev) &&
		mlx5e_profile_feature_cap(profile, QOS_HTB) ?
		mlx5e_qos_max_leaf_nodes(mdev) : 0;

	return nch * profile->max_tc + ptp_txqs + qos_txqs;
}

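/* Worked example (illustrative caps): 16 channels, max_tc = 8, a PTP TX
 * port and 256 HTB QOS leaf nodes give 16 * 8 + 8 + 256 = 392 TX queues
 * requested from alloc_etherdev_mqs() in mlx5e_create_netdev() below.
 */
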
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
					   const struct mlx5e_profile *profile)
{
	return mlx5e_profile_max_num_channels(mdev, profile);
}

struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
	struct net_device *netdev;
	unsigned int txqs, rxqs;
	int err;

	txqs = mlx5e_get_max_num_txqs(mdev, profile);
	rxqs = mlx5e_get_max_num_rxqs(mdev, profile);

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
		goto err_free_netdev;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	dev_net_set(netdev, mlx5_core_net(mdev));

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

static void mlx5e_update_features(struct net_device *netdev)
{
	if (netdev->reg_state != NETREG_REGISTERED)
		return; /* features will be updated on netdev registration */

	rtnl_lock();
	netdev_update_features(netdev);
	rtnl_unlock();
}

static void mlx5e_reset_channels(struct net_device *netdev)
{
	netdev_reset_tc(netdev);
}

int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
	const struct mlx5e_profile *profile = priv->profile;
	int max_nch;
	int err;

	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));

	/* Validate the max_wqe_size_sq capability. */
	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %u\n",
			       mlx5e_get_max_sq_wqebbs(priv->mdev), (unsigned int)MLX5E_MAX_TX_WQEBBS);
		return -EIO;
	}

	/* max number of channels may have changed */
	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		/* Reducing the number of channels - RXFH has to be reset, and
		 * mlx5e_num_channels_changed below will build the RQT.
		 */
		priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		priv->channels.params.num_channels = max_nch;
		if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
			mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
			mlx5e_params_mqprio_reset(&priv->channels.params);
		}
	}
	if (max_nch != priv->max_nch) {
		mlx5_core_warn(priv->mdev,
			       "MLX5E: Updating max number of channels from %u to %u\n",
			       priv->max_nch, max_nch);
		priv->max_nch = max_nch;
	}

	/* 1. Set the real number of queues in the kernel the first time.
	 * 2. Set our default XPS cpumask.
	 * 3. Build the RQT.
	 *
	 * rtnl_lock is required by netif_set_real_num_*_queues in case the
	 * netdev has been registered by this point (if this function was called
	 * in the reload or resume flow).
	 */
	if (take_rtnl)
		rtnl_lock();
	err = mlx5e_num_channels_changed(priv);
	if (take_rtnl)
		rtnl_unlock();
	if (err)
		goto out;

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	mlx5e_update_features(priv->netdev);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	mlx5e_reset_channels(priv->netdev);
	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	cancel_work_sync(&priv->update_stats_work);
	return err;
}

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (priv->fs)
		mlx5e_fs_set_state_destroy(priv->fs,
					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	mlx5e_reset_channels(priv->netdev);
	cancel_work_sync(&priv->update_stats_work);
}

static int
mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
			  const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
		return err;
	}
	netif_carrier_off(netdev);
	priv->profile = new_profile;
	priv->ppriv = new_ppriv;
	err = new_profile->init(priv->mdev, priv->netdev);
	if (err)
		goto priv_cleanup;

	return 0;

priv_cleanup:
	mlx5e_priv_cleanup(priv);
	return err;
}

static int
mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
			    const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err)
		goto profile_cleanup;
	return err;

profile_cleanup:
	new_profile->cleanup(priv);
	mlx5e_priv_cleanup(priv);
	return err;
}

int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
				const struct mlx5e_profile *new_profile, void *new_ppriv)
{
	const struct mlx5e_profile *orig_profile = priv->profile;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *orig_ppriv = priv->ppriv;
	int err, rollback_err;

	/* cleanup old profile */
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_priv_cleanup(priv);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
		set_bit(MLX5E_STATE_DESTROYING, &priv->state);
		return -EIO;
	}

	err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
	if (err) { /* roll back to original profile */
		netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
		goto rollback;
	}

	return 0;

rollback:
	rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
	if (rollback_err)
		netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
			   __func__, rollback_err);
	return err;
}

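/* Profile switch call order, for reference: detach (close channels, tear
 * down RX/TX objects), cleanup of the old profile and priv, then init and
 * attach of the new profile; on failure the same sequence is retried with
 * the original profile, and only a failed rollback leaves the netdev
 * without an initialized priv (see the !priv->mdev bail-out in
 * mlx5e_priv_cleanup()).
 */
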
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
{
	mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	mlx5e_priv_cleanup(priv);
	free_netdev(netdev);
}

static int mlx5e_resume(struct auxiliary_device *adev)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = edev->mdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!netif_device_present(netdev)) {
		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
			mlx5e_destroy_mdev_resources(mdev);
		return -ENODEV;
	}

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
	return 0;
}

static int mlx5e_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
	const struct mlx5e_profile *profile = &mlx5e_nic_profile;
	struct mlx5_core_dev *mdev = edev->mdev;
	struct mlx5e_dev *mlx5e_dev;
	struct net_device *netdev;
	pm_message_t state = {};
	struct mlx5e_priv *priv;
	int err;

	mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
	if (IS_ERR(mlx5e_dev))
		return PTR_ERR(mlx5e_dev);
	auxiliary_set_drvdata(adev, mlx5e_dev);

	err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
		goto err_devlink_unregister;
	}

	netdev = mlx5e_create_netdev(mdev, profile);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		err = -ENOMEM;
		goto err_devlink_port_unregister;
	}
	SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);

	mlx5e_build_nic_netdev(netdev);

	priv = netdev_priv(netdev);
	mlx5e_dev->priv = priv;

	priv->profile = profile;
	priv->ppriv = NULL;

	err = profile->init(mdev, netdev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_resume(adev);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
		goto err_profile_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_resume;
	}

	mlx5e_dcbnl_init_app(priv);
	mlx5_core_uplink_netdev_set(mdev, netdev);
	mlx5e_params_print_info(mdev, &priv->channels.params);
	return 0;

err_resume:
	mlx5e_suspend(adev, state);
err_profile_cleanup:
	profile->cleanup(priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_devlink_port_unregister:
	mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
	mlx5e_destroy_devlink(mlx5e_dev);
	return err;
}

static void mlx5e_remove(struct auxiliary_device *adev)
{
	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
	struct mlx5e_priv *priv = mlx5e_dev->priv;
	pm_message_t state = {};

	mlx5_core_uplink_netdev_set(priv->mdev, NULL);
	mlx5e_dcbnl_delete_app(priv);
	unregister_netdev(priv->netdev);
	mlx5e_suspend(adev, state);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
	mlx5e_devlink_port_unregister(mlx5e_dev);
	mlx5e_destroy_devlink(mlx5e_dev);
}

static const struct auxiliary_device_id mlx5e_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);

static struct auxiliary_driver mlx5e_driver = {
	.name = "eth",
	.probe = mlx5e_probe,
	.remove = mlx5e_remove,
	.suspend = mlx5e_suspend,
	.resume = mlx5e_resume,
	.id_table = mlx5e_id_table,
};

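/* The auxiliary bus matches this driver by name: the device created by
 * the core driver is "<MLX5_ADEV_NAME>.eth" (i.e. "mlx5_core.eth",
 * assuming MLX5_ADEV_NAME expands to the core module's name), so no PCI
 * IDs are needed here - the core driver decides when an Ethernet
 * function exists and creates or destroys the auxiliary device.
 */
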
int mlx5e_init(void)
{
	int ret;

	mlx5e_build_ptys2ethtool_map();
	ret = auxiliary_driver_register(&mlx5e_driver);
	if (ret)
		return ret;

	ret = mlx5e_rep_init();
	if (ret)
		auxiliary_driver_unregister(&mlx5e_driver);
	return ret;
}

void mlx5e_cleanup(void)
{
	mlx5e_rep_cleanup();
	auxiliary_driver_unregister(&mlx5e_driver);
}