1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies */
4 #include <net/page_pool.h>
/*
 * NAPI poll handler for the dedicated trap channel.
 * Polls the trap RQ's completion queue for up to @budget completions and
 * re-posts RX WQEs; remains "busy" (stays scheduled) while either sub-task
 * reports outstanding work. When idle, completes NAPI and re-arms the CQ.
 * NOTE(review): this chunk is elided — declarations of work_done/busy and
 * the busy/early-return statements are not visible here; verify against the
 * full file before relying on control flow.
 */
9 static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
11 struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi);
12 struct mlx5e_ch_stats *ch_stats = trap_ctx->stats;
13 struct mlx5e_rq *rq = &trap_ctx->rq;
/* Drain completions, then refill the RQ; either may leave work pending. */
19 work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
20 busy |= work_done == budget;
21 busy |= rq->post_wqes(rq);
/* If another context re-scheduled us, skip re-arming the CQ. */
26 if (unlikely(!napi_complete_done(napi, work_done)))
29 mlx5e_cq_arm(&rq->cq);
/*
 * Allocate the software-side resources of the trap RQ:
 * cyclic work queue, per-WQE fragment array, DMA-info list and a page_pool,
 * then pre-fill every RX WQE's data segments. HW RQ creation happens later
 * in mlx5e_create_rq() (see mlx5e_open_trap_rq()).
 * On failure, resources acquired so far are released in reverse order via
 * the goto-unwind labels at the bottom.
 * NOTE(review): chunk is elided — several statements (error checks, loop
 * headers, labels, return) are not visible here.
 */
33 static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
34 struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
35 struct mlx5e_ch_stats *ch_stats,
38 void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
39 struct mlx5_core_dev *mdev = priv->mdev;
40 struct page_pool_params pp_params = {};
/* Keep all allocations on the NUMA node of the device. */
41 int node = dev_to_node(mdev->device);
47 rqp->wq.db_numa_node = node;
49 rq->wq_type = params->rq_wq_type;
50 rq->pdev = mdev->device;
51 rq->netdev = priv->netdev;
55 rq->clock = &mdev->clock;
56 rq->tstamp = &priv->tstamp;
57 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
/* Trap RQ carries no XDP program; mark its rxq info as unused. */
59 xdp_rxq_info_unused(&rq->xdp_rxq);
61 rq->buff.map_dir = DMA_FROM_DEVICE;
62 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
63 pool_size = 1 << params->log_rq_mtu_frames;
65 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
/* Point the doorbell at the receive doorbell record. */
69 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
71 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
73 rq->wqe.info = rqp->frags_info;
74 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
/* One frag descriptor per (WQE, fragment) pair. */
75 rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
76 (wq_sz << rq->wqe.info.log_num_frags)),
80 goto err_wq_cyc_destroy;
83 err = mlx5e_init_di_list(rq, wq_sz, node);
87 rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
89 mlx5e_rq_set_trap_handlers(rq, params);
91 /* Create a page_pool and register it with rxq */
93 pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
94 pp_params.pool_size = pool_size;
96 pp_params.dev = mdev->device;
97 pp_params.dma_dir = rq->buff.map_dir;
99 /* page_pool can be used even when there is no rq->xdp_prog,
100 * given page_pool does not handle DMA mapping there is no
101 * required state to clear. And page_pool gracefully handle
104 rq->page_pool = page_pool_create(&pp_params);
105 if (IS_ERR(rq->page_pool)) {
106 err = PTR_ERR(rq->page_pool);
/* NULL the pointer so the free path skips page_pool_destroy(). */
107 rq->page_pool = NULL;
108 goto err_free_di_list;
/* Pre-program every WQE's scatter entries with size/lkey. */
110 for (i = 0; i < wq_sz; i++) {
111 struct mlx5e_rx_wqe_cyc *wqe =
112 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
115 for (f = 0; f < rq->wqe.info.num_frags; f++) {
116 u32 frag_size = rq->wqe.info.arr[f].frag_size |
117 MLX5_HW_START_PADDING;
119 wqe->data[f].byte_count = cpu_to_be32(frag_size);
120 wqe->data[f].lkey = rq->mkey_be;
122 /* check if num_frags is not a pow of two */
/* Pad the remaining data segments with an invalid-lkey terminator. */
123 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
124 wqe->data[f].byte_count = 0;
125 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
126 wqe->data[f].addr = 0;
/* Error unwind: release in reverse order of acquisition. */
132 mlx5e_free_di_list(rq);
134 kvfree(rq->wqe.frags);
136 mlx5_wq_destroy(&rq->wq_ctrl);
/*
 * Release the software resources of the trap RQ, in reverse order of
 * mlx5e_alloc_trap_rq(): page pool, DMA-info list, frag array, cyclic WQ.
 * page_pool_destroy() tolerates NULL (set on page_pool_create() failure).
 */
141 static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
143 page_pool_destroy(rq->page_pool);
144 mlx5e_free_di_list(rq);
145 kvfree(rq->wqe.frags);
146 mlx5_wq_destroy(&rq->wq_ctrl);
/*
 * Fully open the trap RQ: open its CQ, allocate the SW RQ, create the HW RQ
 * object and transition it RST -> RDY so it can receive.
 * On failure the steps completed so far are undone in reverse order.
 * NOTE(review): chunk is elided — goto labels, mlx5e_close_cq() on the
 * unwind path and the return statement are not visible here.
 */
149 static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
150 struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
151 struct mlx5e_rq_param *rq_param,
152 struct mlx5e_ch_stats *ch_stats,
155 struct mlx5_core_dev *mdev = priv->mdev;
156 struct mlx5e_create_cq_param ccp = {};
/* Zeroed moderation: no interrupt coalescing on the trap channel. */
157 struct dim_cq_moder trap_moder = {};
158 struct mlx5e_cq *cq = &rq->cq;
161 ccp.node = dev_to_node(mdev->device);
162 ccp.ch_stats = ch_stats;
165 err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
169 err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
173 err = mlx5e_create_rq(rq, rq_param);
177 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
/* Error unwind. */
184 mlx5e_destroy_rq(rq);
185 mlx5e_free_rx_descs(rq);
187 mlx5e_free_trap_rq(rq);
/*
 * Tear down the trap RQ: destroy the HW RQ object, free any posted RX
 * descriptors, release the SW RQ resources, then close the CQ — the exact
 * reverse of mlx5e_open_trap_rq().
 */
194 static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
196 mlx5e_destroy_rq(rq);
197 mlx5e_free_rx_descs(rq);
198 mlx5e_free_trap_rq(rq);
199 mlx5e_close_cq(&rq->cq);
/*
 * Create a direct (non-hashing) TIR that dispatches straight to the trap
 * RQ identified by @rqn, so trapped packets bypass RSS.
 * NOTE(review): chunk is elided — the NULL check after kvzalloc(), the
 * kvfree(in) and the return are not visible here.
 */
202 static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
210 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
211 in = kvzalloc(inlen, GFP_KERNEL);
215 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
216 MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
/* No RX hash: direct dispatch to a single RQ. */
217 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
218 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
219 MLX5_SET(tirc, tirc, inline_rqn, rqn);
220 err = mlx5e_create_tir(mdev, tir, in);
/* Destroy the trap's direct-RQ TIR (counterpart of the create above). */
226 static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
228 mlx5e_destroy_tir(mdev, tir);
/* Mark the trap RQ enabled so the datapath starts processing it. */
231 static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
233 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
/* Clear the enabled bit so the datapath stops processing the trap RQ. */
236 static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
238 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
/*
 * Build the parameters of the trap channel: force a cyclic (legacy) RQ,
 * size it for the netdev's maximum MTU so any trapped packet fits, and
 * derive the RQ creation parameters.
 */
241 static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
243 struct mlx5e_params *params = &t->params;
245 params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
246 mlx5e_init_rq_type_params(priv->mdev, params);
/* Use max_mtu: trapped traffic must be receivable at any configured MTU. */
247 params->sw_mtu = priv->netdev->max_mtu;
248 mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
/*
 * Allocate and open the trap channel: allocate the context NUMA-local to
 * the first completion vector's CPU, build its params, register NAPI, open
 * the trap RQ and create its direct TIR.
 * Returns the trap context or ERR_PTR() on failure.
 * NOTE(review): chunk is elided — the NULL check on kvzalloc_node(), error
 * checks after the open calls, kvfree(t) on unwind and the returns are not
 * visible here.
 */
251 static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
253 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
254 struct net_device *netdev = priv->netdev;
255 struct mlx5e_trap *t;
258 t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
260 return ERR_PTR(-ENOMEM);
262 mlx5e_build_trap_params(priv, t);
265 t->mdev = priv->mdev;
266 t->tstamp = &priv->tstamp;
267 t->pdev = mlx5_core_dma_dev(priv->mdev);
268 t->netdev = priv->netdev;
269 t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
270 t->stats = &priv->trap_stats.ch;
/* 64 is the NAPI poll weight. */
272 netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
274 err = mlx5e_open_trap_rq(priv, &t->napi,
275 &priv->trap_stats.rq,
276 &t->params, &t->rq_param,
277 &priv->trap_stats.ch,
282 err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
284 goto err_close_trap_rq;
/* Error unwind. */
289 mlx5e_close_trap_rq(&t->rq);
291 netif_napi_del(&t->napi);
/*
 * Close the trap channel: destroy the TIR, tear down the RQ and remove the
 * NAPI instance — the reverse of mlx5e_open_trap().
 * NOTE(review): the kvfree() of the trap context is not visible in this
 * elided chunk.
 */
296 void mlx5e_close_trap(struct mlx5e_trap *trap)
298 mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
299 mlx5e_close_trap_rq(&trap->rq);
300 netif_napi_del(&trap->napi);
/*
 * Start the trap channel: enable NAPI, enable the RQ, then kick NAPI once
 * to post the initial RX WQEs.
 */
304 static void mlx5e_activate_trap(struct mlx5e_trap *trap)
306 napi_enable(&trap->napi);
307 mlx5e_activate_trap_rq(&trap->rq);
308 napi_schedule(&trap->napi);
/*
 * Stop the trap channel: disable the RQ first so no new work is generated,
 * then disable NAPI (napi_disable() waits for any in-flight poll).
 */
311 void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
313 struct mlx5e_trap *trap = priv->en_trap;
315 mlx5e_deactivate_trap_rq(&trap->rq);
316 napi_disable(&trap->napi);
/*
 * Open and activate the trap queue; returns the trap context (or ERR_PTR
 * from mlx5e_open_trap()).
 * NOTE(review): the IS_ERR() early-return and final return are not visible
 * in this elided chunk.
 */
319 static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
321 struct mlx5e_trap *trap;
323 trap = mlx5e_open_trap(priv);
327 mlx5e_activate_trap(trap);
/* Deactivate and close the trap queue, then drop the cached pointer. */
332 static void mlx5e_del_trap_queue(struct mlx5e_priv *priv)
334 mlx5e_deactivate_trap(priv);
335 mlx5e_close_trap(priv->en_trap);
336 priv->en_trap = NULL;
/* Return the TIR number of the trap channel (used as a steering target). */
339 static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap)
341 return en_trap->tir.tirn;
/*
 * Enable TRAP action for @trap_id: lazily open the shared trap queue on the
 * first active trap, then install the matching steering rule that redirects
 * the trapped traffic to the trap TIR.
 * On rule-installation failure the queue is torn down again (only if this
 * call opened it — see open_queue).
 * NOTE(review): chunk is elided — the switch statement header, break/return
 * statements and the error label are not visible here.
 */
344 static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
/* Remember whether we are the ones opening the queue, for the error path. */
346 bool open_queue = !priv->en_trap;
347 struct mlx5e_trap *trap;
351 trap = mlx5e_add_trap_queue(priv);
353 return PTR_ERR(trap);
354 priv->en_trap = trap;
358 case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
359 err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
363 case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
364 err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
369 netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
/* Error unwind: close the queue only if this call opened it. */
377 mlx5e_del_trap_queue(priv);
/*
 * Disable TRAP action for @trap_id: remove the matching steering rule, and
 * tear down the shared trap queue once no traps remain active.
 * NOTE(review): chunk is elided — the switch header, break/return
 * statements are not visible here.
 */
381 static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
384 case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
385 mlx5e_remove_vlan_trap(priv);
387 case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
388 mlx5e_remove_mac_trap(priv);
391 netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
/* Last active trap gone: release the shared trap queue. */
394 if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev))
395 mlx5e_del_trap_queue(priv);
/*
 * Devlink trap event entry point: dispatch the requested action
 * (TRAP/DROP) for the trap described by @trap_ctx. A no-op while the
 * interface is down (configuration is re-applied on open).
 * NOTE(review): chunk is elided — break statements, the default-case error
 * assignment and the return are not visible here.
 */
400 int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx)
404 /* Traps are unarmed when interface is down, no need to update
405 * them. The configuration is saved in the core driver,
406 * queried and applied upon interface up operation in
407 * mlx5e_open_locked().
409 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
412 switch (trap_ctx->action) {
413 case DEVLINK_TRAP_ACTION_TRAP:
414 err = mlx5e_handle_action_trap(priv, trap_ctx->id);
416 case DEVLINK_TRAP_ACTION_DROP:
417 err = mlx5e_handle_action_drop(priv, trap_ctx->id);
420 netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__,
/*
 * Re-apply a single trap's saved configuration: query the action stored in
 * the core driver and, if it is TRAP, either install (@enable) or remove
 * (!@enable) the corresponding datapath state. Used around interface
 * up/down transitions.
 * NOTE(review): chunk is elided — the error check after the query and the
 * return are not visible here.
 */
427 static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable)
429 enum devlink_trap_action action;
432 err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action);
435 if (action == DEVLINK_TRAP_ACTION_TRAP)
436 err = enable ? mlx5e_handle_action_trap(priv, trap_id) :
437 mlx5e_handle_action_drop(priv, trap_id);
/* All generic devlink trap IDs supported by this driver; iterated by
 * mlx5e_apply_traps() to (re-)apply their saved configuration.
 */
441 static const int mlx5e_traps_arr[] = {
442 DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
443 DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
446 int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable)
451 for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) {
452 err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable);