// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "en_accel/ipsec.h"
enum {
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED = 0x9,
	MLX5_EQ_STATE_FIRED = 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
/* The polling budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee
 * that the ci is updated before all the entries in the EQ have been polled.
 * MLX5_NUM_SPARE_EQE is also added when sizing the EQ, so the budget is
 * smaller than the EQ size as well.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
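
/* create_map_eq() below sizes the EQ as order_base_2(nent + MLX5_NUM_SPARE_EQE),
 * so a single polling pass of at most MLX5_EQ_POLLING_BUDGET entries always
 * leaves spare headroom: the consumer index is published before the ring can
 * wrap onto unpolled entries.
 */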
struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq_async	pages_eq;
	struct mlx5_eq_async	cmd_eq;
	struct mlx5_eq_async	async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_eqs;
	struct mlx5_irq_table	*irq_table;
	struct mlx5_irq		**comp_irqs;
	struct mlx5_irq		*ctrl_irq;
	struct cpu_rmap		*rmap;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	cq = radix_tree_lookup(&table->tree, cqn);
static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;

	eqe = next_eqe_sw(eq);

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		dev_dbg_ratelimited(eq->dev->device,
				    "Completion event for bogus CQ 0x%x\n", cqn);
	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

	tasklet_schedule(&eq_comp->tasklet_ctx.task);
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}
static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};
static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;

	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
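
/* Entries start out marked with MLX5_EQE_OWNER_INIT_VAL, i.e. not yet valid to
 * software; hardware flips the owner bit as it writes entries, and consumers
 * (see mlx5_eq_get_eqe() below) compare the owner bit against the consumer
 * index wrap bit to decide whether an entry may be read on the current pass.
 */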
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;

	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);

	eq->irq = param->irq;
	vecidx = mlx5_irq_get_index(eq->irq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);

	/* failure paths */
	mlx5_cmd_destroy_eq(dev, eq->eqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to enable
 * @nb : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);

EXPORT_SYMBOL(mlx5_eq_enable);
/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to disable
 * @nb : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
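
/* Hedged usage sketch (illustrative only, not part of this driver): a consumer
 * typically brackets the lifetime of an EQ it owns with the two helpers above,
 * passing the notifier block whose ->notifier_call handles the EQ's interrupt.
 * The function name below is a hypothetical example.
 */
#if 0
static int example_use_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			  struct notifier_block *nb)
{
	int err;

	err = mlx5_eq_enable(dev, eq, nb);	/* attach nb to the EQ's IRQ */
	if (err)
		return err;

	/* ... consume EQEs via nb->notifier_call ... */

	mlx5_eq_disable(dev, eq, nb);		/* must precede EQ destruction */
	return 0;
}
#endif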
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_frag_buf_free(dev, &eq->frag_buf);
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",

	mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
				 dev->priv.numa_node);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}
static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}
static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_NUM_ASYNC_EQE;
}
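
/* The depth consulted above is the generic devlink "event_eq_size" driverinit
 * parameter ("io_eq_size" is the completion-EQ counterpart further down).
 * Assuming the standard devlink CLI, it would be tuned roughly like this
 * (the device address is an example):
 *
 *   devlink dev param set pci/0000:06:00.0 name event_eq_size value 1024 cmode driverinit
 *   devlink dev reload pci/0000:06:00.0
 */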
static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	/* All the async EQs use a single IRQ; request one IRQ and share its
	 * index among all the async EQs of this device.
	 */
	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(table->ctrl_irq))
		return PTR_ERR(table->ctrl_irq);

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = async_eq_depth_devlink_param_get(dev),
	};
	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};
	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");

	/* failure paths: undo what was set up above */
	cleanup_async_eq(dev, &table->async_eq, "async");

	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");

	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(table->ctrl_irq);
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}
/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
					   dev->priv.numa_node);

	if (!eq)
		return ERR_PTR(-ENOMEM);

	param->irq = dev->priv.eq_table->ctrl_irq;
	err = create_async_eq(dev, eq, param);

EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	err = destroy_async_eq(dev, eq);

EXPORT_SYMBOL(mlx5_eq_destroy_generic);
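
/* Hedged usage sketch (illustrative only, not part of this driver): how a
 * mlx5_core consumer might use the generic EQ API above together with
 * mlx5_eq_enable(). The depth, event mask and function name are assumptions
 * made for the example.
 */
#if 0
static struct mlx5_eq *example_create_generic_eq(struct mlx5_core_dev *dev,
						 struct notifier_block *nb)
{
	struct mlx5_eq_param param = {
		.nent = 256,					/* example depth */
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,	/* example event */
	};
	struct mlx5_eq *eq;
	int err;

	eq = mlx5_eq_create_generic(dev, &param);	/* irq is filled in internally */
	if (IS_ERR(eq))
		return eq;

	err = mlx5_eq_enable(dev, eq, nb);
	if (err) {
		mlx5_eq_destroy_generic(dev, eq);
		return ERR_PTR(err);
	}
	return eq;
}
#endif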
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
EXPORT_SYMBOL(mlx5_eq_update_ci);
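
/* Hedged usage sketch (illustrative only): draining a generic EQ with the two
 * exported helpers above. The budget reuses the file-local constant and the
 * per-EQE handler is a hypothetical name used for the example.
 */
#if 0
static void example_poll_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while (cc < MLX5_EQ_POLLING_BUDGET && (eqe = mlx5_eq_get_eqe(eq, cc))) {
		example_handle_eqe(eqe);	/* hypothetical consumer handler */
		cc++;
	}

	/* Publish the new consumer index and re-arm the EQ. */
	mlx5_eq_update_ci(eq, cc, true);
}
#endif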
static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
}

static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	const struct cpumask *prev = cpu_none_mask;
	const struct cpumask *mask;

	ncomp_eqs = table->num_comp_eqs;
	cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);

	for_each_numa_hop_mask(mask, dev->priv.numa_node) {
		for_each_cpu_andnot(cpu, mask, prev) {
			if (++i == ncomp_eqs)

	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
}

static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int ncomp_eqs = table->num_comp_eqs;

	return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
}

static void comp_irqs_release(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mlx5_core_is_sf(dev) ? comp_irqs_release_sf(dev) :
		comp_irqs_release_pci(dev);

	kfree(table->comp_irqs);
}

static int comp_irqs_request(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	ncomp_eqs = table->num_comp_eqs;
	table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
	if (!table->comp_irqs)
		return -ENOMEM;

	ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) :
				     comp_irqs_request_pci(dev);
	if (ret < 0)
		kfree(table->comp_irqs);
#ifdef CONFIG_RFS_ACCEL
static int alloc_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	/* rmap is a mapping between irq number and queue number.
	 * Each irq can be assigned only to a single rmap.
	 * Since SFs share IRQs, rmap mapping cannot function correctly
	 * for irqs that are shared between different core/netdev RX rings.
	 * Hence we don't allow netdev rmap for SFs.
	 */
	if (mlx5_core_is_sf(mdev))
		return 0;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
	if (!eq_table->rmap)
		return -ENOMEM;
	return 0;
}

static void free_rmap(struct mlx5_core_dev *mdev)
{
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;

	if (eq_table->rmap) {
		free_irq_cpu_rmap(eq_table->rmap);
		eq_table->rmap = NULL;
	}
}
#else
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
	}
	comp_irqs_release(dev);
}
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_COMP_EQ_SIZE;
}
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	err = alloc_rmap(dev);

	ncomp_eqs = comp_irqs_request(dev);

	INIT_LIST_HEAD(&table->comp_eqs_list);
	nent = comp_eq_depth_devlink_param_get(dev);

	for (i = 0; i < ncomp_eqs; i++) {
		struct mlx5_eq_param param = {};

		eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq = table->comp_irqs[i],
			.nent = nent,
		};
		err = create_map_eq(dev, &eq->core, &param);

		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err)
			destroy_unmap_eq(dev, &eq->core);

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	table->num_comp_eqs = ncomp_eqs;

	/* failure path: destroy any EQs created above */
	destroy_comp_eqs(dev);
static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
			  unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			if (irqn)
				*irqn = eq->core.irqn;
			if (eqn)
				*eqn = eq->core.eqn;
		}
	}
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
{
	return vector2eqnirqn(dev, vector, eqn, NULL);
}
EXPORT_SYMBOL(mlx5_vector2eqn);
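
/* Hedged usage sketch (illustrative only): a consumer usually maps one of its
 * completion vectors to an EQ number before creating a CQ bound to that EQ.
 * Wrapping the vector by mlx5_comp_vectors_count() is an assumption made for
 * the example.
 */
#if 0
static int example_vector_to_eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
{
	/* vector indexes the ordered comp_eqs_list, it is not an IRQ number */
	return mlx5_vector2eqn(dev, vector % mlx5_comp_vectors_count(dev), eqn);
}
#endif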
int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
	return vector2eqnirqn(dev, vector, NULL, irqn);
}

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (i++ == vector)
			return mlx5_irq_get_affinity_mask(eq->core.irq);
	}
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	mlx5_irq_table_free_irqs(dev);
	mutex_unlock(&table->lock);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif
static int get_num_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	/* If ethernet is disabled we use just a single completion vector to
	 * have the other vectors available for other drivers using mlx5_core.
	 * For example, mlx5_vdpa.
	 */
	if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
		return 1;

	max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);

	num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
			max_dev_eqs - MLX5_MAX_ASYNC_EQS);
	if (mlx5_core_is_sf(dev)) {
		max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
		num_eqs = min_t(int, num_eqs, max_eqs_sf);
	}

	return num_eqs;
}
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	eq_table->num_comp_eqs = get_num_eqs(dev);
	err = create_async_eqs(dev);
	if (err)
		mlx5_core_err(dev, "Failed to create async EQs\n");

	err = create_comp_eqs(dev);
	if (err)
		mlx5_core_err(dev, "Failed to create completion EQs\n");

	/* failure path: tear down the async EQs created above */
	destroy_async_eqs(dev);

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
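
/* Hedged usage sketch (illustrative only, not part of this driver): subscribing
 * to a single async event type with the notifier API above. MLX5_NB_INIT()
 * binds the mlx5_nb to one event type (MLX5_EVENT_TYPE_NOTIFY_ANY receives all
 * events); the handler name below is a hypothetical example.
 */
#if 0
static int example_port_change(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	/* data is the struct mlx5_eqe delivered by mlx5_eq_async_int() */
	return NOTIFY_OK;
}

static void example_subscribe(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	MLX5_NB_INIT(nb, example_port_change, PORT_CHANGE);
	mlx5_eq_notifier_register(dev, nb);
}
#endif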