// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)

struct mlx5_irq {
        struct atomic_notifier_head nh;
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq_pool *pool;
        int refcount;
        struct msi_map map;
        u32 pool_index;
};

struct mlx5_irq_table {
        struct mlx5_irq_pool *pcif_pool;
        struct mlx5_irq_pool *sf_ctrl_pool;
        struct mlx5_irq_pool *sf_comp_pool;
};

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 *                                   to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
        int num_vf_msix, min_msix, max_msix;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        /* Limit the maximum number of MSI-X vectors so the default
         * configuration keeps some available in the pool. This allows the
         * user to increase the number of vectors in a VF without first
         * having to size-down other VFs.
         */
        return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}

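/* Worked example (illustrative, hypothetical capability values): with
 * num_total_dynamic_vf_msix = 1024, num_vfs = 64,
 * min_dynamic_vf_msix_table_size = 2 and
 * max_dynamic_vf_msix_table_size = 128, the default is
 * max(min(1024 / 64, 128 / 2), 2) = max(min(16, 64), 2) = 16 vectors per VF,
 * leaving the rest of the dynamic pool available for later resizing.
 */
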
/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
{
        int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *hca_cap = NULL, *query_cap = NULL, *cap;
        int num_vf_msix, min_msix, max_msix;
        int ret;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
                return -EOPNOTSUPP;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        if (msix_vec_count < min_msix)
                return -EINVAL;

        if (msix_vec_count > max_msix)
                return -EOVERFLOW;

        query_cap = kvzalloc(query_sz, GFP_KERNEL);
        hca_cap = kvzalloc(set_sz, GFP_KERNEL);
        if (!hca_cap || !query_cap) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mlx5_vport_get_other_func_general_cap(dev, function_id, query_cap);
        if (ret)
                goto out;

        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
        memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
               MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
        MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
        kvfree(hca_cap);
        kvfree(query_cap);
        return ret;
}

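/* Illustrative sketch (not part of the driver): a PF-side caller resizing a
 * VF's MSI-X table to the default size. The VF count and the assumption that
 * function_id vf + 1 addresses VF number vf are examples, not guarantees.
 */
#if 0
static int example_resize_vf_msix(struct mlx5_core_dev *dev, int vf)
{
        int count = mlx5_get_default_msix_vec_count(dev, 4 /* num_vfs */);

        if (!count)
                return -EOPNOTSUPP;
        return mlx5_set_msix_vec_count(dev, vf + 1, count);
}
#endif
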
static void irq_release(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap *rmap;
#endif

        xa_erase(&pool->irqs, irq->pool_index);
        /* free_irq() requires that the affinity hint and rmap be cleared
         * before calling it. To satisfy this requirement, call
         * irq_cpu_rmap_remove() to remove the notifier.
         */
        irq_update_affinity_hint(irq->map.virq, NULL);
#ifdef CONFIG_RFS_ACCEL
        rmap = mlx5_eq_table_get_rmap(pool->dev);
        if (rmap)
                irq_cpu_rmap_remove(rmap, irq->map.virq);
#endif

        free_cpumask_var(irq->mask);
        free_irq(irq->map.virq, &irq->nh);
        if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev))
                pci_msix_free_irq(pool->dev->pdev, irq->map);
        kfree(irq);
}

int mlx5_irq_put(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;
        int ret = 0;

        mutex_lock(&pool->lock);
        irq->refcount--;
        if (!irq->refcount) {
                irq_release(irq);
                ret = 1;
        }
        mutex_unlock(&pool->lock);
        return ret;
}

int mlx5_irq_read_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        return irq->refcount;
}

int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        if (WARN_ON_ONCE(!irq->refcount))
                return 0;
        irq->refcount++;
        return 1;
}

static int irq_get(struct mlx5_irq *irq)
{
        int err;

        mutex_lock(&irq->pool->lock);
        err = mlx5_irq_get_locked(irq);
        mutex_unlock(&irq->pool->lock);
        return err;
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
        atomic_notifier_call_chain(nh, 0, NULL);
        return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        if (!pool->xa_num_irqs.max) {
                /* in case we only have a single irq for the device */
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
                return;
        }

        if (!vecidx) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
                return;
        }

        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}

struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
                                struct irq_affinity_desc *af_desc,
                                struct cpu_rmap **rmap)
{
        struct mlx5_core_dev *dev = pool->dev;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq *irq;
        int err;

        irq = kzalloc(sizeof(*irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);
        if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
                /* The vector at index 0 is always statically allocated. If
                 * dynamic irq is not supported all vectors are statically
                 * allocated. In both cases just get the irq number and set
                 * the index.
                 */
                irq->map.virq = pci_irq_vector(dev->pdev, i);
                irq->map.index = i;
        } else {
                irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
                if (!irq->map.virq) {
                        err = irq->map.index;
                        goto err_alloc_irq;
                }
        }
        if (i && rmap && *rmap) {
#ifdef CONFIG_RFS_ACCEL
                err = irq_cpu_rmap_add(*rmap, irq->map.virq);
                if (err)
                        goto err_irq_rmap;
#endif
        }
        if (!mlx5_irq_pool_is_sf_pool(pool))
                irq_set_name(pool, name, i);
        else
                irq_sf_set_name(pool, name, i);
        ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
        snprintf(irq->name, MLX5_MAX_IRQ_NAME,
                 "%s@pci:%s", name, pci_name(dev->pdev));
        err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
                          &irq->nh);
        if (err) {
                mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
                goto err_req_irq;
        }
        if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
                err = -ENOMEM;
                goto err_cpumask;
        }
        if (af_desc) {
                cpumask_copy(irq->mask, &af_desc->mask);
                irq_set_affinity_and_hint(irq->map.virq, irq->mask);
        }
        irq->pool = pool;
        irq->refcount = 1;
        irq->pool_index = i;
        err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
                              irq->pool_index, err);
                goto err_xa;
        }
        return irq;
err_xa:
        if (af_desc)
                irq_update_affinity_hint(irq->map.virq, NULL);
        free_cpumask_var(irq->mask);
err_cpumask:
        free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
        if (i && rmap && *rmap) {
                free_irq_cpu_rmap(*rmap);
                *rmap = NULL;
        }
err_irq_rmap:
#endif
        if (i && pci_msix_can_alloc_dyn(dev->pdev))
                pci_msix_free_irq(dev->pdev, irq->map);
err_alloc_irq:
        kfree(irq);
        return ERR_PTR(err);
}

int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int ret;

        ret = irq_get(irq);
        if (!ret)
                /* Something has gone very wrong here: we are enabling an EQ
                 * on a non-existent IRQ.
                 */
                return -ENOENT;
        ret = atomic_notifier_chain_register(&irq->nh, nb);
        if (ret)
                mlx5_irq_put(irq);
        return ret;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int err = 0;

        err = atomic_notifier_chain_unregister(&irq->nh, nb);
        mlx5_irq_put(irq);
        return err;
}

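/* Illustrative sketch: how an EQ-style consumer hooks into an IRQ via the
 * atomic notifier chain. example_eq_int() and example_enable() are
 * hypothetical names, not driver code.
 */
#if 0
static int example_eq_int(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        /* Invoked from irq_int_handler() via the atomic notifier chain */
        return NOTIFY_OK;
}

static int example_enable(struct mlx5_irq *irq, struct notifier_block *nb)
{
        nb->notifier_call = example_eq_int;
        return mlx5_irq_attach_nb(irq, nb); /* paired with mlx5_irq_detach_nb() */
}
#endif
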
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
        return irq->mask;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
        return irq->map.index;
}

/* request an irq from a given pool according to the given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
                        struct irq_affinity_desc *af_desc,
                        struct cpu_rmap **rmap)
{
        struct mlx5_irq *irq;

        mutex_lock(&pool->lock);
        irq = xa_load(&pool->irqs, vecidx);
        if (irq) {
                mlx5_irq_get_locked(irq);
                goto unlock;
        }
        irq = mlx5_irq_alloc(pool, vecidx, af_desc, rmap);
unlock:
        mutex_unlock(&pool->lock);
        return irq;
}

static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_ctrl_pool;
}

static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_comp_pool;
}

struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_irq_pool_get(irq_table);

        /* In some configs there is no pool of SF IRQs. Hence, return the PF
         * IRQ pool if the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pcif_pool;
}

static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_ctrl_irq_pool_get(irq_table);

        /* In some configs there is no pool of SF IRQs. Hence, return the PF
         * IRQ pool if the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pcif_pool;
}

/**
 * mlx5_irqs_release - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
{
        int i;

        for (i = 0; i < nirqs; i++) {
                synchronize_irq(irqs[i]->map.virq);
                mlx5_irq_put(irqs[i]);
        }
}

/**
 * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
 * @ctrl_irq: ctrl IRQ to be released.
 */
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
        mlx5_irqs_release(&ctrl_irq, 1);
}

/**
 * mlx5_ctrl_irq_request - request a ctrl IRQ for an mlx5 device.
 * @dev: mlx5 device requesting the IRQ.
 *
 * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
        struct irq_affinity_desc af_desc;
        struct mlx5_irq *irq;

        cpumask_copy(&af_desc.mask, cpu_online_mask);
        af_desc.is_managed = false;
        if (!mlx5_irq_pool_is_sf_pool(pool)) {
                /* We are allocating a control IRQ from a PCI device's pool.
                 * This can also happen for an SF if the SF pool is empty.
                 */
                if (!pool->xa_num_irqs.max) {
                        cpumask_clear(&af_desc.mask);
                        /* In case we only have a single IRQ for PF/VF */
                        cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
                }
                /* Allocate the IRQ at index 0. The vector was already allocated */
                irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
        } else {
                irq = mlx5_irq_affinity_request(pool, &af_desc);
        }

        return irq;
}

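/* Illustrative sketch: requesting a control IRQ for async events and
 * releasing it on teardown. The caller and error handling are hypothetical.
 */
#if 0
static int example_ctrl_irq(struct mlx5_core_dev *dev)
{
        struct mlx5_irq *ctrl_irq = mlx5_ctrl_irq_request(dev);

        if (IS_ERR(ctrl_irq))
                return PTR_ERR(ctrl_irq);
        /* ... attach a notifier with mlx5_irq_attach_nb() and use the IRQ ... */
        mlx5_ctrl_irq_release(ctrl_irq);
        return 0;
}
#endif
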
/**
 * mlx5_irq_request - request an IRQ for mlx5 PF/VF device.
 * @dev: mlx5 device requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * 0.
 * @af_desc: affinity descriptor for this IRQ.
 * @rmap: pointer to reverse map pointer for completion interrupts
 *
 * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
                                  struct irq_affinity_desc *af_desc,
                                  struct cpu_rmap **rmap)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool;
        struct mlx5_irq *irq;

        pool = irq_table->pcif_pool;
        irq = irq_pool_request_vector(pool, vecidx, af_desc, rmap);
        if (IS_ERR(irq))
                return irq;
        mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
                      irq->map.virq, cpumask_pr_args(&af_desc->mask),
                      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
        return irq;
}

/**
 * mlx5_msix_alloc - allocate msix interrupt
 * @dev: mlx5 device from which to request
 * @handler: interrupt handler
 * @affdesc: affinity descriptor
 * @name: interrupt name
 *
 * Returns: struct msi_map with result encoded.
 * Note: the caller must make sure to release the irq by calling
 *       mlx5_msix_free() if shutdown was initiated.
 */
struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
                               irqreturn_t (*handler)(int, void *),
                               const struct irq_affinity_desc *affdesc,
                               const char *name)
{
        struct msi_map map;
        int err;

        if (!dev->pdev) {
                map.virq = 0;
                map.index = -1;
                return map;
        }

        map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc);
        if (!map.virq)
                return map;

        err = request_irq(map.virq, handler, 0, name, NULL);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                pci_msix_free_irq(dev->pdev, map);
                map.virq = 0;
                map.index = -1;
        }
        return map;
}
EXPORT_SYMBOL(mlx5_msix_alloc);

/**
 * mlx5_msix_free - free a previously allocated msix interrupt
 * @dev: mlx5 device associated with interrupt
 * @map: map previously returned by mlx5_msix_alloc()
 */
void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
{
        free_irq(map.virq, NULL);
        pci_msix_free_irq(dev->pdev, map);
}
EXPORT_SYMBOL(mlx5_msix_free);

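/* Illustrative sketch: pairing mlx5_msix_alloc() with mlx5_msix_free(), as an
 * external consumer such as an auxiliary driver might. example_handler() and
 * the CPU choice are hypothetical; failure is signaled by map.virq == 0.
 */
#if 0
static irqreturn_t example_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_msix(struct mlx5_core_dev *dev)
{
        struct irq_affinity_desc affdesc = { .is_managed = 0 };
        struct msi_map map;

        cpumask_set_cpu(0, &affdesc.mask);
        map = mlx5_msix_alloc(dev, example_handler, &affdesc, "example");
        if (!map.virq)
                return -ENOMEM;
        /* ... interrupt is live here ... */
        mlx5_msix_free(dev, map);
        return 0;
}
#endif
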
/**
 * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
{
        mlx5_irqs_release(irqs, nirqs);
}

/**
 * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQs.
 * @cpus: CPUs array for binding the IRQs
 * @nirqs: number of IRQs to request.
 * @irqs: an output array of IRQ pointers.
 * @rmap: pointer to reverse map pointer for completion interrupts
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests @nirqs IRQs, starting from vector index 1.
 *
 * This function returns the number of IRQs requested (which may be smaller
 * than @nirqs) on success, or a negative error code in case of an error.
 */
int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
                              struct mlx5_irq **irqs, struct cpu_rmap **rmap)
{
        struct irq_affinity_desc af_desc;
        struct mlx5_irq *irq;
        int i;

        af_desc.is_managed = false;
        for (i = 0; i < nirqs; i++) {
                cpumask_clear(&af_desc.mask);
                cpumask_set_cpu(cpus[i], &af_desc.mask);
                irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
                if (IS_ERR(irq))
                        break;
                irqs[i] = irq;
        }

        return i ? i : PTR_ERR(irq);
}

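/* Illustrative sketch: requesting one completion IRQ per CPU listed in a
 * small array, then releasing them. The CPU list and count are hypothetical.
 */
#if 0
static int example_request_vectors(struct mlx5_core_dev *dev)
{
        struct mlx5_irq *irqs[2];
        u16 cpus[2] = { 0, 1 };
        int n;

        n = mlx5_irqs_request_vectors(dev, cpus, 2, irqs, NULL);
        if (n < 0)
                return n;
        /* n may be smaller than requested; use irqs[0..n-1] */
        mlx5_irqs_release_vectors(irqs, n);
        return 0;
}
#endif
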
static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
               u32 min_threshold, u32 max_threshold)
{
        struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
        mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
        if (name)
                snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
                         "%s", name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        /* There are cases in which we are destroying the irq_table before
         * freeing all the IRQs; fast teardown, for example. Hence, free the
         * IRQs that might not have been freed yet.
         */
        xa_for_each(&pool->irqs, index, irq)
                irq_release(irq);
        xa_destroy(&pool->irqs);
        mutex_destroy(&pool->lock);
        kfree(pool->irqs_per_cpu);
        kvfree(pool);
}

static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int num_sf_ctrl_by_msix;
        int num_sf_ctrl_by_sfs;
        int num_sf_ctrl;
        int err;

        /* init pcif_pool */
        table->pcif_pool = irq_pool_alloc(dev, 0, pcif_vec, NULL,
                                          MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                          MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->pcif_pool))
                return PTR_ERR(table->pcif_pool);
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
                mlx5_core_dbg(dev, "Not enough IRQs for SFs. SFs may run at lower performance\n");
                return 0;
        }

        /* init sf_ctrl_pool */
        num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
        num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
                                          MLX5_SFS_PER_CTRL_IRQ);
        num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
        num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
        table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
                                             "mlx5_sf_ctrl",
                                             MLX5_EQ_SHARE_IRQ_MIN_CTRL,
                                             MLX5_EQ_SHARE_IRQ_MAX_CTRL);
        if (IS_ERR(table->sf_ctrl_pool)) {
                err = PTR_ERR(table->sf_ctrl_pool);
                goto err_pf;
        }
        /* init sf_comp_pool */
        table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl,
                                             sf_vec - num_sf_ctrl, "mlx5_sf_comp",
                                             MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                             MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->sf_comp_pool)) {
                err = PTR_ERR(table->sf_comp_pool);
                goto err_sf_ctrl;
        }

        table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
        if (!table->sf_comp_pool->irqs_per_cpu) {
                err = -ENOMEM;
                goto err_irqs_per_cpu;
        }

        return 0;

err_irqs_per_cpu:
        irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
        irq_pool_free(table->sf_ctrl_pool);
err_pf:
        irq_pool_free(table->pcif_pool);
        return err;
}

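/* Worked example (illustrative, assuming MLX5_COMP_EQS_PER_SF is 8): with
 * sf_vec = 64 and 100 max SFs, num_sf_ctrl_by_msix = DIV_ROUND_UP(64, 8) = 8
 * and num_sf_ctrl_by_sfs = DIV_ROUND_UP(100, 64) = 2, so
 * num_sf_ctrl = min(8, 2, MLX5_IRQ_CTRL_SF_MAX) = 2 control IRQs, leaving
 * 62 vectors for the SF completion pool.
 */
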
static void irq_pools_destroy(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                irq_pool_free(table->sf_comp_pool);
                irq_pool_free(table->sf_ctrl_pool);
        }
        irq_pool_free(table->pcif_pool);
}

static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        xa_for_each(&pool->irqs, index, irq)
                free_irq(irq->map.virq, &irq->nh);
}

static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                mlx5_irq_pool_free_irqs(table->sf_comp_pool);
                mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
        }
        mlx5_irq_pool_free_irqs(table->pcif_pool);
}

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table;

        if (mlx5_core_is_sf(dev))
                return 0;

        irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
                                  dev->priv.numa_node);
        if (!irq_table)
                return -ENOMEM;

        dev->priv.irq_table = irq_table;
        return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_sf(dev))
                return;

        kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
        if (!table->pcif_pool->xa_num_irqs.max)
                return 1;
        return table->pcif_pool->xa_num_irqs.max - table->pcif_pool->xa_num_irqs.min;
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);
        int total_vec;
        int pcif_vec;
        int req_vec;
        int err;
        int n;

        if (mlx5_core_is_sf(dev))
                return 0;

        pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
        pcif_vec = min_t(int, pcif_vec, num_eqs);

        total_vec = pcif_vec;
        if (mlx5_sf_max_functions(dev))
                total_vec += MLX5_IRQ_CTRL_SF_MAX +
                        MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
        total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
        pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));

        req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 1 : total_vec;
        n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX);
        if (n < 0)
                return n;

        err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec);
        if (err)
                pci_free_irq_vectors(dev->pdev);

        return err;
}

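/* Worked example (illustrative): a single-port device with 16 online CPUs
 * requests pcif_vec = 1 * 16 + 1 = 17 vectors, capped by the EQ and MSI-X
 * capabilities. With dynamic MSI-X support, only one vector is allocated up
 * front here; the rest are allocated on demand in mlx5_irq_alloc().
 */
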
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        /* There are cases where IRQs are still in use when we reach this
         * point. Hence, make sure all the IRQs are released.
         */
        irq_pools_destroy(table);
        pci_free_irq_vectors(dev->pdev);
}

void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        mlx5_irq_pools_free_irqs(table);
        pci_free_irq_vectors(dev->pdev);
}

int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
        if (table->sf_comp_pool)
                return min_t(int, num_online_cpus(),
                             table->sf_comp_pool->xa_num_irqs.max -
                             table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->priv.irq_table;
#endif
        return dev->priv.irq_table;
}