return found_cpu;
}
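+/* Return the rmap of the PCI device backing the completion EQs. For an SF,
+ * the parent PF's rmap is used; returns NULL when CONFIG_RFS_ACCEL is off.
+ */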
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
+}
+
static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct cpu_rmap *rmap;
struct mlx5_irq *irq;
int cpu;
+ rmap = mlx5_eq_table_get_pci_rmap(dev);
cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
- irq = mlx5_irq_request_vector(dev, cpu, vecidx, &table->rmap);
+ irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
struct mlx5_irq *irq;
irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
- if (IS_ERR(irq))
+ if (IS_ERR(irq)) {
+ /* In case the SF IRQ pool does not exist, fall back to the PF IRQs */
+ if (PTR_ERR(irq) == -ENOENT)
+ return comp_irq_request_pci(dev, vecidx);
+
return PTR_ERR(irq);
+ }
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
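+ /* Not an SF IRQ pool: return -ENOENT so the caller falls back to the PF IRQs */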
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return ERR_PTR(-ENOENT);
+
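+ /* Request a managed IRQ affined to online CPUs not already used by this device */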
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
- if (mlx5_irq_pool_is_sf_pool(pool))
- irq = mlx5_irq_affinity_request(pool, &af_desc);
- else
- /* In case SF pool doesn't exists, fallback to the PF IRQs.
- * The PF IRQs are already allocated and binded to CPU
- * at this point. Hence, only an index is needed.
- */
- irq = mlx5_irq_request(dev, vecidx, NULL, NULL);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
if (IS_ERR(irq))
return irq;