net/mlx5: Handle SF IRQ request in the absence of SF IRQ pool
author Maher Sanalla <msanalla@nvidia.com>
Thu, 22 Jun 2023 16:05:46 +0000 (19:05 +0300)
committer Saeed Mahameed <saeedm@nvidia.com>
Mon, 7 Aug 2023 17:53:51 +0000 (10:53 -0700)
In case the SF IRQ pool is not available due to setup limitations,
the SF currently relies on the already-allocated PF IRQs to fulfill
its IRQ vector requests.

However, with the dynamic EQ allocation introduced in the next patch,
it is possible that not all PF IRQs will be allocated after the driver
is loaded. In such a case, if an SF requests a completion IRQ without
having its own independent IRQ pool, the SF will have no PF IRQ to
utilize.

To address this scenario, allocate an IRQ for the SF from the PF's IRQ pool
on demand. The new IRQ will be shared between the SF and its PF (a condensed
sketch of the resulting flow follows the diff hunks below).

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c

index 6272962ea077fc9bf135f006cdcbdc7b6af5aa22..6e6e0a1c12b593f6942f9717218ef87f0e61e8d7 100644
@@ -850,14 +850,29 @@ spread_done:
        return found_cpu;
 }
 
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+       if (mlx5_core_is_sf(dev))
+               return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+       return dev->priv.eq_table->rmap;
+#else
+       return NULL;
+#endif
+}
+
 static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
 {
        struct mlx5_eq_table *table = dev->priv.eq_table;
+       struct cpu_rmap *rmap;
        struct mlx5_irq *irq;
        int cpu;
 
+       rmap = mlx5_eq_table_get_pci_rmap(dev);
        cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
-       irq = mlx5_irq_request_vector(dev, cpu, vecidx, &table->rmap);
+       irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
        if (IS_ERR(irq))
                return PTR_ERR(irq);
 
@@ -883,8 +898,13 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
        struct mlx5_irq *irq;
 
        irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
-       if (IS_ERR(irq))
+       if (IS_ERR(irq)) {
+               /* In case SF IRQ pool does not exist, fall back to the PF IRQs */
+               if (PTR_ERR(irq) == -ENOENT)
+                       return comp_irq_request_pci(dev, vecidx);
+
                return PTR_ERR(irq);
+       }
 
        return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 }
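
Stripped of its config guards, the new rmap helper reduces to the following
(an illustrative simplification with both CONFIG_RFS_ACCEL and CONFIG_MLX5_SF
enabled, using the hypothetical name get_pci_rmap_sketch, not the literal
patch code; with CONFIG_RFS_ACCEL unset the helper simply returns NULL, and
without CONFIG_MLX5_SF the SF branch compiles out):

	/* Illustrative sketch: an SF carries no rmap of its own, so RFS
	 * steering hints are resolved through the parent PF's eq_table.
	 */
	static struct cpu_rmap *get_pci_rmap_sketch(struct mlx5_core_dev *dev)
	{
		if (mlx5_core_is_sf(dev))
			return dev->priv.parent_mdev->priv.eq_table->rmap;

		return dev->priv.eq_table->rmap;
	}
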
index ed51800f9f6720a6d062f67d8a1fc19a5e52e8e4..047d5fed5f89e62cb58c4e9f8d2e094247430a16 100644
@@ -191,17 +191,13 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
        struct irq_affinity_desc af_desc = {};
        struct mlx5_irq *irq;
 
+       if (!mlx5_irq_pool_is_sf_pool(pool))
+               return ERR_PTR(-ENOENT);
+
        af_desc.is_managed = 1;
        cpumask_copy(&af_desc.mask, cpu_online_mask);
        cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
-       if (mlx5_irq_pool_is_sf_pool(pool))
-               irq = mlx5_irq_affinity_request(pool, &af_desc);
-       else
-               /* In case SF pool doesn't exists, fallback to the PF IRQs.
-                * The PF IRQs are already allocated and binded to CPU
-                * at this point. Hence, only an index is needed.
-                */
-               irq = mlx5_irq_request(dev, vecidx, NULL, NULL);
+       irq = mlx5_irq_affinity_request(pool, &af_desc);
 
        if (IS_ERR(irq))
                return irq;
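
Taken together, the two hunks give the SF completion-IRQ request path the
following shape (a condensed sketch that mirrors the hunks above under the
hypothetical name sf_comp_irq_flow, with error paths trimmed; not the
literal kernel source):

	static int sf_comp_irq_flow(struct mlx5_core_dev *dev, u16 vecidx)
	{
		struct mlx5_eq_table *table = dev->priv.eq_table;
		struct mlx5_irq *irq;

		/* Try the SF's own IRQ pool first; the irq_affinity.c hunk
		 * makes this fail fast with -ENOENT when no SF pool exists.
		 */
		irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus,
							 vecidx);
		if (IS_ERR(irq)) {
			/* No SF pool: borrow an IRQ from the parent PF. The
			 * PCI path resolves its rmap via
			 * mlx5_eq_table_get_pci_rmap(), which for an SF walks
			 * to the parent PF's eq_table.
			 */
			if (PTR_ERR(irq) == -ENOENT)
				return comp_irq_request_pci(dev, vecidx);

			return PTR_ERR(irq);
		}

		return xa_err(xa_store(&table->comp_irqs, vecidx, irq,
				       GFP_KERNEL));
	}

With this in place, an SF brought up without its own IRQ pool no longer
fails its completion-vector request: it transparently shares an IRQ,
allocated on demand from the PF's pool, with its parent PF.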