net/mlx5: Prevent high-rate FW commands from populating all slots
author		Tariq Toukan <tariqt@nvidia.com>
		Tue, 2 Aug 2022 11:47:30 +0000 (14:47 +0300)
committer	Saeed Mahameed <saeedm@nvidia.com>
		Wed, 11 Jan 2023 05:24:42 +0000 (21:24 -0800)
Certain connection-based device-offload protocols (like TLS) use
per-connection HW objects to track the state, maintain the context, and
perform the offload properly. Some of these objects are created,
modified, and destroyed via FW commands. Under high connection rate,
this type of FW command might continuously populate all slots of the FW
command interface and throttle it, while starving other critical control
FW commands.

Limit these throttle commands to using at most a portion (half) of the
FW command interface slots. The maximal FW command rate is not impacted
by this limitation: the same high rate is still reached, while slots
remain available for other critical control commands.
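
As a standalone illustration of the pattern (a minimal user-space
sketch, with POSIX semaphores standing in for the kernel's struct
semaphore; MAX_SLOTS, issue_cmd() and is_throttle_cmd() are hypothetical
placeholders, not driver code):

    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SLOTS 32              /* total command interface slots */

    static sem_t throttle_sem;        /* caps in-flight throttle commands */

    static bool is_throttle_cmd(int op)
    {
            return op == 1;           /* e.g. a CREATE_GENERAL_OBJECT-like opcode */
    }

    static int issue_cmd(int op)
    {
            bool throttled = is_throttle_cmd(op);

            if (throttled)
                    sem_wait(&throttle_sem); /* blocks once half the slots are taken */

            /* ... build the mailbox, post the command, wait for completion ... */
            printf("executed opcode %d\n", op);

            if (throttled)
                    sem_post(&throttle_sem); /* return the throttle slot */
            return 0;
    }

    int main(void)
    {
            /* throttle commands may hold at most half of the slots */
            sem_init(&throttle_sem, 0, (MAX_SLOTS + 1) / 2);
            return issue_cmd(1);
    }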

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
include/linux/mlx5/driver.h

index 541eecf..24da9c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,21 @@ static u16 in_to_opcode(void *in)
        return MLX5_GET(mbox_in, in, opcode);
 }
 
+/* Returns true for opcodes that might be triggered very frequently and throttle
+ * the command interface. Limit their command slots usage.
+ */
+static bool mlx5_cmd_is_throttle_opcode(u16 op)
+{
+       switch (op) {
+       case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+       case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+       case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+       case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+               return true;
+       }
+       return false;
+}
+
 static struct mlx5_cmd_work_ent *
 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
              struct mlx5_cmd_msg *out, void *uout, int uout_size,
@@ -1825,6 +1840,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 {
        struct mlx5_cmd_msg *inb, *outb;
        u16 opcode = in_to_opcode(in);
+       bool throttle_op;
        int pages_queue;
        gfp_t gfp;
        u8 token;
@@ -1833,13 +1849,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
                return -ENXIO;
 
+       throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
+       if (throttle_op) {
+               /* atomic context may not sleep */
+               if (callback)
+                       return -EINVAL;
+               down(&dev->cmd.throttle_sem);
+       }
+
        pages_queue = is_manage_pages(in);
        gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
 
        inb = alloc_msg(dev, in_size, gfp);
        if (IS_ERR(inb)) {
                err = PTR_ERR(inb);
-               return err;
+               goto out_up;
        }
 
        token = alloc_token(&dev->cmd);
@@ -1873,6 +1897,9 @@ out_out:
        mlx5_free_cmd_msg(dev, outb);
 out_in:
        free_msg(dev, inb);
+out_up:
+       if (throttle_op)
+               up(&dev->cmd.throttle_sem);
        return err;
 }
 
@@ -2222,6 +2249,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
        sema_init(&cmd->sem, cmd->max_reg_cmds);
        sema_init(&cmd->pages_sem, 1);
+       sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
 
        cmd_h = (u32)((u64)(cmd->dma) >> 32);
        cmd_l = (u32)(cmd->dma);
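
A note on the initial count, as a worked example (assuming the usual
mlx5 setup where cmd->max_reg_cmds is one less than the queue size,
e.g. 31 for a 32-entry queue): DIV_ROUND_UP(31, 2) = 16, so at most 16
throttle-type commands can be in flight while 15 slots stay free for
other commands. Rounding up also keeps the semaphore count at least 1
even if max_reg_cmds were 1.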
index 50a5780..7c393da 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -310,6 +310,7 @@ struct mlx5_cmd {
        struct workqueue_struct *wq;
        struct semaphore sem;
        struct semaphore pages_sem;
+       struct semaphore throttle_sem;
        int     mode;
        u16     allowed_opcode;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
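
For context on how these semantics surface to callers, a hedged sketch
of issuing one of the throttled opcodes (the helper name
create_obj_example() is hypothetical; real users such as the TLS
offload code fill object-specific fields beyond the bare
general_obj_in_cmd_hdr):

    static int create_obj_example(struct mlx5_core_dev *dev, u16 obj_type,
                                  u32 *obj_id)
    {
            u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
            u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
            int err;

            MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                     MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
            MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);

            /* Synchronous exec: cmd_exec() classifies the opcode as a
             * throttle command, takes throttle_sem (possibly sleeping),
             * and releases it once the FW response arrives. The async
             * callback path is rejected with -EINVAL for these opcodes,
             * since down() may sleep.
             */
            err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
            if (!err)
                    *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
            return err;
    }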