scsi: target: core: Make completion affinity configurable
author: Mike Christie <michael.christie@oracle.com>
Sat, 27 Feb 2021 17:00:06 +0000 (11:00 -0600)
committer: Martin K. Petersen <martin.petersen@oracle.com>
Thu, 4 Mar 2021 22:37:03 +0000 (17:37 -0500)
It may not always be best to complete the IO on the same CPU as it was
submitted on. This commit allows userspace to configure it.

This has been useful for vhost-scsi where we have a single thread for
submissions and completions. If we force the completion on the submission
CPU we may be adding conflicts with what the user has setup in the lower
levels with settings like the block layer rq_affinity or the driver's IRQ
or softirq (the network's rps_cpus value) settings.

We may also want to set it up where the vhost thread runs on CPU N and does
its submissions/completions there, and then have LIO do its completion
booking on CPU M, but can't configure the lower levels due to issues like
using dm-multipath with lots of paths (the path selector can throw commands
all over the system because it's only taking into account latency/throughput
at its level).

The new setting is in:

    /sys/kernel/config/target/$fabric/$target/param/cmd_completion_affinity

Writing:

    -1 -> Gives the current default behavior of completing on the
          submission CPU.

    -2 -> Completes the cmd on the CPU the lower layers sent it to us from.

   >= 0 -> Completes on the CPU userspace has specified.

Link: https://lore.kernel.org/r/20210227170006.5077-26-michael.christie@oracle.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_internal.h
drivers/target/target_core_transport.c
include/target/target_core_base.h

index ee85602..fc7edc0 100644 (file)
@@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item)
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
        configfs_remove_default_groups(&wwn->fabric_stat_group);
+       configfs_remove_default_groups(&wwn->param_group);
        tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
@@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
 
 /* End of tfc_wwn_fabric_stats_cit */
 
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+                                              char *page)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       return sprintf(page, "%d\n",
+                      wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+                      SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+                                               const char *page, size_t count)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       int compl_val;
+
+       if (kstrtoint(page, 0, &compl_val))
+               return -EINVAL;
+
+       switch (compl_val) {
+       case SE_COMPL_AFFINITY_CPUID:
+               wwn->cmd_compl_affinity = compl_val;
+               break;
+       case SE_COMPL_AFFINITY_CURR_CPU:
+               wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+               break;
+       default:
+               if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+                   !cpu_online(compl_val)) {
+                       pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+                              SE_COMPL_AFFINITY_CPUID,
+                              SE_COMPL_AFFINITY_CURR_CPU);
+                       return -EINVAL;
+               }
+               wwn->cmd_compl_affinity = compl_val;
+       }
+
+       return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+       &target_fabric_wwn_attr_cmd_completion_affinity,
+       NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
 /* Start of tfc_wwn_cit */
 
 static struct config_group *target_fabric_make_wwn(
@@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn(
        if (!wwn || IS_ERR(wwn))
                return ERR_PTR(-EINVAL);
 
+       wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
        wwn->wwn_tf = tf;
 
        config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
@@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn(
                        &tf->tf_wwn_fabric_stats_cit);
        configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
 
+       config_group_init_type_name(&wwn->param_group, "param",
+                       &tf->tf_wwn_param_cit);
+       configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
        if (tf->tf_ops->add_wwn_groups)
                tf->tf_ops->add_wwn_groups(wwn);
        return &wwn->wwn_group;
@@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
        target_fabric_setup_discovery_cit(tf);
        target_fabric_setup_wwn_cit(tf);
        target_fabric_setup_wwn_fabric_stats_cit(tf);
+       target_fabric_setup_wwn_param_cit(tf);
        target_fabric_setup_tpg_cit(tf);
        target_fabric_setup_tpg_base_cit(tf);
        target_fabric_setup_tpg_port_cit(tf);
index 56f841f..a343bcf 100644 (file)
@@ -34,6 +34,7 @@ struct target_fabric_configfs {
        struct config_item_type tf_discovery_cit;
        struct config_item_type tf_wwn_cit;
        struct config_item_type tf_wwn_fabric_stats_cit;
+       struct config_item_type tf_wwn_param_cit;
        struct config_item_type tf_tpg_cit;
        struct config_item_type tf_tpg_base_cit;
        struct config_item_type tf_tpg_lun_cit;
index 1245c28..a75591c 100644 (file)
@@ -857,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-       int success;
+       struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+       int success, cpu;
        unsigned long flags;
 
        if (target_cmd_interrupted(cmd))
@@ -884,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
-       queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+               cpu = cmd->cpuid;
+       else
+               cpu = wwn->cmd_compl_affinity;
+
+       queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
index cf445c3..d1f7d2a 100644 (file)
@@ -944,11 +944,20 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item)
                        tpg_param_group);
 }
 
+enum {
+       /* Use se_cmd's cpuid for completion */
+       SE_COMPL_AFFINITY_CPUID         = -1,
+       /* Complete on current CPU */
+       SE_COMPL_AFFINITY_CURR_CPU      = -2,
+};
+
 struct se_wwn {
        struct target_fabric_configfs *wwn_tf;
        void                    *priv;
        struct config_group     wwn_group;
        struct config_group     fabric_stat_group;
+       struct config_group     param_group;
+       int                     cmd_compl_affinity;
 };
 
 static inline void atomic_inc_mb(atomic_t *v)