author		Mike Christie <michael.christie@oracle.com>	2021-02-27 11:00:06 -0600
committer	Martin K. Petersen <martin.petersen@oracle.com>	2021-03-04 17:37:03 -0500
commit		39ae3edda325e9cf9e978c9788affe88231f3b34 (patch)
tree		ee148448ca96958fd380291d4871a8ad719ad9f0 /drivers/target/target_core_fabric_configfs.c
parent		scsi: target: core: Flush submission work during TMR processing (diff)
scsi: target: core: Make completion affinity configurable
It may not always be best to complete the IO on the same CPU it was
submitted on. This commit allows userspace to configure the completion
CPU.

This has been useful for vhost-scsi, where we have a single thread for
submissions and completions. If we force the completion onto the
submission CPU, we may conflict with what the user has set up at the
lower levels with settings like the block layer rq_affinity or the
driver's IRQ or softirq (the network's rps_cpus value) settings. We may
also want a setup where the vhost thread runs on CPU N and does its
submissions/completions there, while LIO does its completion handling
on CPU M, but we can't configure the lower levels due to issues like
using dm-multipath with lots of paths (the path selector can throw
commands all over the system because it only takes latency/throughput
at its own level into account).

The new setting is in:

    /sys/kernel/config/target/$fabric/$target/param/cmd_completion_affinity

Writing:

    -1  -> Gives the current default behavior of completing on the
           submission CPU.
    -2  -> Completes the cmd on the CPU the lower layers sent it to us
           from.
    > 0 -> Completes on the CPU userspace has specified.

Link: https://lore.kernel.org/r/20210227170006.5077-26-michael.christie@oracle.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
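As a concrete usage illustration (not part of this commit), here is a
minimal userspace sketch that writes the new attribute. The iSCSI WWN in
the path is a hypothetical example; the value -2 requests completion on
the CPU the lower layers sent the command from:

#include <stdio.h>

int main(void)
{
	/* Hypothetical example path; substitute your fabric and target WWN. */
	const char *attr = "/sys/kernel/config/target/iscsi/"
			   "iqn.2003-01.org.example:tgt0/param/"
			   "cmd_completion_affinity";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* -2 -> complete on the CPU the lower layers completed the cmd on */
	if (fprintf(f, "-2\n") < 0)
		perror("fprintf");
	fclose(f);
	return 0;
}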
Diffstat (limited to 'drivers/target/target_core_fabric_configfs.c')
-rw-r--r--	drivers/target/target_core_fabric_configfs.c	58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index ee85602213f7..fc7edc04ee09 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item)
 	struct target_fabric_configfs *tf = wwn->wwn_tf;
 	configfs_remove_default_groups(&wwn->fabric_stat_group);
+	configfs_remove_default_groups(&wwn->param_group);
 	tf->tf_ops->fabric_drop_wwn(wwn);
 }
@@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
 /* End of tfc_wwn_fabric_stats_cit */
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+					       char *page)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+					  param_group);
+	return sprintf(page, "%d\n",
+		       wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+		       SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+						const char *page, size_t count)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+					  param_group);
+	int compl_val;
+
+	if (kstrtoint(page, 0, &compl_val))
+		return -EINVAL;
+
+	switch (compl_val) {
+	case SE_COMPL_AFFINITY_CPUID:
+		wwn->cmd_compl_affinity = compl_val;
+		break;
+	case SE_COMPL_AFFINITY_CURR_CPU:
+		wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+		break;
+	default:
+		if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+		    !cpu_online(compl_val)) {
+			pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+			       SE_COMPL_AFFINITY_CPUID,
+			       SE_COMPL_AFFINITY_CURR_CPU);
+			return -EINVAL;
+		}
+		wwn->cmd_compl_affinity = compl_val;
+	}
+
+	return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+	&target_fabric_wwn_attr_cmd_completion_affinity,
+	NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
 /* Start of tfc_wwn_cit */
 static struct config_group *target_fabric_make_wwn(
@@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn(
 	if (!wwn || IS_ERR(wwn))
 		return ERR_PTR(-EINVAL);
+	wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
 	wwn->wwn_tf = tf;
 	config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
@@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn(
 			&tf->tf_wwn_fabric_stats_cit);
 	configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+	config_group_init_type_name(&wwn->param_group, "param",
+			&tf->tf_wwn_param_cit);
+	configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
 	if (tf->tf_ops->add_wwn_groups)
 		tf->tf_ops->add_wwn_groups(wwn);
 	return &wwn->wwn_group;
@@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
 	target_fabric_setup_discovery_cit(tf);
 	target_fabric_setup_wwn_cit(tf);
 	target_fabric_setup_wwn_fabric_stats_cit(tf);
+	target_fabric_setup_wwn_param_cit(tf);
 	target_fabric_setup_tpg_cit(tf);
 	target_fabric_setup_tpg_base_cit(tf);
 	target_fabric_setup_tpg_port_cit(tf);
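For context, the stored wwn->cmd_compl_affinity value is consumed on the
command completion path in target_core_transport.c, which is outside this
diff. The following is only a simplified sketch of that idea, under the
assumption that completions are dispatched with queue_work_on(); the helper
name is hypothetical and the real consumer may differ in detail:

/*
 * Illustrative sketch, not the actual kernel code: pick the completion
 * CPU from the configurable affinity and queue the completion work.
 */
static void example_queue_completion(struct se_cmd *cmd, struct se_wwn *wwn,
				     struct workqueue_struct *wq)
{
	int cpu;

	if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
		cpu = cmd->cpuid;		/* default: submission CPU */
	else
		cpu = wwn->cmd_compl_affinity;	/* WORK_CPU_UNBOUND or a fixed CPU */

	queue_work_on(cpu, wq, &cmd->work);
}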