/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
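/*
 * Illustrative expansion (not part of the original source): for example,
 * TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs) below
 * generates:
 *
 *	static void target_core_setup_dev_wwn_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_wwn_cit;
 *
 *		cit->ct_item_ops = NULL;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = target_core_dev_wwn_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", "dev_wwn");
 *	}
 *
 * while TB_CIT_SETUP_DRV() instead takes the attribute list from the
 * backend driver's ops (tb->ops->tb_dev_attrib_attrs, for example).
 */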
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_ops->name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}
/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to automatically
		 * load local fabric modules when the following is called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for modules with
		 * mkdir(2) system calls with known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	tf->tf_group.default_groups = tf->tf_default_groups;
	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
	tf->tf_group.default_groups[1] = NULL;

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
			" %s\n", tf->tf_group.cg_item.ci_name);
	return &tf->tf_group;
}
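/*
 * Usage sketch (illustrative, not from the original source): a fabric
 * group is created from userspace via mkdir(2), e.g.:
 *
 *	mount -t configfs none /sys/kernel/config
 *	mkdir -p /sys/kernel/config/target/loopback
 *
 * which invokes ->make_group() above and, for the known "iscsi" and
 * "loopback" names, auto-loads the matching fabric module first.
 */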
/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);
	struct config_group *tf_group;
	struct config_item *df_item;
	int i;

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	tf_group = &tf->tf_group;
	for (i = 0; tf_group->default_groups[i]; i++) {
		df_item = &tf_group->default_groups[i]->cg_item;
		tf_group->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (!tfo->name) {
		pr_err("Missing tfo->name\n");
		return -EINVAL;
	}
	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", tfo->name);
		return -EINVAL;
	}
	if (!tfo->get_fabric_name) {
		pr_err("Missing tfo->get_fabric_name()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->shutdown_session) {
		pr_err("Missing tfo->shutdown_session()\n");
		return -EINVAL;
	}
	if (!tfo->close_session) {
		pr_err("Missing tfo->close_session()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
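/*
 * Minimal registration sketch for an external fabric driver (illustrative
 * only; "my_fabric" is a made-up name, and the ops struct must provide
 * every callback checked by target_fabric_tf_ops_check() above):
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.module	= THIS_MODULE,
 *		.name	= "my_fabric",
 *		...
 *	};
 *
 *	static int __init my_fabric_init(void)
 *	{
 *		return target_register_template(&my_fabric_ops);
 *	}
 *
 *	static void __exit my_fabric_exit(void)
 *	{
 *		target_unregister_template(&my_fabric_ops);
 *	}
 */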
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}

/* Start functions for struct config_item_type tb_dev_attrib_cit */

#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
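/*
 * Illustrative expansion (not in the original source): for example,
 * DEF_CONFIGFS_ATTRIB_SHOW(block_size) above generates:
 *
 *	static ssize_t block_size_show(struct config_item *item, char *page)
 *	{
 *		return snprintf(page, PAGE_SIZE, "%u\n",
 *				to_attrib(item)->block_size);
 *	}
 */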
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = val;						\
	return count;							\
}
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = flag;						\
	return count;							\
}
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	da->emulate_model_alias = flag;
	return count;
}
static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}
static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1 && val != 2) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}
static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			return ret;
		}
	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}
static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}
static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
		       " because max_unmap_block_desc_count is zero\n",
		       da->da_dev);
		return -ENOSYS;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}
static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}
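/*
 * Usage sketch (illustrative, not from the original source; "iblock_0"
 * and "my_dev" are made-up HBA/device names chosen by the admin):
 *
 *	echo 4096 > /sys/kernel/config/target/core/iblock_0/my_dev/attrib/block_size
 *
 * Per the checks above, the write is rejected with -EINVAL while the
 * device is exported, or when the value is not 512, 1024, 2048 or 4096.
 */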
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&to_t10_wwn(item)->unit_serial[0]);
}
static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}
/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
		char *page)						\
{									\
	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}
/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */
static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
		se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	ssize_t len;

	se_nacl = dev->dev_reserved_node_acl;
	if (se_nacl) {
		len = sprintf(page,
			      "SPC-2 Reservation: %s Initiator: %s\n",
			      se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
			      se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}
static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
		tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->get_fabric_name(),
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");
	else
		return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}

static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%lld"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%lld"},
	{Opt_err, NULL}
};
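/*
 * Illustrative input (not from the original source; the iqn strings are
 * made up): res_aptpl_metadata expects a comma/newline separated token
 * list matching the table above, e.g.:
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
 *	sa_res_key=0x1234,mapped_lun=0,target_fabric=iSCSI,
 *	target_node=iqn.2003-01.org.example:target,tpgt=1,target_lun=0
 */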
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = kstrtoull(args->from, 0, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			mapped_lun = (u64)arg;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			target_lun = (u64)arg;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(i_port);
	kfree(isid);
	kfree(t_fabric);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_pr_attr_res_holder,
	&target_pr_attr_res_pr_all_tgt_pts,
	&target_pr_attr_res_pr_generation,
	&target_pr_attr_res_pr_holder_tg_port,
	&target_pr_attr_res_pr_registered_i_pts,
	&target_pr_attr_res_pr_type,
	&target_pr_attr_res_type,
	&target_pr_attr_res_aptpl_active,
	&target_pr_attr_res_aptpl_metadata,
	NULL,
};

TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
/* End functions for struct config_item_type tb_dev_pr_cit */

/* Start functions for struct config_item_type tb_dev_cit */
static inline struct se_device *to_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device, dev_group);
}
static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes += bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}
static ssize_t target_dev_control_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);

	return dev->transport->set_configfs_dev_params(dev, page, count);
}
static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}
static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}

static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}
static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
}

static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}
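/*
 * Usage sketch (illustrative, not from the original source; $HBA and
 * $DEV stand for whatever names the admin created): once a backstore
 * has been configured through the control attribute, writing "1" here
 * calls target_configure_device():
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 */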
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return count;

	if (count > LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}
static ssize_t target_dev_lba_map_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *orig, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	orig = map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				ret = -EINVAL;
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(orig);
	return count;
}
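/*
 * Illustrative input (not from the original source), matching the parser
 * above: the first line carries segment_size and segment_multiplier, and
 * each following line gives "start_lba end_lba pg_id:state ...", where
 * state is O/A/S/U (active/optimized, active/non-optimized, standby,
 * unavailable):
 *
 *	64 1
 *	0 127 0:O 1:S
 *	128 255 0:S 1:O
 */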
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);
static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_dev_attr_info,
	&target_dev_attr_control,
	&target_dev_attr_alias,
	&target_dev_attr_udev_path,
	&target_dev_attr_enable,
	&target_dev_attr_alua_lu_gp,
	&target_dev_attr_lba_map,
	NULL,
};
static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	kfree(dev_cg->default_groups);
	target_free_device(dev);
}

static struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
};

TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
/* End functions for struct config_item_type tb_dev_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_lu_gp,
			lu_gp_group);
}
static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);

	if (!lu_gp->lu_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_lu_gp_attr_lu_gp_id,
	&target_lu_gp_attr_members,
	NULL,
};
static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
};

static struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gp_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
			&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;
}
static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gps_cit */

/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
			tg_pt_gp_group);
}
2344 static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2347 return sprintf(page, "%d\n",
2348 atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));

static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on non valid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("Unable to set alua_access_state while device is"
			" not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
			" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
			" while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
			NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}
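
/*
 * Illustrative usage (not from the original source; paths hypothetical).
 * With TPGS_IMPLICIT_ALUA enabled, userspace can request a transition to
 * e.g. Active/Optimized (state 0) for a target port group:
 *
 *   echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_access_state
 */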

static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	return sprintf(page, "%s\n",
		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}

static ssize_t target_tg_pt_gp_alua_access_status_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to set ALUA access status on non valid"
			" tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
			" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	if ((new_status != ALUA_STATUS_NONE) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal ALUA access status: 0x%02x\n",
			new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
	return count;
}

static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
		char *page)
{
	return core_alua_show_access_type(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}

#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
		struct config_item *item, char *p)			\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	return sprintf(p, "%d\n",					\
		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
}									\
									\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
		struct config_item *item, const char *p, size_t c)	\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		/* ##_name does not paste inside a string literal,	\
		 * so use __stringify() to name the attribute. */	\
		pr_err("Unable to set " __stringify(_name) " ALUA state" \
			" on non valid tg_pt_gp ID: %hu\n",		\
			t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return ret;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->tg_pt_gp_alua_supported_states |= _bit;		\
	else								\
		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
									\
	return c;							\
}

ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
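
/*
 * Each ALUA_SUPPORTED_STATE_ATTR() invocation above expands to a
 * _show/_store pair for one supported-states bit; e.g. the
 * "transitioning" instance yields
 * target_tg_pt_gp_alua_support_transitioning_show()/_store(), which
 * toggle ALUA_T_SUP. Illustrative usage (path hypothetical):
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_support_offline
 */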

static ssize_t target_tg_pt_gp_alua_write_metadata_show(
		struct config_item *item, char *page)
{
	return sprintf(page, "%d\n",
		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}

static ssize_t target_tg_pt_gp_alua_write_metadata_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_write_metadata: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;

	return count;
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
		struct config_item *item, char *page)
{
	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
		struct config_item *item, const char *page, size_t count)
{
	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
		char *page)
{
	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}

static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	if (!tg_pt_gp->tg_pt_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}

static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for tg_pt_gp_id\n", ret);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
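
/*
 * Illustrative usage (not from the original source; the ID value and
 * group name are arbitrary):
 *
 *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/tg_pt_gp_id
 *
 * which validates the 16-bit range above and then calls
 * core_alua_set_tg_pt_gp_id().
 */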

static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of tg_pt_gp_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}

CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);

static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};

static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

static struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
			alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}

static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */

/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. The default group
 * core/alua/lu_gps is attached to target_core_alua_cit in
 * target_core_init_configfs() below; per-device target port groups
 * live under core/$HBA/$DEV/alua/ instead.
 */
static struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
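
/*
 * Illustrative layout (not part of the original source) once the groups
 * are assembled in target_core_init_configfs() below:
 *
 *   /sys/kernel/config/target/core/alua/
 *   `-- lu_gps/
 *       `-- default_lu_gp/
 *           |-- lu_gp_id
 *           `-- members
 */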

/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	return;
}

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */

/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
	struct config_group *dev_stat_grp = NULL;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	dev_cg = &dev->dev_group;

	dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
			GFP_KERNEL);
	if (!dev_cg->default_groups)
		goto out_free_device;

	config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);

	dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
	dev_cg->default_groups[1] = &dev->dev_pr_group;
	dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
	dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
	dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
	dev_cg->default_groups[5] = NULL;
	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_dev_cg_default_groups;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!tg_pt_gp_cg->default_groups) {
		pr_err("Unable to allocate tg_pt_gp_cg->default_groups\n");
		goto out_free_tg_pt_gp;
	}

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
	tg_pt_gp_cg->default_groups[1] = NULL;
	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
			GFP_KERNEL);
	if (!dev_stat_grp->default_groups) {
		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
		goto out_free_tg_pt_gp_cg_default_groups;
	}
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return dev_cg;

out_free_tg_pt_gp_cg_default_groups:
	kfree(tg_pt_gp_cg->default_groups);
out_free_tg_pt_gp:
	core_alua_free_tg_pt_gp(tg_pt_gp);
out_free_dev_cg_default_groups:
	kfree(dev_cg->default_groups);
out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
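
/*
 * Illustrative result (not from the original source; names hypothetical):
 * after userspace runs, e.g.,
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0/my_dev
 *
 * the new $DEV directory is populated with the default groups wired up
 * above: attrib/, pr/, wwn/, alua/ (containing default_tg_pt_gp/) and
 * statistics/.
 */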

static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;
	struct config_item *df_item;
	struct config_group *tg_pt_gp_cg, *dev_stat_grp;
	int i;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
		df_item = &dev_stat_grp->default_groups[i]->cg_item;
		dev_stat_grp->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(dev_stat_grp->default_groups);

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
		tg_pt_gp_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(tg_pt_gp_cg->default_groups);
	/*
	 * core_alua_free_tg_pt_gp() is called for ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	for (i = 0; dev_cg->default_groups[i]; i++) {
		df_item = &dev_cg->default_groups[i]->cg_item;
		dev_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};

static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}

static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}

static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	else if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}

CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
			struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

static struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp.
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
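
/*
 * Illustrative parse (not from the original source): for
 * "mkdir /sys/kernel/config/target/core/iblock_0", buf is split into
 * se_plugin_str = "iblock" and plugin_dep_id = 0; for a name like
 * "rd_mcp_1", the second '_' rule above yields "rd_mcp" and ID 1.
 */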

static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */

void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
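
/*
 * Note (editorial, not from the original source): this is typically
 * invoked once per backend at registration time (via
 * target_backend_register()), so each backend gets its own set of
 * config_item_types built from the generic and driver-supplied
 * attribute tables.
 */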

static int __init target_core_init_configfs(void)
{
	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
	struct config_group *lu_gp_cg = NULL;
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	target_cg = &subsys->su_group;
	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!target_cg->default_groups) {
		pr_err("Unable to allocate target_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&target_core_hbagroup,
			"core", &target_core_cit);
	target_cg->default_groups[0] = &target_core_hbagroup;
	target_cg->default_groups[1] = NULL;
	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	hba_cg = &target_core_hbagroup;
	hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!hba_cg->default_groups) {
		pr_err("Unable to allocate hba_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}
	config_group_init_type_name(&alua_group,
			"alua", &target_core_alua_cit);
	hba_cg->default_groups[0] = &alua_group;
	hba_cg->default_groups[1] = NULL;
	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	alua_cg = &alua_group;
	alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!alua_cg->default_groups) {
		pr_err("Unable to allocate alua_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&alua_lu_gps_group,
			"lu_gps", &target_core_alua_lu_gps_cit);
	alua_cg->default_groups[0] = &alua_lu_gps_group;
	alua_cg->default_groups[1] = NULL;
	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	lu_gp_cg = &alua_lu_gps_group;
	lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!lu_gp_cg->default_groups) {
		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
	lu_gp_cg->default_groups[1] = NULL;
	default_lu_gp = lu_gp;
	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	if (lu_gp_cg)
		kfree(lu_gp_cg->default_groups);
	if (alua_cg)
		kfree(alua_cg->default_groups);
	if (hba_cg)
		kfree(hba_cg->default_groups);
	kfree(target_cg->default_groups);
	release_se_kmem_caches();
	return ret;
}

static void __exit target_core_exit_configfs(void)
{
	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
	struct config_item *item;
	int i;

	lu_gp_cg = &alua_lu_gps_group;
	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
		item = &lu_gp_cg->default_groups[i]->cg_item;
		lu_gp_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(lu_gp_cg->default_groups);
	lu_gp_cg->default_groups = NULL;

	alua_cg = &alua_group;
	for (i = 0; alua_cg->default_groups[i]; i++) {
		item = &alua_cg->default_groups[i]->cg_item;
		alua_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(alua_cg->default_groups);
	alua_cg->default_groups = NULL;

	hba_cg = &target_core_hbagroup;
	for (i = 0; hba_cg->default_groups[i]; i++) {
		item = &hba_cg->default_groups[i]->cg_item;
		hba_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(hba_cg->default_groups);
	hba_cg->default_groups = NULL;
	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic.
	 */
	configfs_unregister_subsystem(&target_core_fabrics);
	kfree(target_core_fabrics.su_group.default_groups);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
		" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}

MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);