/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
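
/*
 * Both macros stamp out a target_core_setup_$NAME_cit() helper.  For
 * example, TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL) further below
 * expands to target_core_setup_dev_attrib_cit(), which wires the
 * backend-provided tb->ops->tb_dev_attrib_attrs array into the
 * config_item_type for that backend's dev_attrib group.
 */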
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];

static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}
static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;

	mutex_lock(&g_tf_lock);
	if (!list_empty(&g_tf_list)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot be changed: target drivers registered");
		return -EINVAL;
	}

	if (count > (DB_ROOT_LEN - 1)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		return -EINVAL;
	}

	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes) {
		mutex_unlock(&g_tf_lock);
		return -EINVAL;
	}
	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return -EINVAL;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		return -EINVAL;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, read_bytes);

	mutex_unlock(&g_tf_lock);

	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

	return read_bytes;
}

CONFIGFS_ATTR(target_core_item_, dbroot);
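
/*
 * Illustrative usage (assuming configfs is mounted at /sys/kernel/config):
 *
 *   cat /sys/kernel/config/target/version
 *   echo /etc/target > /sys/kernel/config/target/dbroot
 *
 * Note that dbroot can only be changed before any fabric driver has
 * registered, per the g_tf_list check above.
 */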
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		const char *cmp_name = tf->tf_ops->fabric_alias;
		if (!cmp_name)
			cmp_name = tf->tf_ops->fabric_name;
		if (!strcmp(cmp_name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}
/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can
		 * be registered, but simply provides auto-loading logic for
		 * mkdir(2) system calls naming known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module
			 * when the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}
/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->fabric_name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
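
/*
 * Sketch of typical usage (identifiers illustrative): callers pin a
 * configfs item across a window where rmdir(2) must not remove it:
 *
 *	ret = target_depend_item(&some_item);
 *	if (!ret) {
 *		... use the object backing some_item ...
 *		target_undepend_item(&some_item);
 *	}
 */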
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (tfo->fabric_alias) {
		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
			pr_err("Passed alias: %s exceeds "
			       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
			return -EINVAL;
		}
	}
	if (!tfo->fabric_name) {
		pr_err("Missing tfo->fabric_name\n");
		return -EINVAL;
	}
	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds "
		       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	if (!tfo->check_stop_free) {
		pr_err("Missing tfo->check_stop_free()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
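
/*
 * Sketch (illustrative names): a fabric driver typically registers its
 * ops table from module_init() and tears it down from module_exit():
 *
 *	static int __init my_fabric_init(void)
 *	{
 *		return target_register_template(&my_fabric_ops);
 *	}
 *
 *	static void __exit my_fabric_exit(void)
 *	{
 *		target_unregister_template(&my_fabric_ops);
 *	}
 */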
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}

/* Start functions for struct config_item_type tb_dev_attrib_cit */
#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = val;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = flag;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
			configname);
	}
	/*
	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
	 * here without potentially breaking existing setups, so continue to
	 * truncate one byte shorter than what can be carried in INQUIRY.
	 */
	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
			sizeof(dev->t10_wwn.model));
	}
	da->emulate_model_alias = flag;
	return count;
}
static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}
static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1 && val != 2) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}
static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
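
/*
 * Illustrative usage: pi_prot_type accepts 0 (disabled) or a T10-DIF
 * protection type, written via configfs (path layout illustrative):
 *
 *   echo 1 > $CONFIGFS/target/core/$HBA/$DEV/attrib/pi_prot_type
 *
 * Type 2 is rejected above with -ENOSYS; types 1 and 3 take the same
 * init_prot()/free_prot() backend path.
 */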
static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}
static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}
static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}
static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
		       " because max_unmap_block_desc_count is zero\n",
		       da->da_dev);
		return -ENOSYS;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}
static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}
static ssize_t alua_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}

static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR_WO(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR_RO(, alua_support);
CONFIGFS_ATTR_RO(, pgr_support);
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
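
/*
 * Backends select one of the two arrays above through their
 * target_backend_ops, e.g. (illustrative, following the pattern in
 * target_core_iblock.c and target_core_pscsi.c):
 *
 *	.tb_dev_attrib_attrs	= sbc_attrib_attrs,	     // SBC emulation
 *	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,  // CDB passthrough
 */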
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}

/*
 * STANDARD and VPD page 0x83 T10 Vendor Identification
 */
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}
static ssize_t target_wwn_vendor_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
	char *stripped = NULL;
	size_t len;
	int i;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_VENDOR_LEN) {
		pr_err("Emulated T10 Vendor Identification exceeds"
			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
			"\n");
		return -EOVERFLOW;
	}

	/*
	 * SPC 4.3.1:
	 * ASCII data fields shall contain only ASCII printable characters (i.e.,
	 * code values 20h to 7Eh) and may be terminated with one or more ASCII
	 * null (00h) characters.
	 */
	for (i = 0; i < len; i++) {
		if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
			pr_err("Emulated T10 Vendor Identification contains"
				" non-ASCII-printable characters\n");
			return -EINVAL;
		}
	}

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Vendor Identification while"
			" active %d exports exist\n", dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
		 " %s\n", dev->t10_wwn.vendor);

	return count;
}
/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&to_t10_wwn(item)->unit_serial[0]);
}
static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}
/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
		char *page)						\
{									\
	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}
/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vendor_id,
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */
static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	ssize_t len;

	se_nacl = dev->dev_reserved_node_acl;
	if (se_nacl) {
		len = sprintf(page,
			"SPC-2 Reservation: %s Initiator: %s\n",
			se_nacl->se_tpg->se_tpg_tfo->fabric_name,
			se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}
static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->fabric_name,
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");

	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%u"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%u"},
	{Opt_err, NULL}
};
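
/*
 * Illustrative APTPL metadata input (values are examples only); tokens
 * are comma or newline separated, matching the match_table_t above:
 *
 *   initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abcd,
 *   sa_res_key=0x12ab,res_holder=1,res_type=3,mapped_lun=0,
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.linux-iscsi.tgt:sn.1,
 *   tpgt=1,target_lun=0
 */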
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return 0;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(t_fabric);
	kfree(isid);
	kfree(i_port);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);

static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_pr_attr_res_holder,
	&target_pr_attr_res_pr_all_tgt_pts,
	&target_pr_attr_res_pr_generation,
	&target_pr_attr_res_pr_holder_tg_port,
	&target_pr_attr_res_pr_registered_i_pts,
	&target_pr_attr_res_pr_type,
	&target_pr_attr_res_type,
	&target_pr_attr_res_aptpl_active,
	&target_pr_attr_res_aptpl_metadata,
	NULL,
};

TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
/* End functions for struct config_item_type tb_dev_pr_cit */

/* Start functions for struct config_item_type tb_dev_cit */

static inline struct se_device *to_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device, dev_group);
}
static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes += bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}
static ssize_t target_dev_control_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);

	return dev->transport->set_configfs_dev_params(dev, page, count);
}
static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}
static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}
2042 struct se_device *dev = to_device(item);
2044 return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}
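
/*
 * Illustrative usage: a backstore is brought online by writing "1" to
 * its enable attribute once the control parameters have been set:
 *
 *   echo 1 > $CONFIGFS/target/core/$HBA/$DEV/enable
 */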
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return count;

	if (count > LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}
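
/*
 * Illustrative usage: write an existing LU group name to associate the
 * device, or the literal string "NULL" to drop the association:
 *
 *   echo default_lu_gp > $CONFIGFS/target/core/$HBA/$DEV/alua_lu_gp
 *   echo NULL > $CONFIGFS/target/core/$HBA/$DEV/alua_lu_gp
 */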
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}
static ssize_t target_dev_lba_map_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *orig, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	orig = map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(orig);
	return count;
}
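
/*
 * Illustrative lba_map input (values are examples only): the first line
 * carries "segment_size segment_multiplier", and each following line
 * carries "first_lba last_lba" plus pg_id:state pairs, where state is
 * one of O/A/S/U as parsed above:
 *
 *   65536 2
 *   0 65535 0:O 1:A
 *   65536 131071 0:A 1:O
 */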
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);

static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_dev_attr_info,
	&target_dev_attr_control,
	&target_dev_attr_alias,
	&target_dev_attr_udev_path,
	&target_dev_attr_enable,
	&target_dev_attr_alua_lu_gp,
	&target_dev_attr_lba_map,
	NULL,
};
static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	target_free_device(dev);
}

/*
 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
 * within target_fabric_port_link()
 */
struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
};

TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);

/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */

static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_lu_gp,
			lu_gp_group);
}

static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);

	if (!lu_gp->lu_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_lu_gp_attr_lu_gp_id,
	&target_lu_gp_attr_members,
	NULL,
};
static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
};

static const struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */

static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
			&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;
}
static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static const struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
			tg_pt_gp_group);
}

static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%d\n",
		       to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on non valid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!target_dev_configured(dev)) {
		pr_err("Unable to set alua_access_state while device is"
		       " not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
				" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}
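/*
 * Added note (hedged commentary, not from the original source): new_state
 * above is the numeric ALUA access state from the ALUA_ACCESS_STATE_* enum,
 * which follows the SPC-4 primary target port asymmetric access state
 * codes (e.g. 0 = active/optimized, 2 = standby). A hypothetical usage
 * sketch from userspace would be:
 *
 *	echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/$TG_PT_GP/alua_access_state
 */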
static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	return sprintf(page, "%s\n",
		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}

static ssize_t target_tg_pt_gp_alua_access_status_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do set ALUA access status on non"
			" valid tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
				" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	if ((new_status != ALUA_STATUS_NONE) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal ALUA access status: 0x%02x\n",
				new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;

	return count;
}
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
		char *page)
{
	return core_alua_show_access_type(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
		struct config_item *item, char *p)			\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	return sprintf(p, "%d\n",					\
		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
}									\
									\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
		struct config_item *item, const char *p, size_t c)	\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		pr_err("Unable to do set " #_name " ALUA state on non"	\
		       " valid tg_pt_gp ID: %hu\n",			\
		       t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return -EINVAL;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->tg_pt_gp_alua_supported_states |= _bit;		\
	else								\
		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
									\
	return c;							\
}
ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
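/*
 * Added note: each ALUA_SUPPORTED_STATE_ATTR() invocation above expands
 * into a target_tg_pt_gp_alua_support_<name>_show()/_store() pair; the
 * CONFIGFS_ATTR() entries further below then expose them as the per-group
 * alua_support_* attributes, each toggling a single ALUA_*_SUP bit in
 * tg_pt_gp_alua_supported_states.
 */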
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
		struct config_item *item, char *page)
{
	return sprintf(page, "%d\n",
		       to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}

static ssize_t target_tg_pt_gp_alua_write_metadata_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_write_metadata:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;

	return count;
}
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page, count);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page, count);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
		struct config_item *item, char *page)
{
	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
		struct config_item *item, const char *page, size_t count)
{
	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
		char *page)
{
	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	if (!tg_pt_gp->tg_pt_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}

static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
		       page);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
		       tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}
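/*
 * Added note: the members attribute above emits one line per mapped LUN in
 * the form "<fabric_name>/<wwn>/tpgt_<tag>/<lun>", e.g. (hypothetical)
 * "iscsi/iqn.2003-01.org.example:sn.1/tpgt_1/lun_0".
 */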
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};
static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}
static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. There are default groups
 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
 * target_core_alua_cit in target_core_init_configfs() below.
 */
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	return;
}

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
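/*
 * Added note: after a successful mkdir of core/$HBA/$DEV, the default
 * group tree built above is $DEV/{action,attrib,pr,wwn,alua,statistics},
 * with alua/default_tg_pt_gp pre-populated as the initial target port
 * group for the new device.
 */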
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}

static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}
static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp..
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
				" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
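/*
 * Added note: the HBA directory name is parsed as
 * $SUBSYSTEM_PLUGIN_$HOST_ID, so e.g. (illustrative) "iblock_0" selects
 * the iblock backend with plugin dependent ID 0, while a name such as
 * "rd_mcp_1" exercises the double-underscore special case handled above.
 */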
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static const struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
static void target_init_dbroot(void)
{
	struct file *fp;

	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
		return;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	target_init_dbroot();

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
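/*
 * Added note: on successful module load, the registration above publishes
 * the static tree /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp;
 * per-HBA and per-device groups are then created on demand through
 * target_core_call_addhbatotarget() and target_core_make_subdev().
 */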
static void __exit target_core_exit_configfs(void)
{
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic..
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);