/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
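/*
 * Illustrative sketch (editorial, not part of this file): fabric drivers do
 * not usually call transport_lookup_cmd_lun() directly; it is invoked on
 * their behalf from the generic command submission path, roughly:
 *
 *	target_submit_cmd()
 *	  -> transport_lookup_cmd_lun(se_cmd, unpacked_lun)
 *	       (resolves se_cmd->se_lun/se_dev, takes the lun_ref)
 *
 * The exact call chain varies between kernel versions.
 */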
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
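/*
 * Note (editorial): the TMR variant above mirrors transport_lookup_cmd_lun()
 * but returns a plain errno (-ENODEV) instead of a sense_reason_t, and it
 * additionally links the se_tmr_req onto the device's dev_tmr_list so the
 * per-device tmr workqueue can locate it later.
 */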
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode.
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
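/*
 * Worked example (illustrative): with block_size = 512 and PAGE_SIZE = 4096,
 * alignment = max(1, 4096 / 512) = 8, so a max_sectors of 2047 is rounded
 * down to rounddown(2047, 8) = 2040. When block_size equals PAGE_SIZE the
 * alignment clamps to 1 and the value passes through unchanged.
 */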
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
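/*
 * Usage note (editorial assumption, not from this file): the se_dev_set_*()
 * attribute setters above are wired to the configfs store handlers in
 * target_core_configfs.c, so a userspace write such as
 *
 *	echo 4096 > /sys/kernel/config/target/core/$HBA/$DEV/attrib/max_write_same_len
 *
 * ends up calling se_dev_set_max_write_same_len().
 */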
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_dpo not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_fua_read not supported\n");
		return -EINVAL;
	}

	return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (dev->transport->get_write_cache) {
		pr_warn("emulate_write_cache cannot be changed when underlying"
			" HW reports WriteCacheEnabled, ignoring request\n");
		return 0;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
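/*
 * Example (illustrative): for a virtual backend with hw_queue_depth = 128 and
 * a current queue_depth of 32, requesting 64 succeeds, while requesting 256
 * trips the hw_queue_depth check above and returns -EINVAL. For pSCSI the
 * requested depth is checked against hw_queue_depth unconditionally.
 */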
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device; use Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}
/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}
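/*
 * Lifecycle summary (editorial): a backend device passes through
 * target_alloc_device() -> target_configure_device() -> core_dev_export()
 * (once per fabric LUN), and is torn down via core_dev_unexport() and
 * target_free_device(), which only destroys the tmr workqueue and drops the
 * g_device_list entry when DF_CONFIGURED was actually set.
 */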
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}