/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irq(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irq(&dev->stats_lock);

	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is
	 * used for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

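/*
 * Illustrative fabric-side usage, not part of this file. queue_status()
 * below is a hypothetical placeholder for a fabric module's own response
 * path; on failure se_cmd->scsi_sense_reason and SCF_SCSI_CDB_EXCEPTION
 * have already been set by the lookup above, so the fabric only needs to
 * send the resulting CHECK_CONDITION back to the initiator:
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		queue_status(se_cmd);
 *		return;
 *	}
 */
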
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock(&se_tmr->tmr_dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock(&se_tmr->tmr_dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

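/*
 * Unlike transport_lookup_cmd_lun() above, the TMR flavour skips the
 * per-deve statistics and the se_lun->lun_cmd_list tracking; it only
 * resolves the LUN and links the se_tmr_req into the device's
 * dev_tmr_list for LUN_RESET and friends to find.
 */
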
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!(lun)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!(port)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

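/*
 * Callers of core_get_se_deve_from_rtpi() are expected to drop the PR
 * reference taken above once they are finished with the returned *deve,
 * as the SPC-3 persistent reservations code does:
 *
 *	atomic_dec(&deve->pr_ref_count);
 *	smp_mb__after_atomic_dec();
 */
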
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!(enable)) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

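/*
 * The enable parameter selects between the two halves above: enable=1
 * creates or upgrades a MappedLUN, enable=0 tears one down.  Both call
 * shapes appear elsewhere in this file, e.g.:
 *
 *	core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
 *			lun_access, nacl, tpg, 1);
 *	core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
 *			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
 */
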
/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!(port)) {
		printk(KERN_ERR "Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		printk(KERN_WARNING "Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

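/*
 * Note on the allocation loop above: RELATIVE TARGET PORT IDENTIFIERs are
 * 16-bit values and 0h is reserved, so once dev_rpti_counter wraps, the
 * !(port->sep_rtpi) test skips 0 and the dev_sep_list walk jumps back to
 * the "again" label whenever the candidate collides with an RTPI that is
 * still in use by an existing port.
 */
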
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_task *se_task;
	unsigned char *buf = se_cmd->t_task.t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
		break;

	if (!(se_task)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

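/*
 * Worked example for the header above: with three mapped LUNs, lun_count
 * becomes 3 and LUN LIST LENGTH is 3 * 8 = 24 (0x00000018), stored
 * big-endian in bytes 0-3 of the parameter data; the 8-byte LUN entries
 * themselves begin at offset 8 as required by SPC.
 */
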
/*	se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_hba_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

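/*
 * se_dev_start()/se_dev_stop() implement a small refcounted state machine
 * on dev_obj.obj_access_count: taking the first reference flips
 * DEACTIVATED -> ACTIVATED (or the OFFLINE_* equivalents), and dropping
 * the last reference flips it back.
 */
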
int se_dev_check_online(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -EINVAL;
	} else {
		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);

	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);

	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);

	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated == NULL) {
		printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated == NULL) {
		printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);

	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated == NULL) {
		printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == NULL) {
		printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);

	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);

	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);

	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");

	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);

	return 0;
}

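/*
 * Example of the depth_left adjustment above: raising queue_depth from a
 * current value of 32 to 64 adds 32 credits to dev->depth_left, while
 * lowering it from 64 back to 32 subtracts those 32 credits again.
 */
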
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!(max_sectors)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!(force) && (max_sectors >
				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	printk("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);

	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);

	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);

	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while"
			" dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !(lun_p))
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}

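/*
 * Note: core_dev_add_lun() is normally driven from configfs; creating a
 * LUN directory under a fabric TPG (e.g. .../tpgt_1/lun/lun_0 -- the path
 * layout is fabric dependent and shown here for illustration only) and
 * symlinking it to a backstore device ends up here.
 */
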
/*	core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

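/*
 * Example of the access demotion above: if the underlying se_lun was
 * exported read-only (DF_READ_ONLY in core_dev_add_lun()) but the caller
 * asked for TRANSPORT_LUNFLAGS_READ_WRITE on this MappedLUN, the mapping
 * is silently demoted to TRANSPORT_LUNFLAGS_READ_ONLY.
 */
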
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->se_dev_node);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev) || !(dev)) {
		ret = -ENOMEM;
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}

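/*
 * Note on the "rd_pages=8" parameter above: the internal rd_dr ramdisk
 * backing virtual LUN 0 is sized at 8 pages (32KiB with 4KiB pages),
 * which is enough to service REPORT LUNS, INQUIRY and friends for
 * initiators that have no explicit MappedLUN=0.
 */
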
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!(hba))
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}