/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the iSCSI Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

int transport_get_lun_for_cmd(
	struct se_cmd *se_cmd,
	unsigned char *cdb,
	u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	unsigned long flags;
	int read_only = 0;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			if (deve->lun_flags &
					TRANSPORT_LUNFLAGS_READ_ONLY) {
				read_only = 1;
				goto out;
			}
			deve->write_bytes += se_cmd->data_length;
		} else if (se_cmd->data_direction ==
			   DMA_FROM_DEVICE) {
			deve->read_bytes += se_cmd->data_length;
		}
		deve->deve_cmds++;

		se_lun = se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
out:
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		if (read_only) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk(KERN_INFO "TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				CMD_TFO(se_cmd)->get_fabric_name(),
				unpacked_lun);
			return -1;
		} else {
			/*
			 * Use the se_portal_group->tpg_virt_lun0 to allow for
			 * REPORT_LUNS, et al to be returned when no active
			 * MappedLUN=0 exists for this Initiator Port.
			 */
			if (unpacked_lun != 0) {
				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
					" Access for 0x%08x\n",
					CMD_TFO(se_cmd)->get_fabric_name(),
					unpacked_lun);
				return -1;
			}
			/*
			 * Force WRITE PROTECT for virtual LUN 0
			 */
			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
			    (se_cmd->data_direction != DMA_NONE)) {
				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				return -1;
			}
#if 0
			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
				CMD_TFO(se_cmd)->get_fabric_name());
#endif
			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
			se_cmd->orig_fe_lun = 0;
			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		}
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	{
	struct se_device *dev = se_lun->lun_se_dev;
	spin_lock(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock(&dev->stats_lock);
	}

	/*
	 * Add the se_cmd to the struct se_lun's cmd list.  This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
#if 0
	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);

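/*
 * Note: on failure transport_get_lun_for_cmd() returns -1 with
 * se_cmd->scsi_sense_reason and SCF_SCSI_CDB_EXCEPTION already set, so a
 * fabric module can typically respond without further decoding; an
 * illustrative (not prescriptive) call site sketch:
 *
 *	if (transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun) < 0) {
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *		return;
 *	}
 */
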
int transport_get_lun_for_tmr(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
{
	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			CMD_TFO(se_cmd)->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	spin_lock(&dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);

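/*
 * Unlike the I/O path above, the TMR path does not touch per-deve or
 * per-device statistics; its main side effect is linking the se_tmr_req
 * onto dev->dev_tmr_list, where subsequent LUN_RESET processing can
 * locate outstanding task management requests for this device.
 */
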
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!(lun)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!(port)) {
			printk(KERN_ERR "%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

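/*
 * Note for callers: a successful return from core_get_se_deve_from_rtpi()
 * leaves deve->pr_ref_count elevated; the disable path in
 * core_update_device_list_for_node() below spins until this count drops
 * back to zero, so callers are expected to decrement it once finished
 * with the returned *deve.
 */
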
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);

	return;
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return;
}

/*	core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!(enable)) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

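/*
 * Illustrative usage: enable=1 sets up the MappedLUN entry (see
 * core_dev_add_initiator_node_lun_acl() below), while enable=0 tears the
 * mapping down again and releases UA + PR state for the entry, e.g.:
 *
 *	core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
 *			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
 */
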
/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return;
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!(port)) {
		printk(KERN_ERR "Unable to allocate struct se_port\n");
		return NULL;
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		printk(KERN_WARNING "Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return NULL;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

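/*
 * Example of the wrap behaviour above: port->sep_rtpi is a u16, so after
 * RTPI 0xffff the increment yields 0h, which spc4r17 reserves and the
 * !(port->sep_rtpi) check skips; any value still in use on dev_sep_list
 * is likewise skipped via the goto again loop.
 */
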
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			T10_ALUA(su_dev)->default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);

	return;
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (!(port))
		return -1;

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

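/*
 * core_dev_export() and core_dev_unexport() below form a pair: export
 * allocates the se_port, starts the device and bumps
 * dev->dev_export_obj.obj_access_count; unexport reverses each of those
 * steps in turn.
 */
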
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_task *se_task;
	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8;
	u64 i, lun;

	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
		break;

	if (!(se_task)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		lun = 0;
		buf[offset++] = ((lun >> 56) & 0xff);
		buf[offset++] = ((lun >> 48) & 0xff);
		buf[offset++] = ((lun >> 40) & 0xff);
		buf[offset++] = ((lun >> 32) & 0xff);
		buf[offset++] = ((lun >> 24) & 0xff);
		buf[offset++] = ((lun >> 16) & 0xff);
		buf[offset++] = ((lun >> 8) & 0xff);
		buf[offset++] = (lun & 0xff);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &SE_NODE_ACL(se_sess)->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
		buf[offset++] = ((lun >> 56) & 0xff);
		buf[offset++] = ((lun >> 48) & 0xff);
		buf[offset++] = ((lun >> 40) & 0xff);
		buf[offset++] = ((lun >> 32) & 0xff);
		buf[offset++] = ((lun >> 24) & 0xff);
		buf[offset++] = ((lun >> 16) & 0xff);
		buf[offset++] = ((lun >> 8) & 0xff);
		buf[offset++] = (lun & 0xff);
		cdb_offset += 8;
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

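/*
 * Resulting REPORT LUNS parameter data layout (per SPC-3):
 *
 *	bytes 0..3	LUN LIST LENGTH == lun_count * 8
 *	bytes 4..7	reserved
 *	bytes 8..15	first 8-byte LUN entry
 *	...
 *
 * which is why the first LUN entry above is written starting at offset 8,
 * and why lun_count is multiplied by 8 before being stored in the header.
 */
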
/*	se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);

	return;
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);

	return;
}

/*
 *	Called with struct se_hba->device_lock held.
 */
void se_clear_dev_ports(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	struct se_lun *lun;
	struct se_portal_group *tpg;
	struct se_port *sep, *sep_tmp;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
		spin_unlock(&dev->se_port_lock);
		spin_unlock(&hba->device_lock);

		lun = sep->sep_lun;
		tpg = sep->sep_tpg;
		spin_lock(&lun->lun_sep_lock);
		if (lun->lun_se_dev == NULL) {
			spin_unlock(&lun->lun_sep_lock);
			continue;
		}
		spin_unlock(&lun->lun_sep_lock);

		core_dev_del_lun(tpg, lun->unpacked_lun);

		spin_lock(&hba->device_lock);
		spin_lock(&dev->se_port_lock);
	}
	spin_unlock(&dev->se_port_lock);

	return;
}

/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	spin_lock(&hba->device_lock);
	se_clear_dev_ports(dev);
	spin_unlock(&hba->device_lock);

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);

	while (atomic_read(&hba->dev_mib_access_count))
		cpu_relax();
}

int se_dev_check_online(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	DEV_ATTRIB(dev)->unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}

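/*
 * The defaults assigned above are only starting values; each may later be
 * overridden through the se_dev_set_*() handlers below, which are
 * typically reached from userspace via the device's configfs attrib/
 * group (illustrative path: /sys/kernel/config/target/core/$HBA/$DEV/attrib/).
 */
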
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -1;
	} else {
		DEV_ATTRIB(dev)->task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				DEV_ATTRIB(dev)->hw_queue_depth);
			return -1;
		}
	} else {
		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					DEV_ATTRIB(dev)->hw_queue_depth);
				return -1;
			}
		}
	}

	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

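/*
 * Example of the depth_left adjustment above: growing queue_depth from 16
 * to 64 adds 48 credits to dev->depth_left, while shrinking from 64 to 16
 * subtracts 48, so credits consumed by commands already dispatched remain
 * correctly accounted for across the change.
 */
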
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(max_sectors)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -1;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -1;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
	} else {
		if (!(force) && (max_sectors >
				 DEV_ATTRIB(dev)->hw_max_sectors)) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -1;
		}
	}

	DEV_ATTRIB(dev)->max_sectors = max_sectors;
	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
			dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
		return -EINVAL;
	}

	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);

	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -1;
	}

	DEV_ATTRIB(dev)->block_size = block_size;
	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}

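/*
 * Note that only the exported block_size changes above;
 * DEV_ATTRIB(dev)->hw_block_size keeps reporting the backing device's
 * logical block size as captured in se_dev_set_default_attribs().
 */
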
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !(lun_p))
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}

/*	core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			TPG_TFO(tpg)->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

	return 0;
}

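/*
 * As enforced above, a LUN exported read-only can never be mapped
 * read-write through an ACL; a READ_WRITE request against such a LUN is
 * silently downgraded to TRANSPORT_LUNFLAGS_READ_ONLY before the device
 * list update takes place.
 */
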
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg),
		TPG_TFO(tpg)->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	se_global->g_lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_reservation.registration_lock);
	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	se_global->g_lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (!(dev) || IS_ERR(dev)) {
		ret = -ENOMEM;
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	se_global->g_lun0_dev = dev;

	return 0;
out:
	se_global->g_lun0_su_dev = NULL;
	kfree(se_dev);
	if (se_global->g_lun0_hba) {
		core_delete_hba(se_global->g_lun0_hba);
		se_global->g_lun0_hba = NULL;
	}
	return ret;
}

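/*
 * The virtual LUN0 created above is backed by a small internal ramdisk
 * ("rd_dr" HBA with rd_pages=8), which is what lets an initiator without
 * an explicit MappedLUN=0 still complete REPORT LUNS and friends through
 * se_portal_group->tpg_virt_lun0 in transport_get_lun_for_cmd().
 */
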
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = se_global->g_lun0_hba;
	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;

	if (!(hba))
		return;

	if (se_global->g_lun0_dev)
		se_free_virtual_device(se_global->g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}