1 /*******************************************************************************
2 * Filename: target_core_alua.c
4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
6 * (c) Copyright 2009-2013 Datera, Inc.
8 * Nicholas A. Bellinger <nab@kernel.org>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 ******************************************************************************/
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/configfs.h>
29 #include <linux/export.h>
30 #include <linux/file.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <asm/unaligned.h>
35 #include <target/target_core_base.h>
36 #include <target/target_core_backend.h>
37 #include <target/target_core_fabric.h>
38 #include <target/target_core_configfs.h>
40 #include "target_core_internal.h"
41 #include "target_core_alua.h"
42 #include "target_core_ua.h"
44 static sense_reason_t core_alua_check_transition(int state, int valid,
46 static int core_alua_set_tg_pt_secondary_state(
47 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
48 struct se_port *port, int explicit, int offline);
50 static char *core_alua_dump_state(int state);
52 static u16 alua_lu_gps_counter;
53 static u32 alua_lu_gps_count;
55 static DEFINE_SPINLOCK(lu_gps_lock);
56 static LIST_HEAD(lu_gps_list);
58 struct t10_alua_lu_gp *default_lu_gp;
63 * See sbc3r35 section 5.23
66 target_emulate_report_referrals(struct se_cmd *cmd)
68 struct se_device *dev = cmd->se_dev;
69 struct t10_alua_lba_map *map;
70 struct t10_alua_lba_map_member *map_mem;
74 if (cmd->data_length < 4) {
75 pr_warn("REPORT REFERRALS allocation length %u too"
76 " small\n", cmd->data_length);
77 return TCM_INVALID_CDB_FIELD;
80 buf = transport_kmap_data_sg(cmd);
82 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
85 spin_lock(&dev->t10_alua.lba_map_lock);
86 if (list_empty(&dev->t10_alua.lba_map_list)) {
87 spin_unlock(&dev->t10_alua.lba_map_lock);
88 transport_kunmap_data_sg(cmd);
90 return TCM_UNSUPPORTED_SCSI_OPCODE;
93 list_for_each_entry(map, &dev->t10_alua.lba_map_list,
95 int desc_num = off + 3;
99 if (cmd->data_length > off)
100 put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
102 if (cmd->data_length > off)
103 put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
107 list_for_each_entry(map_mem, &map->lba_map_mem_list,
109 int alua_state = map_mem->lba_map_mem_alua_state;
110 int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
112 if (cmd->data_length > off)
113 buf[off] = alua_state & 0x0f;
115 if (cmd->data_length > off)
116 buf[off] = (alua_pg_id >> 8) & 0xff;
118 if (cmd->data_length > off)
119 buf[off] = (alua_pg_id & 0xff);
124 if (cmd->data_length > desc_num)
125 buf[desc_num] = pg_num;
127 spin_unlock(&dev->t10_alua.lba_map_lock);
130 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
132 put_unaligned_be16(rd_len, &buf[2]);
134 transport_kunmap_data_sg(cmd);
136 target_complete_cmd(cmd, GOOD);
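/*
 * Layout of the data built above (a sketch based on the code in this
 * function, not a verbatim copy of the sbc3r35 tables): the 4-byte header
 * carries the RETURN DATA LENGTH at byte offset 2, and each user data
 * segment referral descriptor carries the segment's first and last LBA as
 * big-endian 64-bit values, followed by a 4-byte entry per LBA map member
 * holding the ALUA state (low nibble) and the 16-bit target port group ID;
 * the descriptor's target port group count is written back at desc_num once
 * the members have been walked.
 */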
141 * REPORT_TARGET_PORT_GROUPS
143 * See spc4r17 section 6.27
146 target_emulate_report_target_port_groups(struct se_cmd *cmd)
148 struct se_device *dev = cmd->se_dev;
149 struct se_port *port;
150 struct t10_alua_tg_pt_gp *tg_pt_gp;
151 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
154 int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
157 * Skip over RESERVED area to first Target port group descriptor
158 * depending on the PARAMETER DATA FORMAT type..
165 if (cmd->data_length < off) {
166 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
167 " small for %s header\n", cmd->data_length,
168 (ext_hdr) ? "extended" : "normal");
169 return TCM_INVALID_CDB_FIELD;
171 buf = transport_kmap_data_sg(cmd);
173 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
175 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
176 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
179 * Check if the Target port group and Target port descriptor list
180 * based on tg_pt_gp_members count will fit into the response payload.
181 * Otherwise, bump rd_len to let the initiator know we have exceeded
182 * the allocation length and the response is truncated.
184 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
186 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
190 * PREF: Preferred target port bit, determine if this
191 * bit should be set for port group.
193 if (tg_pt_gp->tg_pt_gp_pref)
196 * Set the ASYMMETRIC ACCESS State
198 buf[off++] |= (atomic_read(
199 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
201 * Set supported ASYMMETRIC ACCESS State bits
203 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
207 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
208 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
210 off++; /* Skip over Reserved */
214 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
216 * Vendor Specific field
222 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
225 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
226 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
228 port = tg_pt_gp_mem->tg_pt;
230 * Start Target Port descriptor format
232 * See spc4r17 section 6.2.7 Table 247
234 off += 2; /* Skip over Obsolete */
236 * Set RELATIVE TARGET PORT IDENTIFIER
238 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
239 buf[off++] = (port->sep_rtpi & 0xff);
242 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
244 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
246 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
248 put_unaligned_be32(rd_len, &buf[0]);
251 * Fill in the Extended header parameter data format if requested
256 * Set the implicit transition time (in seconds) for the application
257 * client to use as a base for its transition timeout value.
259 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
260 * this CDB was received upon to determine this value individually
261 * for the ALUA target port group.
263 port = cmd->se_lun->lun_sep;
264 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
266 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
267 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
269 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
270 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
273 transport_kunmap_data_sg(cmd);
275 target_complete_cmd(cmd, GOOD);
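/*
 * Shape of the returned data as assembled above (an illustrative summary of
 * what the loop emits, not a verbatim spc4r17 table): a 4-byte header whose
 * RETURN DATA LENGTH is filled in at buf[0..3], optionally followed by
 * extended header bytes (buf[5] carries the implicit transition time when
 * the initiator requested the extended format), then one descriptor per
 * target port group: PREF bit plus current asymmetric access state, the
 * supported-states bitmap, the 16-bit group ID, the access status, the
 * target port count, and a 4-byte entry per member port whose bytes 2-3
 * hold the RELATIVE TARGET PORT IDENTIFIER.
 */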
280 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
282 * See spc4r17 section 6.35
285 target_emulate_set_target_port_groups(struct se_cmd *cmd)
287 struct se_device *dev = cmd->se_dev;
288 struct se_port *port, *l_port = cmd->se_lun->lun_sep;
289 struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
290 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
291 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
294 sense_reason_t rc = TCM_NO_SENSE;
295 u32 len = 4; /* Skip over RESERVED area in header */
296 int alua_access_state, primary = 0, valid_states;
300 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
302 if (cmd->data_length < 4) {
303 pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
304 " small\n", cmd->data_length);
305 return TCM_INVALID_PARAMETER_LIST;
308 buf = transport_kmap_data_sg(cmd);
310 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
313 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
314 * for the local tg_pt_gp.
316 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
317 if (!l_tg_pt_gp_mem) {
318 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
319 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
322 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
323 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
325 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
326 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
327 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
330 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
332 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
333 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
334 " while TPGS_EXPLICIT_ALUA is disabled\n");
335 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
338 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
340 ptr = &buf[4]; /* Skip over RESERVED area in header */
342 while (len < cmd->data_length) {
344 alua_access_state = (ptr[0] & 0x0f);
346 * Check the received ALUA access state, and determine if
347 * the state is a primary or secondary target port asymmetric
350 rc = core_alua_check_transition(alua_access_state,
351 valid_states, &primary);
354 * If the SET TARGET PORT GROUPS attempts to establish
355 * an invalid combination of target port asymmetric
356 * access states or attempts to establish an
357 * unsupported target port asymmetric access state,
358 * then the command shall be terminated with CHECK
359 * CONDITION status, with the sense key set to ILLEGAL
360 * REQUEST, and the additional sense code set to INVALID
361 * FIELD IN PARAMETER LIST.
367 * If the ASYMMETRIC ACCESS STATE field (see table 267)
368 * specifies a primary target port asymmetric access state,
369 * then the TARGET PORT GROUP OR TARGET PORT field specifies
370 * a primary target port group for which the primary target
371 * port asymmetric access state shall be changed. If the
372 * ASYMMETRIC ACCESS STATE field specifies a secondary target
373 * port asymmetric access state, then the TARGET PORT GROUP OR
374 * TARGET PORT field specifies the relative target port
375 * identifier (see 3.1.120) of the target port for which the
376 * secondary target port asymmetric access state shall be changed.
380 tg_pt_id = get_unaligned_be16(ptr + 2);
382 * Locate the matching target port group ID from
383 * the global tg_pt_gp list
385 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
386 list_for_each_entry(tg_pt_gp,
387 &dev->t10_alua.tg_pt_gps_list,
389 if (!tg_pt_gp->tg_pt_gp_valid_id)
392 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
395 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
396 smp_mb__after_atomic_inc();
398 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
400 if (!core_alua_do_port_transition(tg_pt_gp,
402 alua_access_state, 1))
405 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
406 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
407 smp_mb__after_atomic_dec();
410 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
413 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
414 * the Target Port in question for the incoming
415 * SET_TARGET_PORT_GROUPS op.
417 rtpi = get_unaligned_be16(ptr + 2);
419 * Locate the matching relative target port identifier
420 * for the struct se_device storage object.
422 spin_lock(&dev->se_port_lock);
423 list_for_each_entry(port, &dev->dev_sep_list,
425 if (port->sep_rtpi != rtpi)
428 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
430 spin_unlock(&dev->se_port_lock);
432 if (!core_alua_set_tg_pt_secondary_state(
433 tg_pt_gp_mem, port, 1, 1))
436 spin_lock(&dev->se_port_lock);
439 spin_unlock(&dev->se_port_lock);
443 rc = TCM_INVALID_PARAMETER_LIST;
452 transport_kunmap_data_sg(cmd);
454 target_complete_cmd(cmd, GOOD);
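/*
 * The parameter list parsed above looks roughly like this (a sketch of the
 * spc4r17 format as consumed by the loop): a 4-byte reserved header, then
 * one 4-byte descriptor per requested change, where byte 0 carries the
 * desired ASYMMETRIC ACCESS STATE in its low nibble and bytes 2-3 carry
 * either the TARGET PORT GROUP ID (for a primary state change) or the
 * RELATIVE TARGET PORT IDENTIFIER (for a secondary state change such as
 * OFFLINE).
 */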
458 static inline int core_alua_state_nonoptimized(
461 int nonop_delay_msecs,
465 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
466 * later to determine if processing of this cmd needs to be
467 * temporarily delayed for the Active/NonOptimized primary access state.
469 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
470 cmd->alua_nonop_delay = nonop_delay_msecs;
474 static inline int core_alua_state_lba_dependent(
476 struct t10_alua_tg_pt_gp *tg_pt_gp,
479 struct se_device *dev = cmd->se_dev;
480 u64 segment_size, segment_mult, sectors, lba;
482 /* Only need to check CDBs that actually contain LBAs */
483 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
486 spin_lock(&dev->t10_alua.lba_map_lock);
487 segment_size = dev->t10_alua.lba_map_segment_size;
488 segment_mult = dev->t10_alua.lba_map_segment_multiplier;
489 sectors = cmd->data_length / dev->dev_attrib.block_size;
491 lba = cmd->t_task_lba;
492 while (lba < cmd->t_task_lba + sectors) {
493 struct t10_alua_lba_map *cur_map = NULL, *map;
494 struct t10_alua_lba_map_member *map_mem;
496 list_for_each_entry(map, &dev->t10_alua.lba_map_list,
498 u64 start_lba, last_lba;
499 u64 first_lba = map->lba_map_first_lba;
503 start_lba = do_div(tmp, segment_size * segment_mult);
505 last_lba = first_lba + segment_size - 1;
506 if (start_lba >= first_lba &&
507 start_lba <= last_lba) {
513 last_lba = map->lba_map_last_lba;
514 if (lba >= first_lba && lba <= last_lba) {
522 spin_unlock(&dev->t10_alua.lba_map_lock);
523 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
526 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
528 if (map_mem->lba_map_mem_alua_pg_id !=
529 tg_pt_gp->tg_pt_gp_id)
531 switch(map_mem->lba_map_mem_alua_state) {
532 case ALUA_ACCESS_STATE_STANDBY:
533 spin_unlock(&dev->t10_alua.lba_map_lock);
534 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
536 case ALUA_ACCESS_STATE_UNAVAILABLE:
537 spin_unlock(&dev->t10_alua.lba_map_lock);
538 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
545 spin_unlock(&dev->t10_alua.lba_map_lock);
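/*
 * Worked example of the segment lookup above (illustrative values only):
 * with lba_map_segment_size = 0x1000 blocks and
 * lba_map_segment_multiplier = 2, an I/O starting at LBA 0x2100 is reduced
 * to 0x2100 % 0x2000 = 0x100 by do_div(), so it matches the map entry whose
 * first LBA is 0 (covering the reduced range 0x0-0xfff), and the command is
 * then allowed or refused according to that entry's per-target-port-group
 * ALUA state.
 */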
549 static inline int core_alua_state_standby(
555 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
556 * spc4r17 section 5.9.2.4.4
565 case RECEIVE_DIAGNOSTIC:
566 case SEND_DIAGNOSTIC:
569 case SERVICE_ACTION_IN:
570 switch (cdb[1] & 0x1f) {
571 case SAI_READ_CAPACITY_16:
574 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
578 switch (cdb[1] & 0x1f) {
579 case MI_REPORT_TARGET_PGS:
582 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
585 case MAINTENANCE_OUT:
587 case MO_SET_TARGET_PGS:
590 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
594 case PERSISTENT_RESERVE_IN:
595 case PERSISTENT_RESERVE_OUT:
600 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
607 static inline int core_alua_state_unavailable(
613 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
614 * spc4r17 section 5.9.2.4.5
621 switch (cdb[1] & 0x1f) {
622 case MI_REPORT_TARGET_PGS:
625 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
628 case MAINTENANCE_OUT:
630 case MO_SET_TARGET_PGS:
633 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
641 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
648 static inline int core_alua_state_transition(
654 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
655 * spc4r17 section 5.9.2.5
662 switch (cdb[1] & 0x1f) {
663 case MI_REPORT_TARGET_PGS:
666 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
674 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
682 * return 1: LUN not accessible; signal CHECK CONDITION / NOT READY
683 * return 0: success
684 * return -1: failure; INVALID FIELD IN CDB
687 target_alua_state_check(struct se_cmd *cmd)
689 struct se_device *dev = cmd->se_dev;
690 unsigned char *cdb = cmd->t_task_cdb;
691 struct se_lun *lun = cmd->se_lun;
692 struct se_port *port = lun->lun_sep;
693 struct t10_alua_tg_pt_gp *tg_pt_gp;
694 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
695 int out_alua_state, nonop_delay_msecs;
699 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
701 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
707 * First, check for a struct se_port specific secondary ALUA target port
708 * access state: OFFLINE
710 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
711 pr_debug("ALUA: Got secondary offline status for local"
713 alua_ascq = ASCQ_04H_ALUA_OFFLINE;
718 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
719 * ALUA target port group, to obtain current ALUA access state.
720 * Otherwise look for the underlying struct se_device association with
721 * a ALUA logical unit group.
723 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
727 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
728 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
729 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
730 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
731 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
733 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
734 * statement so the compiler knows explicitly to check this case first.
735 * For the Optimized ALUA access state case, we want to process the
736 * incoming fabric cmd ASAP..
738 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
741 switch (out_alua_state) {
742 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
743 ret = core_alua_state_nonoptimized(cmd, cdb,
744 nonop_delay_msecs, &alua_ascq);
746 case ALUA_ACCESS_STATE_STANDBY:
747 ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
749 case ALUA_ACCESS_STATE_UNAVAILABLE:
750 ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
752 case ALUA_ACCESS_STATE_TRANSITION:
753 ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
755 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
756 ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
759 * OFFLINE is a secondary ALUA target port group access state, which is
760 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
762 case ALUA_ACCESS_STATE_OFFLINE:
764 pr_err("Unknown ALUA access state: 0x%02x\n",
766 return TCM_INVALID_CDB_FIELD;
772 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
773 * The ALUA additional sense code qualifier (ASCQ) is determined
774 * by the ALUA primary or secondary access state..
776 pr_debug("[%s]: ALUA TG Port not available, "
777 "SenseKey: NOT_READY, ASC/ASCQ: "
779 cmd->se_tfo->get_fabric_name(), alua_ascq);
781 cmd->scsi_asc = 0x04;
782 cmd->scsi_ascq = alua_ascq;
783 return TCM_CHECK_CONDITION_NOT_READY;
790 * Check implicit and explicit ALUA state change request.
792 static sense_reason_t
793 core_alua_check_transition(int state, int valid, int *primary)
796 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
797 * defined as primary target port asymmetric access states.
800 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
801 if (!(valid & ALUA_AO_SUP))
805 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
806 if (!(valid & ALUA_AN_SUP))
810 case ALUA_ACCESS_STATE_STANDBY:
811 if (!(valid & ALUA_S_SUP))
815 case ALUA_ACCESS_STATE_UNAVAILABLE:
816 if (!(valid & ALUA_U_SUP))
820 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
821 if (!(valid & ALUA_LBD_SUP))
825 case ALUA_ACCESS_STATE_OFFLINE:
827 * OFFLINE state is defined as a secondary target port
828 * asymmetric access state.
830 if (!(valid & ALUA_O_SUP))
834 case ALUA_ACCESS_STATE_TRANSITION:
836 * Transitioning is set internally, and
837 * cannot be selected manually.
841 pr_err("Unknown ALUA access state: 0x%02x\n", state);
842 return TCM_INVALID_PARAMETER_LIST;
848 pr_err("ALUA access state %s not supported",
849 core_alua_dump_state(state));
850 return TCM_INVALID_PARAMETER_LIST;
853 static char *core_alua_dump_state(int state)
856 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
857 return "Active/Optimized";
858 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
859 return "Active/NonOptimized";
860 case ALUA_ACCESS_STATE_LBA_DEPENDENT:
861 return "LBA Dependent";
862 case ALUA_ACCESS_STATE_STANDBY:
864 case ALUA_ACCESS_STATE_UNAVAILABLE:
865 return "Unavailable";
866 case ALUA_ACCESS_STATE_OFFLINE:
868 case ALUA_ACCESS_STATE_TRANSITION:
869 return "Transitioning";
877 char *core_alua_dump_status(int status)
880 case ALUA_STATUS_NONE:
882 case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
883 return "Altered by Explicit STPG";
884 case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
885 return "Altered by Implicit ALUA";
894 * Used by fabric modules to determine when we need to delay processing
895 * for the Active/NonOptimized paths..
897 int core_alua_check_nonop_delay(
900 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
905 * The ALUA Active/NonOptimized access state delay can be disabled
906 * via configfs with a value of zero
908 if (!cmd->alua_nonop_delay)
911 * struct se_cmd->alua_nonop_delay gets set by a target port group
912 * defined interval in core_alua_state_nonoptimized()
914 msleep_interruptible(cmd->alua_nonop_delay);
917 EXPORT_SYMBOL(core_alua_check_nonop_delay);
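/*
 * Minimal usage sketch (illustrative; not taken from a specific fabric
 * driver): a fabric module can simply call
 *
 *	core_alua_check_nonop_delay(cmd);
 *
 * before dispatching a received command, so that I/O arriving on an
 * Active/NonOptimized path absorbs the configured nonop_delay_msecs.
 */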
920 * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex
923 static int core_alua_write_tpg_metadata(
925 unsigned char *md_buf,
928 struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
932 pr_err("filp_open(%s) for ALUA metadata failed\n", path);
935 ret = kernel_write(file, md_buf, md_buf_len, 0);
937 pr_err("Error writing ALUA metadata file: %s\n", path);
939 return (ret < 0) ? -EIO : 0;
943 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
945 static int core_alua_update_tpg_primary_metadata(
946 struct t10_alua_tg_pt_gp *tg_pt_gp)
948 unsigned char *md_buf;
949 struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
950 char path[ALUA_METADATA_PATH_LEN];
953 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
955 pr_err("Unable to allocate buf for ALUA metadata\n");
959 memset(path, 0, ALUA_METADATA_PATH_LEN);
961 len = snprintf(md_buf, ALUA_MD_BUF_LEN,
963 "alua_access_state=0x%02x\n"
964 "alua_access_status=0x%02x\n",
965 tg_pt_gp->tg_pt_gp_id,
966 tg_pt_gp->tg_pt_gp_alua_pending_state,
967 tg_pt_gp->tg_pt_gp_alua_access_status);
969 snprintf(path, ALUA_METADATA_PATH_LEN,
970 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
971 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
973 rc = core_alua_write_tpg_metadata(path, md_buf, len);
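/*
 * Example of the resulting metadata (hypothetical values): a file such as
 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name> containing one
 * key=value line each for the group ID, alua_access_state and
 * alua_access_status, matching the snprintf() format built above.
 */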
978 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
980 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
981 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
982 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
983 struct se_dev_entry *se_deve;
984 struct se_lun_acl *lacl;
985 struct se_port *port;
986 struct t10_alua_tg_pt_gp_member *mem;
987 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
988 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
990 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
991 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
995 * After an implicit target port asymmetric access state
996 * change, a device server shall establish a unit attention
997 * condition for the initiator port associated with every I_T
998 * nexus with the additional sense code set to ASYMMETRIC
999 * ACCESS STATE CHANGED.
1001 * After an explicit target port asymmetric access state
1002 * change, a device server shall establish a unit attention
1003 * condition with the additional sense code set to ASYMMETRIC
1004 * ACCESS STATE CHANGED for the initiator port associated with
1005 * every I_T nexus other than the I_T nexus on which the SET
1006 * TARGET PORT GROUPS command was received.
1008 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
1009 smp_mb__after_atomic_inc();
1010 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1012 spin_lock_bh(&port->sep_alua_lock);
1013 list_for_each_entry(se_deve, &port->sep_alua_list,
1015 lacl = se_deve->se_lun_acl;
1017 * se_deve->se_lun_acl pointer may be NULL for an
1018 * entry created without explicit Node+MappedLUN ACLs
1023 if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
1024 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
1025 (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
1026 (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
1027 (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
1028 (tg_pt_gp->tg_pt_gp_alua_port == port))
1031 core_scsi3_ua_allocate(lacl->se_lun_nacl,
1032 se_deve->mapped_lun, 0x2A,
1033 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
1035 spin_unlock_bh(&port->sep_alua_lock);
1037 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1038 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
1039 smp_mb__after_atomic_dec();
1041 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1043 * Update the ALUA metadata buf that has been allocated in
1044 * core_alua_do_port_transition(), this metadata will be written
1047 * Note that there is a case where we do not want to update the
1048 * metadata: when the saved metadata is being parsed in userspace
1049 * to restore the existing port access state and access status.
1051 * Also note that the failure to write out the ALUA metadata to
1052 * struct file does NOT affect the actual ALUA transition.
1054 if (tg_pt_gp->tg_pt_gp_write_metadata) {
1055 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1056 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1057 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1060 * Set the current primary ALUA access state to the requested new state
1062 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1063 tg_pt_gp->tg_pt_gp_alua_pending_state);
1065 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1066 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1067 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1068 tg_pt_gp->tg_pt_gp_id,
1069 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1070 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1071 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1072 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1073 smp_mb__after_atomic_dec();
1074 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1076 if (tg_pt_gp->tg_pt_gp_transition_complete)
1077 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1080 static int core_alua_do_transition_tg_pt(
1081 struct t10_alua_tg_pt_gp *tg_pt_gp,
1085 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1086 DECLARE_COMPLETION_ONSTACK(wait);
1088 /* Nothing to be done here */
1089 if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
1092 if (new_state == ALUA_ACCESS_STATE_TRANSITION)
1096 * Flush any pending transitions
1098 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
1099 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
1100 ALUA_ACCESS_STATE_TRANSITION) {
1102 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1103 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
1104 flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1105 wait_for_completion(&wait);
1106 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
1111 * Save the old primary ALUA access state, and set the current state
1112 * to ALUA_ACCESS_STATE_TRANSITION.
1114 tg_pt_gp->tg_pt_gp_alua_previous_state =
1115 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1116 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1118 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1119 ALUA_ACCESS_STATE_TRANSITION);
1120 tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1121 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1122 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1125 * Check for the optional ALUA primary state transition delay
1127 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1128 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1131 * Take a reference for workqueue item
1133 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1134 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1135 smp_mb__after_atomic_inc();
1136 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1138 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
1139 unsigned long transition_tmo;
1141 transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
1142 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1143 &tg_pt_gp->tg_pt_gp_transition_work,
1146 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
1147 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1148 &tg_pt_gp->tg_pt_gp_transition_work, 0);
1149 wait_for_completion(&wait);
1150 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
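/*
 * Summary of the two paths above (descriptive note): for an implicit
 * transition with a non-zero tg_pt_gp_implicit_trans_secs the work is
 * queued with that many seconds of delay and this function returns while
 * the group still reports the Transitioning state; in every other case the
 * work is queued immediately and we block on the completion until
 * core_alua_do_transition_tg_pt_work() has finished updating the state and
 * metadata.
 */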
1156 int core_alua_do_port_transition(
1157 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1158 struct se_device *l_dev,
1159 struct se_port *l_port,
1160 struct se_node_acl *l_nacl,
1164 struct se_device *dev;
1165 struct t10_alua_lu_gp *lu_gp;
1166 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1167 struct t10_alua_tg_pt_gp *tg_pt_gp;
1168 int primary, valid_states, rc = 0;
1170 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1171 if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
1174 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1175 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1176 lu_gp = local_lu_gp_mem->lu_gp;
1177 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1178 smp_mb__after_atomic_inc();
1179 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1181 * For storage objects that are members of the 'default_lu_gp',
1182 * we only do the transition on the passed *l_tg_pt_gp, and not
1183 * on all of the matching target port group IDs in default_lu_gp.
1185 if (!lu_gp->lu_gp_id) {
1187 * core_alua_do_transition_tg_pt() will always return
1190 l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
1191 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1192 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1193 new_state, explicit);
1194 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1195 smp_mb__after_atomic_dec();
1199 * For all other LU groups aside from 'default_lu_gp', walk all of
1200 * the associated storage objects looking for a matching target port
1201 * group ID from the local target port group.
1203 spin_lock(&lu_gp->lu_gp_lock);
1204 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1207 dev = lu_gp_mem->lu_gp_mem_dev;
1208 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
1209 smp_mb__after_atomic_inc();
1210 spin_unlock(&lu_gp->lu_gp_lock);
1212 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1213 list_for_each_entry(tg_pt_gp,
1214 &dev->t10_alua.tg_pt_gps_list,
1217 if (!tg_pt_gp->tg_pt_gp_valid_id)
1220 * If the target port asymmetric access state is changed
1221 * for any target port group accessible via a logical unit
1222 * within a LU group, the target port group asymmetric
1223 * access states for the same target port group accessible
1224 * via other logical units in that LU group will also
1225 * change.
1227 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1230 if (l_tg_pt_gp == tg_pt_gp) {
1231 tg_pt_gp->tg_pt_gp_alua_port = l_port;
1232 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1234 tg_pt_gp->tg_pt_gp_alua_port = NULL;
1235 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1237 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1238 smp_mb__after_atomic_inc();
1239 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1241 * core_alua_do_transition_tg_pt() will always return
1244 rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1245 new_state, explicit);
1247 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1248 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1249 smp_mb__after_atomic_dec();
1253 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1255 spin_lock(&lu_gp->lu_gp_lock);
1256 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
1257 smp_mb__after_atomic_dec();
1259 spin_unlock(&lu_gp->lu_gp_lock);
1262 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1263 " Group IDs: %hu %s transition to primary state: %s\n",
1264 config_item_name(&lu_gp->lu_gp_group.cg_item),
1265 l_tg_pt_gp->tg_pt_gp_id,
1266 (explicit) ? "explicit" : "implicit",
1267 core_alua_dump_state(new_state));
1270 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1271 smp_mb__after_atomic_dec();
1276 * Called with port->sep_tg_pt_md_mutex held
1278 static int core_alua_update_tpg_secondary_metadata(
1279 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1280 struct se_port *port)
1282 unsigned char *md_buf;
1283 struct se_portal_group *se_tpg = port->sep_tpg;
1284 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
1287 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1289 pr_err("Unable to allocate buf for ALUA metadata\n");
1293 memset(path, 0, ALUA_METADATA_PATH_LEN);
1294 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1296 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1297 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1299 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1300 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1301 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1303 len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1304 "alua_tg_pt_status=0x%02x\n",
1305 atomic_read(&port->sep_tg_pt_secondary_offline),
1306 port->sep_tg_pt_secondary_stat);
1308 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1309 se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1310 port->sep_lun->unpacked_lun);
1312 rc = core_alua_write_tpg_metadata(path, md_buf, len);
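/*
 * Example of the resulting metadata (hypothetical values): a file such as
 * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<N> containing
 * "alua_tg_pt_offline=1" and an "alua_tg_pt_status=0x.." line when a port
 * has been taken offline via the secondary access state.
 */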
1318 static int core_alua_set_tg_pt_secondary_state(
1319 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1320 struct se_port *port,
1324 struct t10_alua_tg_pt_gp *tg_pt_gp;
1325 int trans_delay_msecs;
1327 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1328 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1330 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1331 pr_err("Unable to complete secondary state"
1335 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1337 * Set the secondary ALUA target port access state to OFFLINE
1338 * or release the previously secondary state for struct se_port
1341 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1343 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1345 port->sep_tg_pt_secondary_stat = (explicit) ?
1346 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1347 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1349 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1350 " to secondary access state: %s\n", (explicit) ? "explicit" :
1351 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1352 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1354 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1356 * Do the optional transition delay after we set the secondary
1357 * ALUA access state.
1359 if (trans_delay_msecs != 0)
1360 msleep_interruptible(trans_delay_msecs);
1362 * See if we need to update the ALUA fabric port metadata for
1363 * secondary state and status
1365 if (port->sep_tg_pt_secondary_write_md) {
1366 mutex_lock(&port->sep_tg_pt_md_mutex);
1367 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
1368 mutex_unlock(&port->sep_tg_pt_md_mutex);
1374 struct t10_alua_lba_map *
1375 core_alua_allocate_lba_map(struct list_head *list,
1376 u64 first_lba, u64 last_lba)
1378 struct t10_alua_lba_map *lba_map;
1380 lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1382 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1383 return ERR_PTR(-ENOMEM);
1385 INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1386 lba_map->lba_map_first_lba = first_lba;
1387 lba_map->lba_map_last_lba = last_lba;
1389 list_add_tail(&lba_map->lba_map_list, list);
1394 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1395 int pg_id, int state)
1397 struct t10_alua_lba_map_member *lba_map_mem;
1399 list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1401 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1402 pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1407 lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1409 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1412 lba_map_mem->lba_map_mem_alua_state = state;
1413 lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1415 list_add_tail(&lba_map_mem->lba_map_mem_list,
1416 &lba_map->lba_map_mem_list);
1421 core_alua_free_lba_map(struct list_head *lba_list)
1423 struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1424 struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1426 list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1428 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1429 &lba_map->lba_map_mem_list,
1431 list_del(&lba_map_mem->lba_map_mem_list);
1432 kmem_cache_free(t10_alua_lba_map_mem_cache,
1435 list_del(&lba_map->lba_map_list);
1436 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1441 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1442 int segment_size, int segment_mult)
1444 struct list_head old_lba_map_list;
1445 struct t10_alua_tg_pt_gp *tg_pt_gp;
1446 int activate = 0, supported;
1448 INIT_LIST_HEAD(&old_lba_map_list);
1449 spin_lock(&dev->t10_alua.lba_map_lock);
1450 dev->t10_alua.lba_map_segment_size = segment_size;
1451 dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1452 list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1454 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1457 spin_unlock(&dev->t10_alua.lba_map_lock);
1458 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1459 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1462 if (!tg_pt_gp->tg_pt_gp_valid_id)
1464 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1466 supported |= ALUA_LBD_SUP;
1468 supported &= ~ALUA_LBD_SUP;
1469 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1471 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1472 core_alua_free_lba_map(&old_lba_map_list);
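/*
 * A rough sketch of how a caller (e.g. a configfs store handler) might
 * assemble a referrals map before installing it with core_alua_set_lba_map();
 * names and values here are purely illustrative:
 *
 *	LIST_HEAD(lba_list);
 *	struct t10_alua_lba_map *map;
 *
 *	map = core_alua_allocate_lba_map(&lba_list, 0, 0xfff);
 *	if (!IS_ERR(map))
 *		core_alua_allocate_lba_map_mem(map, pg_id,
 *				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *	core_alua_set_lba_map(dev, &lba_list, segment_size, segment_mult);
 *
 * A partially built list can be torn down again with
 * core_alua_free_lba_map().
 */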
1475 struct t10_alua_lu_gp *
1476 core_alua_allocate_lu_gp(const char *name, int def_group)
1478 struct t10_alua_lu_gp *lu_gp;
1480 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1482 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1483 return ERR_PTR(-ENOMEM);
1485 INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1486 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1487 spin_lock_init(&lu_gp->lu_gp_lock);
1488 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1491 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1492 lu_gp->lu_gp_valid_id = 1;
1493 alua_lu_gps_count++;
1499 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1501 struct t10_alua_lu_gp *lu_gp_tmp;
1504 * The lu_gp->lu_gp_id may only be set once..
1506 if (lu_gp->lu_gp_valid_id) {
1507 pr_warn("ALUA LU Group already has a valid ID,"
1508 " ignoring request\n");
1512 spin_lock(&lu_gps_lock);
1513 if (alua_lu_gps_count == 0x0000ffff) {
1514 pr_err("Maximum ALUA alua_lu_gps_count:"
1515 " 0x0000ffff reached\n");
1516 spin_unlock(&lu_gps_lock);
1517 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1521 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1522 alua_lu_gps_counter++;
1524 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1525 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1529 pr_warn("ALUA Logical Unit Group ID: %hu"
1530 " already exists, ignoring request\n",
1532 spin_unlock(&lu_gps_lock);
1537 lu_gp->lu_gp_id = lu_gp_id_tmp;
1538 lu_gp->lu_gp_valid_id = 1;
1539 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1540 alua_lu_gps_count++;
1541 spin_unlock(&lu_gps_lock);
1546 static struct t10_alua_lu_gp_member *
1547 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1549 struct t10_alua_lu_gp_member *lu_gp_mem;
1551 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1553 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1554 return ERR_PTR(-ENOMEM);
1556 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1557 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1558 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1560 lu_gp_mem->lu_gp_mem_dev = dev;
1561 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1566 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1568 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1570 * Once we have reached this point, config_item_put() has
1571 * already been called from target_core_alua_drop_lu_gp().
1573 * Here, we remove the *lu_gp from the global list so that
1574 * no associations can be made while we are releasing
1575 * struct t10_alua_lu_gp.
1577 spin_lock(&lu_gps_lock);
1578 list_del(&lu_gp->lu_gp_node);
1579 alua_lu_gps_count--;
1580 spin_unlock(&lu_gps_lock);
1582 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1583 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1584 * released with core_alua_put_lu_gp_from_name()
1586 while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1589 * Release reference to struct t10_alua_lu_gp * from all associated
1592 spin_lock(&lu_gp->lu_gp_lock);
1593 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1594 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1595 if (lu_gp_mem->lu_gp_assoc) {
1596 list_del(&lu_gp_mem->lu_gp_mem_list);
1597 lu_gp->lu_gp_members--;
1598 lu_gp_mem->lu_gp_assoc = 0;
1600 spin_unlock(&lu_gp->lu_gp_lock);
1603 * lu_gp_mem is associated with a single
1604 * struct se_device->dev_alua_lu_gp_mem, and is released when
1605 * struct se_device is released via core_alua_free_lu_gp_mem().
1607 * If the passed lu_gp does NOT match the default_lu_gp, assume
1608 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1610 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1611 if (lu_gp != default_lu_gp)
1612 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1615 lu_gp_mem->lu_gp = NULL;
1616 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1618 spin_lock(&lu_gp->lu_gp_lock);
1620 spin_unlock(&lu_gp->lu_gp_lock);
1622 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1625 void core_alua_free_lu_gp_mem(struct se_device *dev)
1627 struct t10_alua_lu_gp *lu_gp;
1628 struct t10_alua_lu_gp_member *lu_gp_mem;
1630 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1634 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1637 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1638 lu_gp = lu_gp_mem->lu_gp;
1640 spin_lock(&lu_gp->lu_gp_lock);
1641 if (lu_gp_mem->lu_gp_assoc) {
1642 list_del(&lu_gp_mem->lu_gp_mem_list);
1643 lu_gp->lu_gp_members--;
1644 lu_gp_mem->lu_gp_assoc = 0;
1646 spin_unlock(&lu_gp->lu_gp_lock);
1647 lu_gp_mem->lu_gp = NULL;
1649 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1651 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1654 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1656 struct t10_alua_lu_gp *lu_gp;
1657 struct config_item *ci;
1659 spin_lock(&lu_gps_lock);
1660 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1661 if (!lu_gp->lu_gp_valid_id)
1663 ci = &lu_gp->lu_gp_group.cg_item;
1664 if (!strcmp(config_item_name(ci), name)) {
1665 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1666 spin_unlock(&lu_gps_lock);
1670 spin_unlock(&lu_gps_lock);
1675 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1677 spin_lock(&lu_gps_lock);
1678 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1679 spin_unlock(&lu_gps_lock);
1683 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1685 void __core_alua_attach_lu_gp_mem(
1686 struct t10_alua_lu_gp_member *lu_gp_mem,
1687 struct t10_alua_lu_gp *lu_gp)
1689 spin_lock(&lu_gp->lu_gp_lock);
1690 lu_gp_mem->lu_gp = lu_gp;
1691 lu_gp_mem->lu_gp_assoc = 1;
1692 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1693 lu_gp->lu_gp_members++;
1694 spin_unlock(&lu_gp->lu_gp_lock);
1698 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1700 void __core_alua_drop_lu_gp_mem(
1701 struct t10_alua_lu_gp_member *lu_gp_mem,
1702 struct t10_alua_lu_gp *lu_gp)
1704 spin_lock(&lu_gp->lu_gp_lock);
1705 list_del(&lu_gp_mem->lu_gp_mem_list);
1706 lu_gp_mem->lu_gp = NULL;
1707 lu_gp_mem->lu_gp_assoc = 0;
1708 lu_gp->lu_gp_members--;
1709 spin_unlock(&lu_gp->lu_gp_lock);
1712 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1713 const char *name, int def_group)
1715 struct t10_alua_tg_pt_gp *tg_pt_gp;
1717 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1719 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1722 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1723 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1724 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1725 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1726 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1727 INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1728 core_alua_do_transition_tg_pt_work);
1729 tg_pt_gp->tg_pt_gp_dev = dev;
1730 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1731 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1733 * Enable both explicit and implicit ALUA support by default
1735 tg_pt_gp->tg_pt_gp_alua_access_type =
1736 TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1738 * Set the default Active/NonOptimized Delay in milliseconds
1740 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1741 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1742 tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1745 * Enable all supported states
1747 tg_pt_gp->tg_pt_gp_alua_supported_states =
1748 ALUA_T_SUP | ALUA_O_SUP |
1749 ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1752 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1753 tg_pt_gp->tg_pt_gp_id =
1754 dev->t10_alua.alua_tg_pt_gps_counter++;
1755 tg_pt_gp->tg_pt_gp_valid_id = 1;
1756 dev->t10_alua.alua_tg_pt_gps_count++;
1757 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1758 &dev->t10_alua.tg_pt_gps_list);
1759 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1765 int core_alua_set_tg_pt_gp_id(
1766 struct t10_alua_tg_pt_gp *tg_pt_gp,
1769 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1770 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1771 u16 tg_pt_gp_id_tmp;
1774 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1776 if (tg_pt_gp->tg_pt_gp_valid_id) {
1777 pr_warn("ALUA TG PT Group already has a valid ID,"
1778 " ignoring request\n");
1782 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1783 if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1784 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1785 " 0x0000ffff reached\n");
1786 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1787 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1791 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1792 dev->t10_alua.alua_tg_pt_gps_counter++;
1794 list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1796 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1800 pr_err("ALUA Target Port Group ID: %hu already"
1801 " exists, ignoring request\n", tg_pt_gp_id);
1802 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1807 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1808 tg_pt_gp->tg_pt_gp_valid_id = 1;
1809 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1810 &dev->t10_alua.tg_pt_gps_list);
1811 dev->t10_alua.alua_tg_pt_gps_count++;
1812 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1817 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1818 struct se_port *port)
1820 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1822 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1824 if (!tg_pt_gp_mem) {
1825 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1826 return ERR_PTR(-ENOMEM);
1828 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1829 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1830 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1832 tg_pt_gp_mem->tg_pt = port;
1833 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1835 return tg_pt_gp_mem;
1838 void core_alua_free_tg_pt_gp(
1839 struct t10_alua_tg_pt_gp *tg_pt_gp)
1841 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1842 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1845 * Once we have reached this point, config_item_put() has already
1846 * been called from target_core_alua_drop_tg_pt_gp().
1848 * Here we remove *tg_pt_gp from the global list so that
1849 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1850 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1852 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1853 list_del(&tg_pt_gp->tg_pt_gp_list);
1854 dev->t10_alua.alua_tg_pt_gps_counter--;
1855 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1857 flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1860 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1861 * core_alua_get_tg_pt_gp_by_name() in
1862 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1863 * to be released with core_alua_put_tg_pt_gp_from_name().
1865 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1869 * Release reference to struct t10_alua_tg_pt_gp from all associated
1872 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1873 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1874 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1875 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1876 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1877 tg_pt_gp->tg_pt_gp_members--;
1878 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1880 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1882 * tg_pt_gp_mem is associated with a single
1883 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1884 * core_alua_free_tg_pt_gp_mem().
1886 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1887 * assume we want to re-associate a given tg_pt_gp_mem with default_tg_pt_gp.
1890 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1891 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1892 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1893 dev->t10_alua.default_tg_pt_gp);
1895 tg_pt_gp_mem->tg_pt_gp = NULL;
1896 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1898 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1900 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1902 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1905 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1907 struct t10_alua_tg_pt_gp *tg_pt_gp;
1908 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1910 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1914 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1917 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1918 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1920 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1921 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1922 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1923 tg_pt_gp->tg_pt_gp_members--;
1924 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1926 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1927 tg_pt_gp_mem->tg_pt_gp = NULL;
1929 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1931 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1934 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1935 struct se_device *dev, const char *name)
1937 struct t10_alua_tg_pt_gp *tg_pt_gp;
1938 struct config_item *ci;
1940 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1941 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1943 if (!tg_pt_gp->tg_pt_gp_valid_id)
1945 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1946 if (!strcmp(config_item_name(ci), name)) {
1947 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1948 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1952 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1957 static void core_alua_put_tg_pt_gp_from_name(
1958 struct t10_alua_tg_pt_gp *tg_pt_gp)
1960 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1962 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1963 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1964 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1968 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1970 void __core_alua_attach_tg_pt_gp_mem(
1971 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1972 struct t10_alua_tg_pt_gp *tg_pt_gp)
1974 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1975 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1976 tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1977 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1978 &tg_pt_gp->tg_pt_gp_mem_list);
1979 tg_pt_gp->tg_pt_gp_members++;
1980 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1984 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1986 static void __core_alua_drop_tg_pt_gp_mem(
1987 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1988 struct t10_alua_tg_pt_gp *tg_pt_gp)
1990 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1991 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1992 tg_pt_gp_mem->tg_pt_gp = NULL;
1993 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1994 tg_pt_gp->tg_pt_gp_members--;
1995 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1998 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
2000 struct config_item *tg_pt_ci;
2001 struct t10_alua_tg_pt_gp *tg_pt_gp;
2002 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2005 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
2009 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2010 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2012 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
2013 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
2014 " %hu\nTG Port Primary Access State: %s\nTG Port "
2015 "Primary Access Status: %s\nTG Port Secondary Access"
2016 " State: %s\nTG Port Secondary Access Status: %s\n",
2017 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
2018 core_alua_dump_state(atomic_read(
2019 &tg_pt_gp->tg_pt_gp_alua_access_state)),
2020 core_alua_dump_status(
2021 tg_pt_gp->tg_pt_gp_alua_access_status),
2022 (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
2024 core_alua_dump_status(port->sep_tg_pt_secondary_stat));
2026 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2031 ssize_t core_alua_store_tg_pt_gp_info(
2032 struct se_port *port,
2036 struct se_portal_group *tpg;
2038 struct se_device *dev = port->sep_lun->lun_se_dev;
2039 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
2040 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2041 unsigned char buf[TG_PT_GROUP_NAME_BUF];
2044 tpg = port->sep_tpg;
2045 lun = port->sep_lun;
2047 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
2051 if (count > TG_PT_GROUP_NAME_BUF) {
2052 pr_err("ALUA Target Port Group alias too large!\n");
2055 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2056 memcpy(buf, page, count);
2058 * Any ALUA target port group alias besides "NULL" means we will be
2059 * making a new group association.
2061 if (strcmp(strstrip(buf), "NULL")) {
2063 * core_alua_get_tg_pt_gp_by_name() will increment reference to
2064 * struct t10_alua_tg_pt_gp. This reference is released with
2065 * core_alua_put_tg_pt_gp_from_name() below.
2067 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
2073 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2074 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2077 * Clearing an existing tg_pt_gp association, and replacing
2078 * with the default_tg_pt_gp.
2080 if (!tg_pt_gp_new) {
2081 pr_debug("Target_Core_ConfigFS: Moving"
2082 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2083 " alua/%s, ID: %hu back to"
2084 " default_tg_pt_gp\n",
2085 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2086 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2087 config_item_name(&lun->lun_group.cg_item),
2089 &tg_pt_gp->tg_pt_gp_group.cg_item),
2090 tg_pt_gp->tg_pt_gp_id);
2092 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2093 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
2094 dev->t10_alua.default_tg_pt_gp);
2095 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2100 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
2102 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2106 * Associate tg_pt_gp_mem with tg_pt_gp_new.
2108 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
2109 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2110 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2111 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2112 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2113 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2114 config_item_name(&lun->lun_group.cg_item),
2115 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2116 tg_pt_gp_new->tg_pt_gp_id);
2118 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
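/*
 * Configfs usage implied by the code above (illustrative): writing an
 * existing target port group alias to this attribute moves the port into
 * that group, while writing the literal string "NULL" drops the current
 * association and re-attaches the port to the device's default_tg_pt_gp.
 */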
2122 ssize_t core_alua_show_access_type(
2123 struct t10_alua_tg_pt_gp *tg_pt_gp,
2126 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2127 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2128 return sprintf(page, "Implicit and Explicit\n");
2129 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2130 return sprintf(page, "Implicit\n");
2131 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2132 return sprintf(page, "Explicit\n");
2134 return sprintf(page, "None\n");
2137 ssize_t core_alua_store_access_type(
2138 struct t10_alua_tg_pt_gp *tg_pt_gp,
2145 ret = kstrtoul(page, 0, &tmp);
2147 pr_err("Unable to extract alua_access_type\n");
2150 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2151 pr_err("Illegal value for alua_access_type:"
2156 tg_pt_gp->tg_pt_gp_alua_access_type =
2157 TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2159 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2161 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2163 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2168 ssize_t core_alua_show_nonop_delay_msecs(
2169 struct t10_alua_tg_pt_gp *tg_pt_gp,
2172 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2175 ssize_t core_alua_store_nonop_delay_msecs(
2176 struct t10_alua_tg_pt_gp *tg_pt_gp,
2183 ret = kstrtoul(page, 0, &tmp);
2185 pr_err("Unable to extract nonop_delay_msecs\n");
2188 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2189 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2190 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2191 ALUA_MAX_NONOP_DELAY_MSECS);
2194 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2199 ssize_t core_alua_show_trans_delay_msecs(
2200 struct t10_alua_tg_pt_gp *tg_pt_gp,
2203 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2206 ssize_t core_alua_store_trans_delay_msecs(
2207 struct t10_alua_tg_pt_gp *tg_pt_gp,
2214 ret = kstrtoul(page, 0, &tmp);
2216 pr_err("Unable to extract trans_delay_msecs\n");
2219 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2220 pr_err("Passed trans_delay_msecs: %lu, exceeds"
2221 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2222 ALUA_MAX_TRANS_DELAY_MSECS);
2225 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2230 ssize_t core_alua_show_implicit_trans_secs(
2231 struct t10_alua_tg_pt_gp *tg_pt_gp,
2234 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2237 ssize_t core_alua_store_implicit_trans_secs(
2238 struct t10_alua_tg_pt_gp *tg_pt_gp,
2245 ret = kstrtoul(page, 0, &tmp);
2247 pr_err("Unable to extract implicit_trans_secs\n");
2250 if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2251 pr_err("Passed implicit_trans_secs: %lu, exceeds"
2252 " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2253 ALUA_MAX_IMPLICIT_TRANS_SECS);
2256 tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2261 ssize_t core_alua_show_preferred_bit(
2262 struct t10_alua_tg_pt_gp *tg_pt_gp,
2265 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2268 ssize_t core_alua_store_preferred_bit(
2269 struct t10_alua_tg_pt_gp *tg_pt_gp,
2276 ret = kstrtoul(page, 0, &tmp);
2278 pr_err("Unable to extract preferred ALUA value\n");
2281 if ((tmp != 0) && (tmp != 1)) {
2282 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2285 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2290 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2295 return sprintf(page, "%d\n",
2296 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
2299 ssize_t core_alua_store_offline_bit(
2304 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2311 ret = kstrtoul(page, 0, &tmp);
2313 pr_err("Unable to extract alua_tg_pt_offline value\n");
2316 if ((tmp != 0) && (tmp != 1)) {
2317 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2321 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
2322 if (!tg_pt_gp_mem) {
2323 pr_err("Unable to locate *tg_pt_gp_mem\n");
2327 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
2328 lun->lun_sep, 0, (int)tmp);
2335 ssize_t core_alua_show_secondary_status(
2339 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
2342 ssize_t core_alua_store_secondary_status(
2350 ret = kstrtoul(page, 0, &tmp);
2352 pr_err("Unable to extract alua_tg_pt_status\n");
2355 if ((tmp != ALUA_STATUS_NONE) &&
2356 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2357 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2358 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2362 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
2367 ssize_t core_alua_show_secondary_write_metadata(
2371 return sprintf(page, "%d\n",
2372 lun->lun_sep->sep_tg_pt_secondary_write_md);
2375 ssize_t core_alua_store_secondary_write_metadata(
2383 ret = kstrtoul(page, 0, &tmp);
2385 pr_err("Unable to extract alua_tg_pt_write_md\n");
2388 if ((tmp != 0) && (tmp != 1)) {
2389 pr_err("Illegal value for alua_tg_pt_write_md:"
2393 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
2398 int core_setup_alua(struct se_device *dev)
2400 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
2401 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2402 struct t10_alua_lu_gp_member *lu_gp_mem;
2405 * Associate this struct se_device with the default ALUA
2408 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2409 if (IS_ERR(lu_gp_mem))
2410 return PTR_ERR(lu_gp_mem);
2412 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2413 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2415 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2417 pr_debug("%s: Adding to default ALUA LU Group:"
2418 " core/alua/lu_gps/default_lu_gp\n",
2419 dev->transport->name);