1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
4  * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5  *
6  * Copyright (c) 2009-2010 Rising Tide Systems
7  * Copyright (c) 2009-2010 Linux-iSCSI.org
8  *
9  * Nicholas A. Bellinger <nab@kernel.org>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24  *
25  ******************************************************************************/
26
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/configfs.h>
30 #include <linux/export.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33
34 #include <target/target_core_base.h>
35 #include <target/target_core_device.h>
36 #include <target/target_core_transport.h>
37 #include <target/target_core_fabric_ops.h>
38 #include <target/target_core_configfs.h>
39
40 #include "target_core_alua.h"
41 #include "target_core_hba.h"
42 #include "target_core_ua.h"
43
44 static int core_alua_check_transition(int state, int *primary);
45 static int core_alua_set_tg_pt_secondary_state(
46                 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47                 struct se_port *port, int explict, int offline);
48
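/*
 * Counters used when assigning ALUA Logical Unit Group IDs, plus the
 * global list of LU groups protected by lu_gps_lock below.
 */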
49 static u16 alua_lu_gps_counter;
50 static u32 alua_lu_gps_count;
51
52 static DEFINE_SPINLOCK(lu_gps_lock);
53 static LIST_HEAD(lu_gps_list);
54
55 struct t10_alua_lu_gp *default_lu_gp;
56
57 /*
58  * REPORT_TARGET_PORT_GROUPS
59  *
60  * See spc4r17 section 6.27
61  */
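/*
 * The response built below consists of a 4-byte header carrying the
 * RETURN DATA LENGTH, followed by one 8-byte Target port group
 * descriptor per group, each trailed by a 4-byte Target port
 * descriptor for every group member.
 */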
62 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
63 {
64         struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
65         struct se_port *port;
66         struct t10_alua_tg_pt_gp *tg_pt_gp;
67         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
68         unsigned char *buf;
69         u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
70                                     Target port group descriptor */
71         /*
72          * Need at least 4 bytes of response data or else we can't
73          * even fit the return data length.
74          */
75         if (cmd->data_length < 4) {
76                 pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
77                         " too small\n", cmd->data_length);
78                 return -EINVAL;
79         }
80
81         buf = transport_kmap_first_data_page(cmd);
82
83         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
84         list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
85                         tg_pt_gp_list) {
86                 /*
87                  * Check if the Target port group and Target port descriptor list
88                  * based on tg_pt_gp_members count will fit into the response payload.
89                  * Otherwise, bump rd_len to let the initiator know we have exceeded
90                  * the allocation length and the response is truncated.
91                  */
92                 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
93                      cmd->data_length) {
94                         rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
95                         continue;
96                 }
97                 /*
98                  * PREF: Preferred target port bit, determine if this
99                  * bit should be set for this port group.
100                  */
101                 if (tg_pt_gp->tg_pt_gp_pref)
102                         buf[off] = 0x80;
103                 /*
104                  * Set the ASYMMETRIC ACCESS State
105                  */
106                 buf[off++] |= (atomic_read(
107                         &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
108                 /*
109                  * Set supported ASYMMETRIC ACCESS State bits
110                  */
111                 buf[off] = 0x80; /* T_SUP */
112                 buf[off] |= 0x40; /* O_SUP */
113                 buf[off] |= 0x8; /* U_SUP */
114                 buf[off] |= 0x4; /* S_SUP */
115                 buf[off] |= 0x2; /* AN_SUP */
116                 buf[off++] |= 0x1; /* AO_SUP */
117                 /*
118                  * TARGET PORT GROUP
119                  */
120                 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
121                 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
122
123                 off++; /* Skip over Reserved */
124                 /*
125                  * STATUS CODE
126                  */
127                 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
128                 /*
129                  * Vendor Specific field
130                  */
131                 buf[off++] = 0x00;
132                 /*
133                  * TARGET PORT COUNT
134                  */
135                 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
136                 rd_len += 8;
137
138                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
139                 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
140                                 tg_pt_gp_mem_list) {
141                         port = tg_pt_gp_mem->tg_pt;
142                         /*
143                          * Start Target Port descriptor format
144                          *
145                          * See spc4r17 section 6.2.7 Table 247
146                          */
147                         off += 2; /* Skip over Obsolete */
148                         /*
149                          * Set RELATIVE TARGET PORT IDENTIFIER
150                          */
151                         buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
152                         buf[off++] = (port->sep_rtpi & 0xff);
153                         rd_len += 4;
154                 }
155                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
156         }
157         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
158         /*
159          * Set the RETURN DATA LENGTH in the header of the Data-In payload
160          */
161         buf[0] = ((rd_len >> 24) & 0xff);
162         buf[1] = ((rd_len >> 16) & 0xff);
163         buf[2] = ((rd_len >> 8) & 0xff);
164         buf[3] = (rd_len & 0xff);
165
166         transport_kunmap_first_data_page(cmd);
167
168         return 0;
169 }
170
171 /*
172  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
173  *
174  * See spc4r17 section 6.35
175  */
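/*
 * The parameter data parsed below is a 4-byte reserved header followed
 * by 4-byte descriptors: the ASYMMETRIC ACCESS STATE in byte 0 and a
 * TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER in bytes 2-3.
 */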
176 int core_emulate_set_target_port_groups(struct se_cmd *cmd)
177 {
178         struct se_device *dev = cmd->se_dev;
179         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
180         struct se_port *port, *l_port = cmd->se_lun->lun_sep;
181         struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
182         struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
183         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
184         unsigned char *buf;
185         unsigned char *ptr;
186         u32 len = 4; /* Skip over RESERVED area in header */
187         int alua_access_state, primary = 0, rc;
188         u16 tg_pt_id, rtpi;
189
190         if (!l_port)
191                 return PYX_TRANSPORT_LU_COMM_FAILURE;
192
193         buf = transport_kmap_first_data_page(cmd);
194
195         /*
196          * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
197          * for the local tg_pt_gp.
198          */
199         l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
200         if (!l_tg_pt_gp_mem) {
201                 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
202                 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
203                 goto out;
204         }
205         spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
206         l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
207         if (!l_tg_pt_gp) {
208                 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
209                 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
210                 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
211                 goto out;
212         }
213         rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
214         spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
215
216         if (!rc) {
217                 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
218                                 " while TPGS_EXPLICT_ALUA is disabled\n");
219                 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
220                 goto out;
221         }
222
223         ptr = &buf[4]; /* Skip over RESERVED area in header */
224
225         while (len < cmd->data_length) {
226                 alua_access_state = (ptr[0] & 0x0f);
227                 /*
228                  * Check the received ALUA access state, and determine if
229                  * the state is a primary or secondary target port asymmetric
230                  * access state.
231                  */
232                 rc = core_alua_check_transition(alua_access_state, &primary);
233                 if (rc != 0) {
234                         /*
235                          * If the SET TARGET PORT GROUPS attempts to establish
236                          * an invalid combination of target port asymmetric
237                          * access states or attempts to establish an
238                          * unsupported target port asymmetric access state,
239                          * then the command shall be terminated with CHECK
240                          * CONDITION status, with the sense key set to ILLEGAL
241                          * REQUEST, and the additional sense code set to INVALID
242                          * FIELD IN PARAMETER LIST.
243                          */
244                         rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
245                         goto out;
246                 }
247                 rc = -1;
248                 /*
249                  * If the ASYMMETRIC ACCESS STATE field (see table 267)
250                  * specifies a primary target port asymmetric access state,
251                  * then the TARGET PORT GROUP OR TARGET PORT field specifies
252                  * a primary target port group for which the primary target
253                  * port asymmetric access state shall be changed. If the
254                  * ASYMMETRIC ACCESS STATE field specifies a secondary target
255                  * port asymmetric access state, then the TARGET PORT GROUP OR
256                  * TARGET PORT field specifies the relative target port
257                  * identifier (see 3.1.120) of the target port for which the
258                  * secondary target port asymmetric access state shall be
259                  * changed.
260                  */
261                 if (primary) {
262                         tg_pt_id = ((ptr[2] << 8) & 0xff00);
263                         tg_pt_id |= (ptr[3] & 0xff);
264                         /*
265                          * Locate the matching target port group ID from
266                          * the global tg_pt_gp list
267                          */
268                         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
269                         list_for_each_entry(tg_pt_gp,
270                                         &su_dev->t10_alua.tg_pt_gps_list,
271                                         tg_pt_gp_list) {
272                                 if (!tg_pt_gp->tg_pt_gp_valid_id)
273                                         continue;
274
275                                 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
276                                         continue;
277
278                                 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
279                                 smp_mb__after_atomic_inc();
280                                 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
281
282                                 rc = core_alua_do_port_transition(tg_pt_gp,
283                                                 dev, l_port, nacl,
284                                                 alua_access_state, 1);
285
286                                 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
287                                 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
288                                 smp_mb__after_atomic_dec();
289                                 break;
290                         }
291                         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
292                         /*
293                          * If no matching target port group ID can be located,
294                          * throw an exception with ASCQ: INVALID_PARAMETER_LIST
295                          */
296                         if (rc != 0) {
297                                 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
298                                 goto out;
299                         }
300                 } else {
301                         /*
302                          * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
303                          * the Target Port in question for the incoming
304                          * SET_TARGET_PORT_GROUPS op.
305                          */
306                         rtpi = ((ptr[2] << 8) & 0xff00);
307                         rtpi |= (ptr[3] & 0xff);
308                         /*
309                          * Locate the matching relative target port identifier
310                          * for the struct se_device storage object.
311                          */
312                         spin_lock(&dev->se_port_lock);
313                         list_for_each_entry(port, &dev->dev_sep_list,
314                                                         sep_list) {
315                                 if (port->sep_rtpi != rtpi)
316                                         continue;
317
318                                 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
319                                 spin_unlock(&dev->se_port_lock);
320
321                                 rc = core_alua_set_tg_pt_secondary_state(
322                                                 tg_pt_gp_mem, port, 1, 1);
323
324                                 spin_lock(&dev->se_port_lock);
325                                 break;
326                         }
327                         spin_unlock(&dev->se_port_lock);
328                         /*
329                          * If no matching relative target port identifier can
330                          * be located, throw an exception with ASCQ:
331                          * INVALID_PARAMETER_LIST
332                          */
333                         if (rc != 0) {
334                                 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
335                                 goto out;
336                         }
337                 }
338
339                 ptr += 4;
340                 len += 4;
341         }
342
343 out:
344         transport_kunmap_first_data_page(cmd);
345
346         return 0;
347 }
348
349 static inline int core_alua_state_nonoptimized(
350         struct se_cmd *cmd,
351         unsigned char *cdb,
352         int nonop_delay_msecs,
353         u8 *alua_ascq)
354 {
355         /*
356          * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
357          * later to determine if processing of this cmd needs to be
358          * temporarily delayed for the Active/NonOptimized primary access state.
359          */
360         cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
361         cmd->alua_nonop_delay = nonop_delay_msecs;
362         return 0;
363 }
364
365 static inline int core_alua_state_standby(
366         struct se_cmd *cmd,
367         unsigned char *cdb,
368         u8 *alua_ascq)
369 {
370         /*
371          * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
372          * spc4r17 section 5.9.2.4.4
373          */
374         switch (cdb[0]) {
375         case INQUIRY:
376         case LOG_SELECT:
377         case LOG_SENSE:
378         case MODE_SELECT:
379         case MODE_SENSE:
380         case REPORT_LUNS:
381         case RECEIVE_DIAGNOSTIC:
382         case SEND_DIAGNOSTIC:
383         case MAINTENANCE_IN:
384                 switch (cdb[1]) {
385                 case MI_REPORT_TARGET_PGS:
386                         return 0;
387                 default:
388                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
389                         return 1;
390                 }
391         case MAINTENANCE_OUT:
392                 switch (cdb[1]) {
393                 case MO_SET_TARGET_PGS:
394                         return 0;
395                 default:
396                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
397                         return 1;
398                 }
399         case REQUEST_SENSE:
400         case PERSISTENT_RESERVE_IN:
401         case PERSISTENT_RESERVE_OUT:
402         case READ_BUFFER:
403         case WRITE_BUFFER:
404                 return 0;
405         default:
406                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
407                 return 1;
408         }
409
410         return 0;
411 }
412
413 static inline int core_alua_state_unavailable(
414         struct se_cmd *cmd,
415         unsigned char *cdb,
416         u8 *alua_ascq)
417 {
418         /*
419          * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
420          * spc4r17 section 5.9.2.4.5
421          */
422         switch (cdb[0]) {
423         case INQUIRY:
424         case REPORT_LUNS:
425         case MAINTENANCE_IN:
426                 switch (cdb[1]) {
427                 case MI_REPORT_TARGET_PGS:
428                         return 0;
429                 default:
430                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
431                         return 1;
432                 }
433         case MAINTENANCE_OUT:
434                 switch (cdb[1]) {
435                 case MO_SET_TARGET_PGS:
436                         return 0;
437                 default:
438                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
439                         return 1;
440                 }
441         case REQUEST_SENSE:
442         case READ_BUFFER:
443         case WRITE_BUFFER:
444                 return 0;
445         default:
446                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
447                 return 1;
448         }
449
450         return 0;
451 }
452
453 static inline int core_alua_state_transition(
454         struct se_cmd *cmd,
455         unsigned char *cdb,
456         u8 *alua_ascq)
457 {
458         /*
459          * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
460          * spc4r17 section 5.9.2.5
461          */
462         switch (cdb[0]) {
463         case INQUIRY:
464         case REPORT_LUNS:
465         case MAINTENANCE_IN:
466                 switch (cdb[1]) {
467                 case MI_REPORT_TARGET_PGS:
468                         return 0;
469                 default:
470                         *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
471                         return 1;
472                 }
473         case REQUEST_SENSE:
474         case READ_BUFFER:
475         case WRITE_BUFFER:
476                 return 0;
477         default:
478                 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
479                 return 1;
480         }
481
482         return 0;
483 }
484
485 /*
486  * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
487  * in transport_cmd_sequencer().  This function is assigned to
488  * struct t10_alua *->state_check() in core_setup_alua()
489  */
490 static int core_alua_state_check_nop(
491         struct se_cmd *cmd,
492         unsigned char *cdb,
493         u8 *alua_ascq)
494 {
495         return 0;
496 }
497
498 /*
499  * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
500  * This function is assigned to struct t10_alua *->state_check() in
501  * core_setup_alua()
502  *
503  * Also, this function can return three different return codes to
504  * signal transport_generic_cmd_sequencer()
505  *
506  * return 1: Used to signal LUN not accessible, and check condition/not ready
507  * return 0: Used to signal success
508  * return -1: Used to signal failure, and invalid cdb field
509  */
510 static int core_alua_state_check(
511         struct se_cmd *cmd,
512         unsigned char *cdb,
513         u8 *alua_ascq)
514 {
515         struct se_lun *lun = cmd->se_lun;
516         struct se_port *port = lun->lun_sep;
517         struct t10_alua_tg_pt_gp *tg_pt_gp;
518         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
519         int out_alua_state, nonop_delay_msecs;
520
521         if (!port)
522                 return 0;
523         /*
524          * First, check for a struct se_port specific secondary ALUA target port
525          * access state: OFFLINE
526          */
527         if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
528                 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
529                 pr_debug("ALUA: Got secondary offline status for local"
530                                 " target port\n");
532                 return 1;
533         }
534         /*
535          * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
536          * ALUA target port group, to obtain current ALUA access state.
537          * Otherwise look for the underlying struct se_device association with
538          * a ALUA logical unit group.
539          * an ALUA logical unit group.
540         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
541         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
542         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
543         out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
544         nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
545         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
546         /*
547          * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
548          * statement so the compiler knows explicitly to check this case first.
549          * For the Optimized ALUA access state case, we want to process the
550          * incoming fabric cmd ASAP..
551          */
552         if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
553                 return 0;
554
555         switch (out_alua_state) {
556         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
557                 return core_alua_state_nonoptimized(cmd, cdb,
558                                         nonop_delay_msecs, alua_ascq);
559         case ALUA_ACCESS_STATE_STANDBY:
560                 return core_alua_state_standby(cmd, cdb, alua_ascq);
561         case ALUA_ACCESS_STATE_UNAVAILABLE:
562                 return core_alua_state_unavailable(cmd, cdb, alua_ascq);
563         case ALUA_ACCESS_STATE_TRANSITION:
564                 return core_alua_state_transition(cmd, cdb, alua_ascq);
565         /*
566          * OFFLINE is a secondary ALUA target port group access state, that is
567          * handled above with struct se_port->sep_tg_pt_secondary_offline=1
568          */
569         case ALUA_ACCESS_STATE_OFFLINE:
570         default:
571                 pr_err("Unknown ALUA access state: 0x%02x\n",
572                                 out_alua_state);
573                 return -EINVAL;
574         }
575
576         return 0;
577 }
578
579 /*
580  * Check implicit and explicit ALUA state change requests.
581  */
582 static int core_alua_check_transition(int state, int *primary)
583 {
584         switch (state) {
585         case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
586         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
587         case ALUA_ACCESS_STATE_STANDBY:
588         case ALUA_ACCESS_STATE_UNAVAILABLE:
589                 /*
590                  * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
591                  * defined as primary target port asymmetric access states.
592                  */
593                 *primary = 1;
594                 break;
595         case ALUA_ACCESS_STATE_OFFLINE:
596                 /*
597                  * OFFLINE state is defined as a secondary target port
598                  * asymmetric access state.
599                  */
600                 *primary = 0;
601                 break;
602         default:
603                 pr_err("Unknown ALUA access state: 0x%02x\n", state);
604                 return -EINVAL;
605         }
606
607         return 0;
608 }
609
610 static char *core_alua_dump_state(int state)
611 {
612         switch (state) {
613         case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
614                 return "Active/Optimized";
615         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
616                 return "Active/NonOptimized";
617         case ALUA_ACCESS_STATE_STANDBY:
618                 return "Standby";
619         case ALUA_ACCESS_STATE_UNAVAILABLE:
620                 return "Unavailable";
621         case ALUA_ACCESS_STATE_OFFLINE:
622                 return "Offline";
623         default:
624                 return "Unknown";
625         }
626
627         return NULL;
628 }
629
630 char *core_alua_dump_status(int status)
631 {
632         switch (status) {
633         case ALUA_STATUS_NONE:
634                 return "None";
635         case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
636                 return "Altered by Explict STPG";
637         case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
638                 return "Altered by Implict ALUA";
639         default:
640                 return "Unknown";
641         }
642
643         return NULL;
644 }
645
646 /*
647  * Used by fabric modules to determine when we need to delay processing
648  * for the Active/NonOptimized paths..
649  */
650 int core_alua_check_nonop_delay(
651         struct se_cmd *cmd)
652 {
653         if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
654                 return 0;
655         if (in_interrupt())
656                 return 0;
657         /*
658          * The ALUA Active/NonOptimized access state delay can be disabled
659          * via configfs with a value of zero
660          */
661         if (!cmd->alua_nonop_delay)
662                 return 0;
663         /*
664          * struct se_cmd->alua_nonop_delay gets set by a target port group
665          * defined interval in core_alua_state_nonoptimized()
666          */
667         msleep_interruptible(cmd->alua_nonop_delay);
668         return 0;
669 }
670 EXPORT_SYMBOL(core_alua_check_nonop_delay);
671
672 /*
673  * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
674  *
675  */
676 static int core_alua_write_tpg_metadata(
677         const char *path,
678         unsigned char *md_buf,
679         u32 md_buf_len)
680 {
681         mm_segment_t old_fs;
682         struct file *file;
683         struct iovec iov[1];
684         int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
685
686         memset(iov, 0, sizeof(struct iovec));
687
688         file = filp_open(path, flags, 0600);
689         if (IS_ERR(file) || !file || !file->f_dentry) {
690                 pr_err("filp_open(%s) for ALUA metadata failed\n",
691                         path);
692                 return -ENODEV;
693         }
694
695         iov[0].iov_base = &md_buf[0];
696         iov[0].iov_len = md_buf_len;
697
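        /* Widen the address limit so vfs_writev() accepts the kernel-space md_buf */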
698         old_fs = get_fs();
699         set_fs(get_ds());
700         ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
701         set_fs(old_fs);
702
703         if (ret < 0) {
704                 pr_err("Error writing ALUA metadata file: %s\n", path);
705                 filp_close(file, NULL);
706                 return -EIO;
707         }
708         filp_close(file, NULL);
709
710         return 0;
711 }
712
713 /*
714  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
715  */
716 static int core_alua_update_tpg_primary_metadata(
717         struct t10_alua_tg_pt_gp *tg_pt_gp,
718         int primary_state,
719         unsigned char *md_buf)
720 {
721         struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
722         struct t10_wwn *wwn = &su_dev->t10_wwn;
723         char path[ALUA_METADATA_PATH_LEN];
724         int len;
725
726         memset(path, 0, ALUA_METADATA_PATH_LEN);
727
728         len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
729                         "tg_pt_gp_id=%hu\n"
730                         "alua_access_state=0x%02x\n"
731                         "alua_access_status=0x%02x\n",
732                         tg_pt_gp->tg_pt_gp_id, primary_state,
733                         tg_pt_gp->tg_pt_gp_alua_access_status);
734
735         snprintf(path, ALUA_METADATA_PATH_LEN,
736                 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
737                 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
738
739         return core_alua_write_tpg_metadata(path, md_buf, len);
740 }
741
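/*
 * Move a target port group through the TRANSITION state to new_state,
 * generating ASYMMETRIC ACCESS STATE CHANGED unit attentions for the
 * affected I_T nexuses and optionally writing the primary state metadata.
 */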
742 static int core_alua_do_transition_tg_pt(
743         struct t10_alua_tg_pt_gp *tg_pt_gp,
744         struct se_port *l_port,
745         struct se_node_acl *nacl,
746         unsigned char *md_buf,
747         int new_state,
748         int explict)
749 {
750         struct se_dev_entry *se_deve;
751         struct se_lun_acl *lacl;
752         struct se_port *port;
753         struct t10_alua_tg_pt_gp_member *mem;
754         int old_state = 0;
755         /*
756          * Save the old primary ALUA access state, and set the current state
757          * to ALUA_ACCESS_STATE_TRANSITION.
758          */
759         old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
760         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
761                         ALUA_ACCESS_STATE_TRANSITION);
762         tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
763                                 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
764                                 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
765         /*
766          * Check for the optional ALUA primary state transition delay
767          */
768         if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
769                 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
770
771         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
772         list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
773                                 tg_pt_gp_mem_list) {
774                 port = mem->tg_pt;
775                 /*
776                  * After an implicit target port asymmetric access state
777                  * change, a device server shall establish a unit attention
778                  * condition for the initiator port associated with every I_T
779                  * nexus with the additional sense code set to ASYMMETRIC
780                  * ACCESS STATE CHANGED.
781                  *
782                  * After an explicit target port asymmetric access state
783                  * change, a device server shall establish a unit attention
784                  * condition with the additional sense code set to ASYMMETRIC
785                  * ACCESS STATE CHANGED for the initiator port associated with
786                  * every I_T nexus other than the I_T nexus on which the SET
787                  * TARGET PORT GROUPS command was received.
788                  */
789                 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
790                 smp_mb__after_atomic_inc();
791                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
792
793                 spin_lock_bh(&port->sep_alua_lock);
794                 list_for_each_entry(se_deve, &port->sep_alua_list,
795                                         alua_port_list) {
796                         lacl = se_deve->se_lun_acl;
797                         /*
798                          * se_deve->se_lun_acl pointer may be NULL for an
799                          * entry created without explicit Node+MappedLUN ACLs
800                          */
801                         if (!lacl)
802                                 continue;
803
804                         if (explict &&
805                            (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
806                            (l_port != NULL) && (l_port == port))
807                                 continue;
808
809                         core_scsi3_ua_allocate(lacl->se_lun_nacl,
810                                 se_deve->mapped_lun, 0x2A,
811                                 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
812                 }
813                 spin_unlock_bh(&port->sep_alua_lock);
814
815                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
816                 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
817                 smp_mb__after_atomic_dec();
818         }
819         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
820         /*
821          * Update the ALUA metadata buf that has been allocated in
822          * core_alua_do_port_transition(), this metadata will be written
823          * to struct file.
824          *
825          * Note that there is the case where we do not want to update the
826          * metadata when the saved metadata is being parsed in userspace
827          * when setting the existing port access state and access status.
828          *
829          * Also note that the failure to write out the ALUA metadata to
830          * struct file does NOT affect the actual ALUA transition.
831          */
832         if (tg_pt_gp->tg_pt_gp_write_metadata) {
833                 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
834                 core_alua_update_tpg_primary_metadata(tg_pt_gp,
835                                         new_state, md_buf);
836                 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
837         }
838         /*
839          * Set the current primary ALUA access state to the requested new state
840          */
841         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
842
843         pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
844                 " from primary access state %s to %s\n", (explict) ? "explict" :
845                 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
846                 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
847                 core_alua_dump_state(new_state));
848
849         return 0;
850 }
851
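/*
 * Perform a primary ALUA state transition on every target port group
 * matching l_tg_pt_gp's ID across all devices in the LU group that
 * contains l_dev.
 */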
852 int core_alua_do_port_transition(
853         struct t10_alua_tg_pt_gp *l_tg_pt_gp,
854         struct se_device *l_dev,
855         struct se_port *l_port,
856         struct se_node_acl *l_nacl,
857         int new_state,
858         int explict)
859 {
860         struct se_device *dev;
861         struct se_port *port;
862         struct se_subsystem_dev *su_dev;
863         struct se_node_acl *nacl;
864         struct t10_alua_lu_gp *lu_gp;
865         struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
866         struct t10_alua_tg_pt_gp *tg_pt_gp;
867         unsigned char *md_buf;
868         int primary;
869
870         if (core_alua_check_transition(new_state, &primary) != 0)
871                 return -EINVAL;
872
873         md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
874         if (!md_buf) {
875                 pr_err("Unable to allocate buf for ALUA metadata\n");
876                 return -ENOMEM;
877         }
878
879         local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
880         spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
881         lu_gp = local_lu_gp_mem->lu_gp;
882         atomic_inc(&lu_gp->lu_gp_ref_cnt);
883         smp_mb__after_atomic_inc();
884         spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
885         /*
886          * For storage objects that are members of the 'default_lu_gp',
887          * we only do the transition on the passed *l_tg_pt_gp, and not
888          * on all of the matching target port group IDs in default_lu_gp.
889          */
890         if (!lu_gp->lu_gp_id) {
891                 /*
892                  * core_alua_do_transition_tg_pt() will always return
893                  * success.
894                  */
895                 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
896                                         md_buf, new_state, explict);
897                 atomic_dec(&lu_gp->lu_gp_ref_cnt);
898                 smp_mb__after_atomic_dec();
899                 kfree(md_buf);
900                 return 0;
901         }
902         /*
903          * For all other LU groups aside from 'default_lu_gp', walk all of
904          * the associated storage objects looking for a matching target port
905          * group ID from the local target port group.
906          */
907         spin_lock(&lu_gp->lu_gp_lock);
908         list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
909                                 lu_gp_mem_list) {
910
911                 dev = lu_gp_mem->lu_gp_mem_dev;
912                 su_dev = dev->se_sub_dev;
913                 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
914                 smp_mb__after_atomic_inc();
915                 spin_unlock(&lu_gp->lu_gp_lock);
916
917                 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
918                 list_for_each_entry(tg_pt_gp,
919                                 &su_dev->t10_alua.tg_pt_gps_list,
920                                 tg_pt_gp_list) {
921
922                         if (!tg_pt_gp->tg_pt_gp_valid_id)
923                                 continue;
924                         /*
925                          * If the target port group asymmetric access state
926                          * is changed for any target port group accessible via
927                          * a logical unit within a LU group, the target port
928                          * group asymmetric access states for the same
929                          * target port group accessible via other logical units
930                          * in that LU group will also change.
931                          */
932                         if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
933                                 continue;
934
935                         if (l_tg_pt_gp == tg_pt_gp) {
936                                 port = l_port;
937                                 nacl = l_nacl;
938                         } else {
939                                 port = NULL;
940                                 nacl = NULL;
941                         }
942                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
943                         smp_mb__after_atomic_inc();
944                         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
945                         /*
946                          * core_alua_do_transition_tg_pt() will always return
947                          * success.
948                          */
949                         core_alua_do_transition_tg_pt(tg_pt_gp, port,
950                                         nacl, md_buf, new_state, explict);
951
952                         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
953                         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
954                         smp_mb__after_atomic_dec();
955                 }
956                 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
957
958                 spin_lock(&lu_gp->lu_gp_lock);
959                 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
960                 smp_mb__after_atomic_dec();
961         }
962         spin_unlock(&lu_gp->lu_gp_lock);
963
964         pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
965                 " Group IDs: %hu %s transition to primary state: %s\n",
966                 config_item_name(&lu_gp->lu_gp_group.cg_item),
967                 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
968                 core_alua_dump_state(new_state));
969
970         atomic_dec(&lu_gp->lu_gp_ref_cnt);
971         smp_mb__after_atomic_dec();
972         kfree(md_buf);
973         return 0;
974 }
975
976 /*
977  * Called with port->sep_tg_pt_md_mutex held
978  */
979 static int core_alua_update_tpg_secondary_metadata(
980         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
981         struct se_port *port,
982         unsigned char *md_buf,
983         u32 md_buf_len)
984 {
985         struct se_portal_group *se_tpg = port->sep_tpg;
986         char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
987         int len;
988
989         memset(path, 0, ALUA_METADATA_PATH_LEN);
990         memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
991
992         len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
993                         se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
994
995         if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
996                 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
997                                 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
998
999         len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
1000                         "alua_tg_pt_status=0x%02x\n",
1001                         atomic_read(&port->sep_tg_pt_secondary_offline),
1002                         port->sep_tg_pt_secondary_stat);
1003
1004         snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1005                         se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1006                         port->sep_lun->unpacked_lun);
1007
1008         return core_alua_write_tpg_metadata(path, md_buf, len);
1009 }
1010
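/*
 * Set or clear the secondary OFFLINE state for a single struct se_port,
 * optionally writing the secondary state metadata.
 */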
1011 static int core_alua_set_tg_pt_secondary_state(
1012         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1013         struct se_port *port,
1014         int explict,
1015         int offline)
1016 {
1017         struct t10_alua_tg_pt_gp *tg_pt_gp;
1018         unsigned char *md_buf;
1019         u32 md_buf_len;
1020         int trans_delay_msecs;
1021
1022         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1023         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1024         if (!tg_pt_gp) {
1025                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1026                 pr_err("Unable to complete secondary state"
1027                                 " transition\n");
1028                 return -EINVAL;
1029         }
1030         trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1031         /*
1032          * Set the secondary ALUA target port access state to OFFLINE
1033          * or release the previously secondary state for struct se_port
1034          */
1035         if (offline)
1036                 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1037         else
1038                 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1039
1040         md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1041         port->sep_tg_pt_secondary_stat = (explict) ?
1042                         ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1043                         ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1044
1045         pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1046                 " to secondary access state: %s\n", (explict) ? "explict" :
1047                 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1048                 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1049
1050         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1051         /*
1052          * Do the optional transition delay after we set the secondary
1053          * ALUA access state.
1054          */
1055         if (trans_delay_msecs != 0)
1056                 msleep_interruptible(trans_delay_msecs);
1057         /*
1058          * See if we need to update the ALUA fabric port metadata for
1059          * secondary state and status
1060          */
1061         if (port->sep_tg_pt_secondary_write_md) {
1062                 md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1063                 if (!md_buf) {
1064                         pr_err("Unable to allocate md_buf for"
1065                                 " secondary ALUA access metadata\n");
1066                         return -ENOMEM;
1067                 }
1068                 mutex_lock(&port->sep_tg_pt_md_mutex);
1069                 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1070                                 md_buf, md_buf_len);
1071                 mutex_unlock(&port->sep_tg_pt_md_mutex);
1072
1073                 kfree(md_buf);
1074         }
1075
1076         return 0;
1077 }
1078
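/*
 * Allocate and initialize a struct t10_alua_lu_gp.  When def_group is
 * set (used for the default_lu_gp), an ID is assigned immediately
 * instead of waiting for core_alua_set_lu_gp_id().
 */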
1079 struct t10_alua_lu_gp *
1080 core_alua_allocate_lu_gp(const char *name, int def_group)
1081 {
1082         struct t10_alua_lu_gp *lu_gp;
1083
1084         lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1085         if (!lu_gp) {
1086                 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1087                 return ERR_PTR(-ENOMEM);
1088         }
1089         INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1090         INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1091         spin_lock_init(&lu_gp->lu_gp_lock);
1092         atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1093
1094         if (def_group) {
1095                 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1096                 lu_gp->lu_gp_valid_id = 1;
1097                 alua_lu_gps_count++;
1098         }
1099
1100         return lu_gp;
1101 }
1102
1103 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1104 {
1105         struct t10_alua_lu_gp *lu_gp_tmp;
1106         u16 lu_gp_id_tmp;
1107         /*
1108          * The lu_gp->lu_gp_id may only be set once..
1109          */
1110         if (lu_gp->lu_gp_valid_id) {
1111                 pr_warn("ALUA LU Group already has a valid ID,"
1112                         " ignoring request\n");
1113                 return -EINVAL;
1114         }
1115
1116         spin_lock(&lu_gps_lock);
1117         if (alua_lu_gps_count == 0x0000ffff) {
1118                 pr_err("Maximum ALUA alua_lu_gps_count:"
1119                                 " 0x0000ffff reached\n");
1120                 spin_unlock(&lu_gps_lock);
1121                 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1122                 return -ENOSPC;
1123         }
1124 again:
1125         lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1126                                 alua_lu_gps_counter++;
1127
1128         list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1129                 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1130                         if (!lu_gp_id)
1131                                 goto again;
1132
1133                         pr_warn("ALUA Logical Unit Group ID: %hu"
1134                                 " already exists, ignoring request\n",
1135                                 lu_gp_id);
1136                         spin_unlock(&lu_gps_lock);
1137                         return -EINVAL;
1138                 }
1139         }
1140
1141         lu_gp->lu_gp_id = lu_gp_id_tmp;
1142         lu_gp->lu_gp_valid_id = 1;
1143         list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1144         alua_lu_gps_count++;
1145         spin_unlock(&lu_gps_lock);
1146
1147         return 0;
1148 }
1149
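/*
 * Allocate the LU group member descriptor that associates a struct
 * se_device with an ALUA Logical Unit Group.
 */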
1150 static struct t10_alua_lu_gp_member *
1151 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1152 {
1153         struct t10_alua_lu_gp_member *lu_gp_mem;
1154
1155         lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1156         if (!lu_gp_mem) {
1157                 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1158                 return ERR_PTR(-ENOMEM);
1159         }
1160         INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1161         spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1162         atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1163
1164         lu_gp_mem->lu_gp_mem_dev = dev;
1165         dev->dev_alua_lu_gp_mem = lu_gp_mem;
1166
1167         return lu_gp_mem;
1168 }
1169
1170 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1171 {
1172         struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1173         /*
1174          * Once we have reached this point, config_item_put() has
1175          * already been called from target_core_alua_drop_lu_gp().
1176          *
1177          * Here, we remove the *lu_gp from the global list so that
1178          * no associations can be made while we are releasing
1179          * struct t10_alua_lu_gp.
1180          */
1181         spin_lock(&lu_gps_lock);
1182         atomic_set(&lu_gp->lu_gp_shutdown, 1);
1183         list_del(&lu_gp->lu_gp_node);
1184         alua_lu_gps_count--;
1185         spin_unlock(&lu_gps_lock);
1186         /*
1187          * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1188          * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1189          * released with core_alua_put_lu_gp_from_name()
1190          */
1191         while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1192                 cpu_relax();
1193         /*
1194          * Release reference to struct t10_alua_lu_gp * from all associated
1195          * struct se_device.
1196          */
1197         spin_lock(&lu_gp->lu_gp_lock);
1198         list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1199                                 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1200                 if (lu_gp_mem->lu_gp_assoc) {
1201                         list_del(&lu_gp_mem->lu_gp_mem_list);
1202                         lu_gp->lu_gp_members--;
1203                         lu_gp_mem->lu_gp_assoc = 0;
1204                 }
1205                 spin_unlock(&lu_gp->lu_gp_lock);
1206                 /*
1208                  * lu_gp_mem is associated with a single
1209                  * struct se_device->dev_alua_lu_gp_mem, and is released when
1210                  * struct se_device is released via core_alua_free_lu_gp_mem().
1211                  *
1212                  * If the passed lu_gp does NOT match the default_lu_gp, assume
1213                  * we want to re-associate a given lu_gp_mem with default_lu_gp.
1214                  */
1215                 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1216                 if (lu_gp != default_lu_gp)
1217                         __core_alua_attach_lu_gp_mem(lu_gp_mem,
1218                                         default_lu_gp);
1219                 else
1220                         lu_gp_mem->lu_gp = NULL;
1221                 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1222
1223                 spin_lock(&lu_gp->lu_gp_lock);
1224         }
1225         spin_unlock(&lu_gp->lu_gp_lock);
1226
1227         kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1228 }
1229
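/*
 * Release a struct se_device's LU group member, dropping it from its
 * LU group if it is still associated.
 */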
1230 void core_alua_free_lu_gp_mem(struct se_device *dev)
1231 {
1232         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1233         struct t10_alua *alua = &su_dev->t10_alua;
1234         struct t10_alua_lu_gp *lu_gp;
1235         struct t10_alua_lu_gp_member *lu_gp_mem;
1236
1237         if (alua->alua_type != SPC3_ALUA_EMULATED)
1238                 return;
1239
1240         lu_gp_mem = dev->dev_alua_lu_gp_mem;
1241         if (!lu_gp_mem)
1242                 return;
1243
1244         while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1245                 cpu_relax();
1246
1247         spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1248         lu_gp = lu_gp_mem->lu_gp;
1249         if (lu_gp) {
1250                 spin_lock(&lu_gp->lu_gp_lock);
1251                 if (lu_gp_mem->lu_gp_assoc) {
1252                         list_del(&lu_gp_mem->lu_gp_mem_list);
1253                         lu_gp->lu_gp_members--;
1254                         lu_gp_mem->lu_gp_assoc = 0;
1255                 }
1256                 spin_unlock(&lu_gp->lu_gp_lock);
1257                 lu_gp_mem->lu_gp = NULL;
1258         }
1259         spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1260
1261         kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1262 }
1263
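/*
 * Look up an ALUA Logical Unit Group by configfs item name, taking a
 * reference that is dropped by core_alua_put_lu_gp_from_name().
 */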
1264 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1265 {
1266         struct t10_alua_lu_gp *lu_gp;
1267         struct config_item *ci;
1268
1269         spin_lock(&lu_gps_lock);
1270         list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1271                 if (!lu_gp->lu_gp_valid_id)
1272                         continue;
1273                 ci = &lu_gp->lu_gp_group.cg_item;
1274                 if (!strcmp(config_item_name(ci), name)) {
1275                         atomic_inc(&lu_gp->lu_gp_ref_cnt);
1276                         spin_unlock(&lu_gps_lock);
1277                         return lu_gp;
1278                 }
1279         }
1280         spin_unlock(&lu_gps_lock);
1281
1282         return NULL;
1283 }
1284
1285 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1286 {
1287         spin_lock(&lu_gps_lock);
1288         atomic_dec(&lu_gp->lu_gp_ref_cnt);
1289         spin_unlock(&lu_gps_lock);
1290 }
1291
1292 /*
1293  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1294  */
1295 void __core_alua_attach_lu_gp_mem(
1296         struct t10_alua_lu_gp_member *lu_gp_mem,
1297         struct t10_alua_lu_gp *lu_gp)
1298 {
1299         spin_lock(&lu_gp->lu_gp_lock);
1300         lu_gp_mem->lu_gp = lu_gp;
1301         lu_gp_mem->lu_gp_assoc = 1;
1302         list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1303         lu_gp->lu_gp_members++;
1304         spin_unlock(&lu_gp->lu_gp_lock);
1305 }
1306
1307 /*
1308  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1309  */
1310 void __core_alua_drop_lu_gp_mem(
1311         struct t10_alua_lu_gp_member *lu_gp_mem,
1312         struct t10_alua_lu_gp *lu_gp)
1313 {
1314         spin_lock(&lu_gp->lu_gp_lock);
1315         list_del(&lu_gp_mem->lu_gp_mem_list);
1316         lu_gp_mem->lu_gp = NULL;
1317         lu_gp_mem->lu_gp_assoc = 0;
1318         lu_gp->lu_gp_members--;
1319         spin_unlock(&lu_gp->lu_gp_lock);
1320 }
1321
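/*
 * Allocate and initialize a struct t10_alua_tg_pt_gp with both explicit
 * and implicit ALUA enabled and the default transition delays.
 */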
1322 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1323         struct se_subsystem_dev *su_dev,
1324         const char *name,
1325         int def_group)
1326 {
1327         struct t10_alua_tg_pt_gp *tg_pt_gp;
1328
1329         tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1330         if (!tg_pt_gp) {
1331                 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1332                 return NULL;
1333         }
1334         INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1335         INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1336         mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1337         spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1338         atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1339         tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1340         tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1341         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1342                 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1343         /*
1344          * Enable both explicit and implicit ALUA support by default
1345          */
1346         tg_pt_gp->tg_pt_gp_alua_access_type =
1347                         TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1348         /*
1349          * Set the default Active/NonOptimized Delay in milliseconds
1350          */
1351         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1352         tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1353
1354         if (def_group) {
1355                 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1356                 tg_pt_gp->tg_pt_gp_id =
1357                                 su_dev->t10_alua.alua_tg_pt_gps_counter++;
1358                 tg_pt_gp->tg_pt_gp_valid_id = 1;
1359                 su_dev->t10_alua.alua_tg_pt_gps_count++;
1360                 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1361                               &su_dev->t10_alua.tg_pt_gps_list);
1362                 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1363         }
1364
1365         return tg_pt_gp;
1366 }
1367
1368 int core_alua_set_tg_pt_gp_id(
1369         struct t10_alua_tg_pt_gp *tg_pt_gp,
1370         u16 tg_pt_gp_id)
1371 {
1372         struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1373         struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1374         u16 tg_pt_gp_id_tmp;
1375         /*
1376          * The tg_pt_gp->tg_pt_gp_id may only be set once.
1377          */
1378         if (tg_pt_gp->tg_pt_gp_valid_id) {
1379                 pr_warn("ALUA TG PT Group already has a valid ID,"
1380                         " ignoring request\n");
1381                 return -EINVAL;
1382         }
1383
1384         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1385         if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1386                 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1387                         " 0x0000ffff reached\n");
1388                 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1389                 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1390                 return -ENOSPC;
1391         }
1392 again:
1393         tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1394                         su_dev->t10_alua.alua_tg_pt_gps_counter++;
1395
1396         list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
1397                         tg_pt_gp_list) {
1398                 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1399                         if (!tg_pt_gp_id)
1400                                 goto again;
1401
1402                         pr_err("ALUA Target Port Group ID: %hu already"
1403                                 " exists, ignoring request\n", tg_pt_gp_id);
1404                         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1405                         return -EINVAL;
1406                 }
1407         }
1408
1409         tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1410         tg_pt_gp->tg_pt_gp_valid_id = 1;
1411         list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1412                         &su_dev->t10_alua.tg_pt_gps_list);
1413         su_dev->t10_alua.alua_tg_pt_gps_count++;
1414         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1415
1416         return 0;
1417 }
1418
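/*
 * Illustrative pairing of the two helpers above, roughly as driven from
 * configfs when a new target port group is created (a sketch only, not
 * the in-tree caller; error handling abbreviated):
 *
 *	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, "example_gp", 0);
 *	if (!tg_pt_gp)
 *		return -ENOMEM;
 *	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, 0);
 *
 * Passing tg_pt_gp_id=0 asks core_alua_set_tg_pt_gp_id() to pick the
 * next free ID from su_dev->t10_alua.alua_tg_pt_gps_counter.
 */
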
1419 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1420         struct se_port *port)
1421 {
1422         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1423
1424         tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1425                                 GFP_KERNEL);
1426         if (!tg_pt_gp_mem) {
1427                 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1428                 return ERR_PTR(-ENOMEM);
1429         }
1430         INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1431         spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1432         atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1433
1434         tg_pt_gp_mem->tg_pt = port;
1435         port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1436         atomic_set(&port->sep_tg_pt_gp_active, 1);
1437
1438         return tg_pt_gp_mem;
1439 }
1440
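/*
 * Illustrative sketch of how a port member is typically set up and then
 * attached to the device's default_tg_pt_gp (placeholder flow, error
 * handling abbreviated; the real caller is not in this file):
 *
 *	tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
 *	if (IS_ERR(tg_pt_gp_mem))
 *		return PTR_ERR(tg_pt_gp_mem);
 *
 *	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 *	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
 *			su_dev->t10_alua.default_tg_pt_gp);
 *	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 */
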
1441 void core_alua_free_tg_pt_gp(
1442         struct t10_alua_tg_pt_gp *tg_pt_gp)
1443 {
1444         struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1445         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1446         /*
1447          * Once we have reached this point, config_item_put() has already
1448          * been called from target_core_alua_drop_tg_pt_gp().
1449          *
1450          * Here we remove *tg_pt_gp from the global list so that
1451          * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1452          * can be made while we are releasing struct t10_alua_tg_pt_gp.
1453          */
1454         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1455         list_del(&tg_pt_gp->tg_pt_gp_list);
1456         su_dev->t10_alua.alua_tg_pt_gps_count--;
1457         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1458         /*
1459          * Allow a struct t10_alua_tg_pt_gp * referenced by
1460          * core_alua_get_tg_pt_gp_by_name() in
1461          * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1462          * to be released with core_alua_put_tg_pt_gp_from_name().
1463          */
1464         while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1465                 cpu_relax();
1466         /*
1467          * Release reference to struct t10_alua_tg_pt_gp from all associated
1468          * struct se_port.
1469          */
1470         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1471         list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1472                         &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1473                 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1474                         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1475                         tg_pt_gp->tg_pt_gp_members--;
1476                         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1477                 }
1478                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1479                 /*
1480                  * tg_pt_gp_mem is associated with a single
1481                  * se_port->sep_alua_tg_pt_gp_mem, and is released via
1482                  * core_alua_free_tg_pt_gp_mem().
1483                  *
1484                  * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1485          * assume we want to re-associate a given tg_pt_gp_mem with
1486                  * default_tg_pt_gp.
1487                  */
1488                 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1489                 if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
1490                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1491                                         su_dev->t10_alua.default_tg_pt_gp);
1492                 } else
1493                         tg_pt_gp_mem->tg_pt_gp = NULL;
1494                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1495
1496                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1497         }
1498         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1499
1500         kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1501 }
1502
1503 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1504 {
1505         struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1506         struct t10_alua *alua = &su_dev->t10_alua;
1507         struct t10_alua_tg_pt_gp *tg_pt_gp;
1508         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1509
1510         if (alua->alua_type != SPC3_ALUA_EMULATED)
1511                 return;
1512
1513         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1514         if (!tg_pt_gp_mem)
1515                 return;
1516
1517         while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1518                 cpu_relax();
1519
1520         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1521         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1522         if (tg_pt_gp) {
1523                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1524                 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1525                         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1526                         tg_pt_gp->tg_pt_gp_members--;
1527                         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1528                 }
1529                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1530                 tg_pt_gp_mem->tg_pt_gp = NULL;
1531         }
1532         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1533
1534         kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1535 }
1536
1537 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1538         struct se_subsystem_dev *su_dev,
1539         const char *name)
1540 {
1541         struct t10_alua_tg_pt_gp *tg_pt_gp;
1542         struct config_item *ci;
1543
1544         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1545         list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1546                         tg_pt_gp_list) {
1547                 if (!tg_pt_gp->tg_pt_gp_valid_id)
1548                         continue;
1549                 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1550                 if (!strcmp(config_item_name(ci), name)) {
1551                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1552                         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1553                         return tg_pt_gp;
1554                 }
1555         }
1556         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1557
1558         return NULL;
1559 }
1560
1561 static void core_alua_put_tg_pt_gp_from_name(
1562         struct t10_alua_tg_pt_gp *tg_pt_gp)
1563 {
1564         struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1565
1566         spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1567         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1568         spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1569 }
1570
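/*
 * The lookup above pairs with the put below; a minimal usage sketch
 * (core_alua_store_tg_pt_gp_info() further down is the real user, and
 * "some_alias" is only a placeholder):
 *
 *	tg_pt_gp = core_alua_get_tg_pt_gp_by_name(su_dev, "some_alias");
 *	if (!tg_pt_gp)
 *		return -ENODEV;
 *	... use tg_pt_gp while tg_pt_gp_ref_cnt is held ...
 *	core_alua_put_tg_pt_gp_from_name(tg_pt_gp);
 */
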
1571 /*
1572  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1573  */
1574 void __core_alua_attach_tg_pt_gp_mem(
1575         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1576         struct t10_alua_tg_pt_gp *tg_pt_gp)
1577 {
1578         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1579         tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1580         tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1581         list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1582                         &tg_pt_gp->tg_pt_gp_mem_list);
1583         tg_pt_gp->tg_pt_gp_members++;
1584         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1585 }
1586
1587 /*
1588  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1589  */
1590 static void __core_alua_drop_tg_pt_gp_mem(
1591         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1592         struct t10_alua_tg_pt_gp *tg_pt_gp)
1593 {
1594         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1595         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1596         tg_pt_gp_mem->tg_pt_gp = NULL;
1597         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1598         tg_pt_gp->tg_pt_gp_members--;
1599         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1600 }
1601
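/*
 * Illustrative caller pattern for the attach/drop helpers above, as
 * used by core_alua_store_tg_pt_gp_info() below when a port is moved
 * between target port groups (old_tg_pt_gp/new_tg_pt_gp are placeholder
 * names for this sketch):
 *
 *	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 *	__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, old_tg_pt_gp);
 *	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, new_tg_pt_gp);
 *	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 */
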
1602 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1603 {
1604         struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1605         struct config_item *tg_pt_ci;
1606         struct t10_alua *alua = &su_dev->t10_alua;
1607         struct t10_alua_tg_pt_gp *tg_pt_gp;
1608         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1609         ssize_t len = 0;
1610
1611         if (alua->alua_type != SPC3_ALUA_EMULATED)
1612                 return len;
1613
1614         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1615         if (!tg_pt_gp_mem)
1616                 return len;
1617
1618         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1619         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1620         if (tg_pt_gp) {
1621                 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1622                 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1623                         " %hu\nTG Port Primary Access State: %s\nTG Port "
1624                         "Primary Access Status: %s\nTG Port Secondary Access"
1625                         " State: %s\nTG Port Secondary Access Status: %s\n",
1626                         config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1627                         core_alua_dump_state(atomic_read(
1628                                         &tg_pt_gp->tg_pt_gp_alua_access_state)),
1629                         core_alua_dump_status(
1630                                 tg_pt_gp->tg_pt_gp_alua_access_status),
1631                         (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1632                         "Offline" : "None",
1633                         core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1634         }
1635         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1636
1637         return len;
1638 }
1639
1640 ssize_t core_alua_store_tg_pt_gp_info(
1641         struct se_port *port,
1642         const char *page,
1643         size_t count)
1644 {
1645         struct se_portal_group *tpg;
1646         struct se_lun *lun;
1647         struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1648         struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1649         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1650         unsigned char buf[TG_PT_GROUP_NAME_BUF];
1651         int move = 0;
1652
1653         tpg = port->sep_tpg;
1654         lun = port->sep_lun;
1655
1656         if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1657                 pr_warn("SPC3_ALUA_EMULATED not enabled for"
1658                         " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1659                         tpg->se_tpg_tfo->tpg_get_tag(tpg),
1660                         config_item_name(&lun->lun_group.cg_item));
1661                 return -EINVAL;
1662         }
1663
1664         if (count >= TG_PT_GROUP_NAME_BUF) {
1665                 pr_err("ALUA Target Port Group alias too large!\n");
1666                 return -EINVAL;
1667         }
1668         memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1669         memcpy(buf, page, count);
1670         /*
1671          * Any ALUA target port group alias besides "NULL" means we will be
1672          * making a new group association.
1673          */
1674         if (strcmp(strstrip(buf), "NULL")) {
1675                 /*
1676                  * core_alua_get_tg_pt_gp_by_name() will increment reference to
1677                  * struct t10_alua_tg_pt_gp.  This reference is released with
1678                  * core_alua_put_tg_pt_gp_from_name() below.
1679                  */
1680                 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1681                                         strstrip(buf));
1682                 if (!tg_pt_gp_new)
1683                         return -ENODEV;
1684         }
1685         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1686         if (!tg_pt_gp_mem) {
1687                 if (tg_pt_gp_new)
1688                         core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1689                 pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1690                 return -EINVAL;
1691         }
1692
1693         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1694         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1695         if (tg_pt_gp) {
1696                 /*
1697                  * Clearing an existing tg_pt_gp association, and replacing
1698                  * with the default_tg_pt_gp.
1699                  */
1700                 if (!tg_pt_gp_new) {
1701                         pr_debug("Target_Core_ConfigFS: Moving"
1702                                 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1703                                 " alua/%s, ID: %hu back to"
1704                                 " default_tg_pt_gp\n",
1705                                 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1706                                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1707                                 config_item_name(&lun->lun_group.cg_item),
1708                                 config_item_name(
1709                                         &tg_pt_gp->tg_pt_gp_group.cg_item),
1710                                 tg_pt_gp->tg_pt_gp_id);
1711
1712                         __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1713                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1714                                         su_dev->t10_alua.default_tg_pt_gp);
1715                         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1716
1717                         return count;
1718                 }
1719                 /*
1720                  * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1721                  */
1722                 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1723                 move = 1;
1724         }
1725         /*
1726          * Associate tg_pt_gp_mem with tg_pt_gp_new.
1727          */
1728         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1729         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1730         pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1731                 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1732                 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1733                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1734                 config_item_name(&lun->lun_group.cg_item),
1735                 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1736                 tg_pt_gp_new->tg_pt_gp_id);
1737
1738         core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1739         return count;
1740 }
1741
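/*
 * From userspace the attribute backed by the show/store pair above is
 * driven via configfs; an illustrative session (exact paths depend on
 * the fabric module and LUN layout):
 *
 *	echo example_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *	echo NULL       > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 * where writing "NULL" drops the port back to default_tg_pt_gp, as
 * handled in core_alua_store_tg_pt_gp_info() above.
 */
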
1742 ssize_t core_alua_show_access_type(
1743         struct t10_alua_tg_pt_gp *tg_pt_gp,
1744         char *page)
1745 {
1746         if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1747             (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1748                 return sprintf(page, "Implicit and Explicit\n");
1749         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1750                 return sprintf(page, "Implicit\n");
1751         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1752                 return sprintf(page, "Explicit\n");
1753         else
1754                 return sprintf(page, "None\n");
1755 }
1756
1757 ssize_t core_alua_store_access_type(
1758         struct t10_alua_tg_pt_gp *tg_pt_gp,
1759         const char *page,
1760         size_t count)
1761 {
1762         unsigned long tmp;
1763         int ret;
1764
1765         ret = strict_strtoul(page, 0, &tmp);
1766         if (ret < 0) {
1767                 pr_err("Unable to extract alua_access_type\n");
1768                 return -EINVAL;
1769         }
1770         if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1771                 pr_err("Illegal value for alua_access_type:"
1772                                 " %lu\n", tmp);
1773                 return -EINVAL;
1774         }
1775         if (tmp == 3)
1776                 tg_pt_gp->tg_pt_gp_alua_access_type =
1777                         TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1778         else if (tmp == 2)
1779                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1780         else if (tmp == 1)
1781                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1782         else
1783                 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1784
1785         return count;
1786 }
1787
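/*
 * The values accepted by the store above map directly onto the flag
 * combinations set when the group is allocated: 0 = no ALUA,
 * 1 = implicit only, 2 = explicit only, 3 = implicit and explicit.
 * An illustrative configfs write (path illustrative only):
 *
 *	echo 3 > .../alua/example_gp/alua_access_type
 */
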
1788 ssize_t core_alua_show_nonop_delay_msecs(
1789         struct t10_alua_tg_pt_gp *tg_pt_gp,
1790         char *page)
1791 {
1792         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1793 }
1794
1795 ssize_t core_alua_store_nonop_delay_msecs(
1796         struct t10_alua_tg_pt_gp *tg_pt_gp,
1797         const char *page,
1798         size_t count)
1799 {
1800         unsigned long tmp;
1801         int ret;
1802
1803         ret = strict_strtoul(page, 0, &tmp);
1804         if (ret < 0) {
1805                 pr_err("Unable to extract nonop_delay_msecs\n");
1806                 return -EINVAL;
1807         }
1808         if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1809                 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
1810                         " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1811                         ALUA_MAX_NONOP_DELAY_MSECS);
1812                 return -EINVAL;
1813         }
1814         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1815
1816         return count;
1817 }
1818
1819 ssize_t core_alua_show_trans_delay_msecs(
1820         struct t10_alua_tg_pt_gp *tg_pt_gp,
1821         char *page)
1822 {
1823         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1824 }
1825
1826 ssize_t core_alua_store_trans_delay_msecs(
1827         struct t10_alua_tg_pt_gp *tg_pt_gp,
1828         const char *page,
1829         size_t count)
1830 {
1831         unsigned long tmp;
1832         int ret;
1833
1834         ret = strict_strtoul(page, 0, &tmp);
1835         if (ret < 0) {
1836                 pr_err("Unable to extract trans_delay_msecs\n");
1837                 return -EINVAL;
1838         }
1839         if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1840                 pr_err("Passed trans_delay_msecs: %lu, exceeds"
1841                         " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1842                         ALUA_MAX_TRANS_DELAY_MSECS);
1843                 return -EINVAL;
1844         }
1845         tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1846
1847         return count;
1848 }
1849
1850 ssize_t core_alua_show_preferred_bit(
1851         struct t10_alua_tg_pt_gp *tg_pt_gp,
1852         char *page)
1853 {
1854         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1855 }
1856
1857 ssize_t core_alua_store_preferred_bit(
1858         struct t10_alua_tg_pt_gp *tg_pt_gp,
1859         const char *page,
1860         size_t count)
1861 {
1862         unsigned long tmp;
1863         int ret;
1864
1865         ret = strict_strtoul(page, 0, &tmp);
1866         if (ret < 0) {
1867                 pr_err("Unable to extract preferred ALUA value\n");
1868                 return -EINVAL;
1869         }
1870         if ((tmp != 0) && (tmp != 1)) {
1871                 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
1872                 return -EINVAL;
1873         }
1874         tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1875
1876         return count;
1877 }
1878
1879 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1880 {
1881         if (!lun->lun_sep)
1882                 return -ENODEV;
1883
1884         return sprintf(page, "%d\n",
1885                 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1886 }
1887
1888 ssize_t core_alua_store_offline_bit(
1889         struct se_lun *lun,
1890         const char *page,
1891         size_t count)
1892 {
1893         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1894         unsigned long tmp;
1895         int ret;
1896
1897         if (!lun->lun_sep)
1898                 return -ENODEV;
1899
1900         ret = strict_strtoul(page, 0, &tmp);
1901         if (ret < 0) {
1902                 pr_err("Unable to extract alua_tg_pt_offline value\n");
1903                 return -EINVAL;
1904         }
1905         if ((tmp != 0) && (tmp != 1)) {
1906                 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1907                                 tmp);
1908                 return -EINVAL;
1909         }
1910         tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1911         if (!tg_pt_gp_mem) {
1912                 pr_err("Unable to locate *tg_pt_gp_mem\n");
1913                 return -EINVAL;
1914         }
1915
1916         ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1917                         lun->lun_sep, 0, (int)tmp);
1918         if (ret < 0)
1919                 return -EINVAL;
1920
1921         return count;
1922 }
1923
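/*
 * Writing the offline bit triggers a secondary (per port) state change
 * via core_alua_set_tg_pt_secondary_state(); an illustrative configfs
 * write (attribute name taken from the log strings above, path
 * illustrative only):
 *
 *	echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */
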
1924 ssize_t core_alua_show_secondary_status(
1925         struct se_lun *lun,
1926         char *page)
1927 {
1928         return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1929 }
1930
1931 ssize_t core_alua_store_secondary_status(
1932         struct se_lun *lun,
1933         const char *page,
1934         size_t count)
1935 {
1936         unsigned long tmp;
1937         int ret;
1938
1939         ret = strict_strtoul(page, 0, &tmp);
1940         if (ret < 0) {
1941                 pr_err("Unable to extract alua_tg_pt_status\n");
1942                 return -EINVAL;
1943         }
1944         if ((tmp != ALUA_STATUS_NONE) &&
1945             (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1946             (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1947                 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1948                                 tmp);
1949                 return -EINVAL;
1950         }
1951         lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1952
1953         return count;
1954 }
1955
1956 ssize_t core_alua_show_secondary_write_metadata(
1957         struct se_lun *lun,
1958         char *page)
1959 {
1960         return sprintf(page, "%d\n",
1961                         lun->lun_sep->sep_tg_pt_secondary_write_md);
1962 }
1963
1964 ssize_t core_alua_store_secondary_write_metadata(
1965         struct se_lun *lun,
1966         const char *page,
1967         size_t count)
1968 {
1969         unsigned long tmp;
1970         int ret;
1971
1972         ret = strict_strtoul(page, 0, &tmp);
1973         if (ret < 0) {
1974                 pr_err("Unable to extract alua_tg_pt_write_md\n");
1975                 return -EINVAL;
1976         }
1977         if ((tmp != 0) && (tmp != 1)) {
1978                 pr_err("Illegal value for alua_tg_pt_write_md:"
1979                                 " %lu\n", tmp);
1980                 return -EINVAL;
1981         }
1982         lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1983
1984         return count;
1985 }
1986
1987 int core_setup_alua(struct se_device *dev, int force_pt)
1988 {
1989         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1990         struct t10_alua *alua = &su_dev->t10_alua;
1991         struct t10_alua_lu_gp_member *lu_gp_mem;
1992         /*
1993          * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
1994          * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
1995          * cause a problem because libata and some SATA RAID HBAs appear
1996          * under Linux/SCSI, but emulate SCSI logic themselves.
1997          */
1998         if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
1999             !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
2000                 alua->alua_type = SPC_ALUA_PASSTHROUGH;
2001                 alua->alua_state_check = &core_alua_state_check_nop;
2002                 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
2003                         " emulation\n", dev->transport->name);
2004                 return 0;
2005         }
2006         /*
2007          * If SPC-3 or above is reported by real or emulated struct se_device,
2008          * use emulated ALUA.
2009          */
2010         if (dev->transport->get_device_rev(dev) >= SCSI_3) {
2011                 pr_debug("%s: Enabling ALUA Emulation for SPC-3"
2012                         " device\n", dev->transport->name);
2013                 /*
2014                  * Associate this struct se_device with the default ALUA
2015                  * LUN Group.
2016                  */
2017                 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2018                 if (IS_ERR(lu_gp_mem))
2019                         return PTR_ERR(lu_gp_mem);
2020
2021                 alua->alua_type = SPC3_ALUA_EMULATED;
2022                 alua->alua_state_check = &core_alua_state_check;
2023                 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2024                 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2025                                 default_lu_gp);
2026                 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2027
2028                 pr_debug("%s: Adding to default ALUA LU Group:"
2029                         " core/alua/lu_gps/default_lu_gp\n",
2030                         dev->transport->name);
2031         } else {
2032                 alua->alua_type = SPC2_ALUA_DISABLED;
2033                 alua->alua_state_check = &core_alua_state_check_nop;
2034                 pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2035                         " device\n", dev->transport->name);
2036         }
2037
2038         return 0;
2039 }