/*
 * Source snapshot: platform/adaptation/renesas_rcar/renesas_kernel.git
 * drivers/target/target_core_alua.c (commit: "Correct .gbs.conf settings")
 */
1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
4  * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
5  *
6  * (c) Copyright 2009-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/configfs.h>
29 #include <linux/export.h>
30 #include <linux/file.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <asm/unaligned.h>
34
35 #include <target/target_core_base.h>
36 #include <target/target_core_backend.h>
37 #include <target/target_core_fabric.h>
38 #include <target/target_core_configfs.h>
39
40 #include "target_core_internal.h"
41 #include "target_core_alua.h"
42 #include "target_core_ua.h"
43
/* Forward declarations for helpers defined later in this file */
static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary);
static int core_alua_set_tg_pt_secondary_state(
                struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
                struct se_port *port, int explicit, int offline);

static char *core_alua_dump_state(int state);

/* ID allocator and live count for ALUA logical unit groups */
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

/*
 * lu_gps_lock protects lu_gps_list (and presumably the counters above —
 * confirm at their use sites, which are outside this chunk).
 */
static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

/* Default ALUA LU group; not static, so referenced elsewhere in target core */
struct t10_alua_lu_gp *default_lu_gp;
59
60 /*
61  * REPORT REFERRALS
62  *
63  * See sbc3r35 section 5.23
64  */
/*
 * Emulate REPORT REFERRALS by walking the device's LBA map list and
 * emitting one user-data-segment referral descriptor per map entry.
 * Returns 0 on success (command completed with GOOD status), otherwise
 * a sense_reason_t describing the failure.
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        /* Need at least the 4-byte parameter data header */
        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
        spin_lock(&dev->t10_alua.lba_map_lock);
        /* No LBA maps configured: referrals are not supported on this device */
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                /*
                 * Byte offset of this descriptor's target-port-group count
                 * field; filled in after the member list below is walked.
                 */
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                /*
                 * Every buf[] write below is bounds-checked against the
                 * initiator-supplied allocation length, while rd_len keeps
                 * accumulating so the header reports the full (possibly
                 * truncated) length.
                 */
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
139
140 /*
141  * REPORT_TARGET_PORT_GROUPS
142  *
143  * See spc4r17 section 6.27
144  */
/*
 * Emulate REPORT TARGET PORT GROUPS: emit one descriptor per target port
 * group on the device, each followed by its member target port descriptors.
 * Supports both the normal (4-byte) and extended (8-byte) header formats,
 * selected by the PARAMETER DATA FORMAT bit in the CDB.
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_port *port;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over RESERVED area to first Target port group descriptor
         * depending on the PARAMETER DATA FORMAT type..
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
                 * based on tg_pt_gp_members count will fit into the response payload.
                 * Otherwise, bump rd_len to let the initiator know we have exceeded
                 * the allocation length and the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 *
                 * NOTE(review): this OR-s into buf[off], so when PREF is not
                 * set it relies on the mapped payload being zero-filled —
                 * confirm transport_kmap_data_sg() callers provide zeroed
                 * buffers.
                 */
                buf[off++] |= (atomic_read(
                        &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
                buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                /* Nested lock: per-group member list under the device-wide
                 * tg_pt_gps_lock taken above. */
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
                                tg_pt_gp_mem_list) {
                        port = tg_pt_gp_mem->tg_pt;
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
                        buf[off++] = (port->sep_rtpi & 0xff);
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the application
                 * client to use as a base for it's transition timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
                 * this CDB was received upon to determine this value individually
                 * for ALUA target port group.
                 */
                port = cmd->se_lun->lun_sep;
                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
                if (tg_pt_gp_mem) {
                        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
                        if (tg_pt_gp)
                                buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                }
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
278
279 /*
280  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
281  *
282  * See spc4r17 section 6.35
283  */
/*
 * Emulate SET TARGET PORT GROUPS: parse each 4-byte set descriptor from
 * the parameter list and perform either a primary (per target port group)
 * or secondary (per relative target port) ALUA access state transition.
 * Returns TCM_NO_SENSE (0) on success, otherwise a sense_reason_t.
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_port *port, *l_port = cmd->se_lun->lun_sep;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (!l_port)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /* Need at least the 4-byte parameter list header */
        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
        l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;

        ptr = &buf[4]; /* Skip over RESERVED area in header */

        /*
         * Each set descriptor is 4 bytes: ASYMMETRIC ACCESS STATE in byte 0,
         * TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER in bytes 2-3.
         */
        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state,
                                                valid_states, &primary);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                /*
                                 * Pin tg_pt_gp so it stays valid while
                                 * tg_pt_gps_lock is dropped for the (possibly
                                 * sleeping) port transition below.
                                 */
                                atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic_inc();

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_port, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic_dec();
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(port, &dev->dev_sep_list,
                                                        sep_list) {
                                if (port->sep_rtpi != rtpi)
                                        continue;

                                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;

                                /* Drop the port-list lock across the
                                 * secondary state change. */
                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                tg_pt_gp_mem, port, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                /* No matching group ID / relative port ID: invalid descriptor */
                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, GOOD);
        return rc;
}
457
458 static inline int core_alua_state_nonoptimized(
459         struct se_cmd *cmd,
460         unsigned char *cdb,
461         int nonop_delay_msecs,
462         u8 *alua_ascq)
463 {
464         /*
465          * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
466          * later to determine if processing of this cmd needs to be
467          * temporarily delayed for the Active/NonOptimized primary access state.
468          */
469         cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
470         cmd->alua_nonop_delay = nonop_delay_msecs;
471         return 0;
472 }
473
/*
 * Gate a data CDB in the LBA_DEPENDENT primary access state: every LBA
 * the command touches must fall in a referral segment whose ALUA state
 * (for this command's target port group) permits access.  Returns 0 to
 * allow the command, or 1 (with *alua_ascq set) to reject it.
 */
static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        u8 *alua_ascq)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

        /* Walk every segment the command's LBA range intersects */
        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
                                /*
                                 * do_div() divides tmp in place and returns
                                 * the remainder, i.e. the offset of lba
                                 * within one segment_size * segment_mult
                                 * stripe.
                                 */
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                /* LBA covered by no map entry: report the LUN unavailable */
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
                        return 1;
                }
                /* Check the segment's state for this command's port group */
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id !=
                            tg_pt_gp->tg_pt_gp_id)
                                continue;
                        switch(map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
                                return 1;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}
548
549 static inline int core_alua_state_standby(
550         struct se_cmd *cmd,
551         unsigned char *cdb,
552         u8 *alua_ascq)
553 {
554         /*
555          * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
556          * spc4r17 section 5.9.2.4.4
557          */
558         switch (cdb[0]) {
559         case INQUIRY:
560         case LOG_SELECT:
561         case LOG_SENSE:
562         case MODE_SELECT:
563         case MODE_SENSE:
564         case REPORT_LUNS:
565         case RECEIVE_DIAGNOSTIC:
566         case SEND_DIAGNOSTIC:
567         case READ_CAPACITY:
568                 return 0;
569         case SERVICE_ACTION_IN:
570                 switch (cdb[1] & 0x1f) {
571                 case SAI_READ_CAPACITY_16:
572                         return 0;
573                 default:
574                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
575                         return 1;
576                 }
577         case MAINTENANCE_IN:
578                 switch (cdb[1] & 0x1f) {
579                 case MI_REPORT_TARGET_PGS:
580                         return 0;
581                 default:
582                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
583                         return 1;
584                 }
585         case MAINTENANCE_OUT:
586                 switch (cdb[1]) {
587                 case MO_SET_TARGET_PGS:
588                         return 0;
589                 default:
590                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
591                         return 1;
592                 }
593         case REQUEST_SENSE:
594         case PERSISTENT_RESERVE_IN:
595         case PERSISTENT_RESERVE_OUT:
596         case READ_BUFFER:
597         case WRITE_BUFFER:
598                 return 0;
599         default:
600                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
601                 return 1;
602         }
603
604         return 0;
605 }
606
607 static inline int core_alua_state_unavailable(
608         struct se_cmd *cmd,
609         unsigned char *cdb,
610         u8 *alua_ascq)
611 {
612         /*
613          * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
614          * spc4r17 section 5.9.2.4.5
615          */
616         switch (cdb[0]) {
617         case INQUIRY:
618         case REPORT_LUNS:
619                 return 0;
620         case MAINTENANCE_IN:
621                 switch (cdb[1] & 0x1f) {
622                 case MI_REPORT_TARGET_PGS:
623                         return 0;
624                 default:
625                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
626                         return 1;
627                 }
628         case MAINTENANCE_OUT:
629                 switch (cdb[1]) {
630                 case MO_SET_TARGET_PGS:
631                         return 0;
632                 default:
633                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
634                         return 1;
635                 }
636         case REQUEST_SENSE:
637         case READ_BUFFER:
638         case WRITE_BUFFER:
639                 return 0;
640         default:
641                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
642                 return 1;
643         }
644
645         return 0;
646 }
647
648 static inline int core_alua_state_transition(
649         struct se_cmd *cmd,
650         unsigned char *cdb,
651         u8 *alua_ascq)
652 {
653         /*
654          * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
655          * spc4r17 section 5.9.2.5
656          */
657         switch (cdb[0]) {
658         case INQUIRY:
659         case REPORT_LUNS:
660                 return 0;
661         case MAINTENANCE_IN:
662                 switch (cdb[1] & 0x1f) {
663                 case MI_REPORT_TARGET_PGS:
664                         return 0;
665                 default:
666                         *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
667                         return 1;
668                 }
669         case REQUEST_SENSE:
670         case READ_BUFFER:
671         case WRITE_BUFFER:
672                 return 0;
673         default:
674                 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
675                 return 1;
676         }
677
678         return 0;
679 }
680
/*
 * target_alua_state_check()
 *
 * Gate an incoming command on the ALUA access state of the port it was
 * received upon.
 *
 * Returns 0 when the command may be processed,
 * TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible in the
 * current ALUA state (cmd->scsi_asc/scsi_ascq are filled in for the
 * initiator), or TCM_INVALID_CDB_FIELD for an unknown primary access
 * state.  The per-state helpers above return 1 to signal "LUN not
 * accessible" and set the ASCQ accordingly.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct se_port *port = lun->lun_sep;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        int out_alua_state, nonop_delay_msecs;
        u8 alua_ascq;
        int ret;

        /* Internal-use HBAs and passthrough pSCSI devices bypass ALUA */
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;

        if (!port)
                return 0;
        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                alua_ascq = ASCQ_04H_ALUA_OFFLINE;
                ret = 1;
                goto out;
        }
        /*
         * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
         * ALUA target port group, to obtain current ALUA access state.
         * Otherwise look for the underlying struct se_device association with
         * a ALUA logical unit group.
         */
        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
        if (!tg_pt_gp_mem)
                return 0;

        /* Snapshot the group's state and delay under the member lock */
        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
        out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP..
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                ret = core_alua_state_nonoptimized(cmd, cdb,
                                        nonop_delay_msecs, &alua_ascq);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_port->sep_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                return TCM_INVALID_CDB_FIELD;
        }

out:
        if (ret > 0) {
                /*
                 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
                 * The ALUA additional sense code qualifier (ASCQ) is determined
                 * by the ALUA primary or secondary access state..
                 */
                pr_debug("[%s]: ALUA TG Port not available, "
                        "SenseKey: NOT_READY, ASC/ASCQ: "
                        "0x04/0x%02x\n",
                        cmd->se_tfo->get_fabric_name(), alua_ascq);

                cmd->scsi_asc = 0x04;
                cmd->scsi_ascq = alua_ascq;
                return TCM_CHECK_CONDITION_NOT_READY;
        }

        return 0;
}
788
789 /*
790  * Check implicit and explicit ALUA state change request.
791  */
792 static sense_reason_t
793 core_alua_check_transition(int state, int valid, int *primary)
794 {
795         /*
796          * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
797          * defined as primary target port asymmetric access states.
798          */
799         switch (state) {
800         case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
801                 if (!(valid & ALUA_AO_SUP))
802                         goto not_supported;
803                 *primary = 1;
804                 break;
805         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
806                 if (!(valid & ALUA_AN_SUP))
807                         goto not_supported;
808                 *primary = 1;
809                 break;
810         case ALUA_ACCESS_STATE_STANDBY:
811                 if (!(valid & ALUA_S_SUP))
812                         goto not_supported;
813                 *primary = 1;
814                 break;
815         case ALUA_ACCESS_STATE_UNAVAILABLE:
816                 if (!(valid & ALUA_U_SUP))
817                         goto not_supported;
818                 *primary = 1;
819                 break;
820         case ALUA_ACCESS_STATE_LBA_DEPENDENT:
821                 if (!(valid & ALUA_LBD_SUP))
822                         goto not_supported;
823                 *primary = 1;
824                 break;
825         case ALUA_ACCESS_STATE_OFFLINE:
826                 /*
827                  * OFFLINE state is defined as a secondary target port
828                  * asymmetric access state.
829                  */
830                 if (!(valid & ALUA_O_SUP))
831                         goto not_supported;
832                 *primary = 0;
833                 break;
834         case ALUA_ACCESS_STATE_TRANSITION:
835                 /*
836                  * Transitioning is set internally, and
837                  * cannot be selected manually.
838                  */
839                 goto not_supported;
840         default:
841                 pr_err("Unknown ALUA access state: 0x%02x\n", state);
842                 return TCM_INVALID_PARAMETER_LIST;
843         }
844
845         return 0;
846
847 not_supported:
848         pr_err("ALUA access state %s not supported",
849                core_alua_dump_state(state));
850         return TCM_INVALID_PARAMETER_LIST;
851 }
852
853 static char *core_alua_dump_state(int state)
854 {
855         switch (state) {
856         case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
857                 return "Active/Optimized";
858         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
859                 return "Active/NonOptimized";
860         case ALUA_ACCESS_STATE_LBA_DEPENDENT:
861                 return "LBA Dependent";
862         case ALUA_ACCESS_STATE_STANDBY:
863                 return "Standby";
864         case ALUA_ACCESS_STATE_UNAVAILABLE:
865                 return "Unavailable";
866         case ALUA_ACCESS_STATE_OFFLINE:
867                 return "Offline";
868         case ALUA_ACCESS_STATE_TRANSITION:
869                 return "Transitioning";
870         default:
871                 return "Unknown";
872         }
873
874         return NULL;
875 }
876
877 char *core_alua_dump_status(int status)
878 {
879         switch (status) {
880         case ALUA_STATUS_NONE:
881                 return "None";
882         case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
883                 return "Altered by Explicit STPG";
884         case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
885                 return "Altered by Implicit ALUA";
886         default:
887                 return "Unknown";
888         }
889
890         return NULL;
891 }
892
893 /*
894  * Used by fabric modules to determine when we need to delay processing
895  * for the Active/NonOptimized paths..
896  */
897 int core_alua_check_nonop_delay(
898         struct se_cmd *cmd)
899 {
900         if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
901                 return 0;
902         if (in_interrupt())
903                 return 0;
904         /*
905          * The ALUA Active/NonOptimized access state delay can be disabled
906          * in via configfs with a value of zero
907          */
908         if (!cmd->alua_nonop_delay)
909                 return 0;
910         /*
911          * struct se_cmd->alua_nonop_delay gets set by a target port group
912          * defined interval in core_alua_state_nonoptimized()
913          */
914         msleep_interruptible(cmd->alua_nonop_delay);
915         return 0;
916 }
917 EXPORT_SYMBOL(core_alua_check_nonop_delay);
918
919 /*
920  * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
921  *
922  */
923 static int core_alua_write_tpg_metadata(
924         const char *path,
925         unsigned char *md_buf,
926         u32 md_buf_len)
927 {
928         struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
929         int ret;
930
931         if (IS_ERR(file)) {
932                 pr_err("filp_open(%s) for ALUA metadata failed\n", path);
933                 return -ENODEV;
934         }
935         ret = kernel_write(file, md_buf, md_buf_len, 0);
936         if (ret < 0)
937                 pr_err("Error writing ALUA metadata file: %s\n", path);
938         fput(file);
939         return (ret < 0) ? -EIO : 0;
940 }
941
942 /*
943  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
944  */
945 static int core_alua_update_tpg_primary_metadata(
946         struct t10_alua_tg_pt_gp *tg_pt_gp)
947 {
948         unsigned char *md_buf;
949         struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
950         char path[ALUA_METADATA_PATH_LEN];
951         int len, rc;
952
953         md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
954         if (!md_buf) {
955                 pr_err("Unable to allocate buf for ALUA metadata\n");
956                 return -ENOMEM;
957         }
958
959         memset(path, 0, ALUA_METADATA_PATH_LEN);
960
961         len = snprintf(md_buf, ALUA_MD_BUF_LEN,
962                         "tg_pt_gp_id=%hu\n"
963                         "alua_access_state=0x%02x\n"
964                         "alua_access_status=0x%02x\n",
965                         tg_pt_gp->tg_pt_gp_id,
966                         tg_pt_gp->tg_pt_gp_alua_pending_state,
967                         tg_pt_gp->tg_pt_gp_alua_access_status);
968
969         snprintf(path, ALUA_METADATA_PATH_LEN,
970                 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
971                 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
972
973         rc = core_alua_write_tpg_metadata(path, md_buf, len);
974         kfree(md_buf);
975         return rc;
976 }
977
/*
 * Delayed-work handler that completes a primary ALUA state transition
 * started by core_alua_do_transition_tg_pt(): it establishes Unit
 * Attentions on all affected I_T nexuses, optionally writes the ALUA
 * metadata, sets the final access state, drops the workqueue reference
 * and signals any waiting completion.
 */
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_dev_entry *se_deve;
	struct se_lun_acl *lacl;
	struct se_port *port;
	struct t10_alua_tg_pt_gp_member *mem;
	/* Access status was set by the initiator of the transition */
	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
		port = mem->tg_pt;
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command
		 */
		/*
		 * Pin the member so it stays valid while tg_pt_gp_lock is
		 * temporarily dropped to take the per-port lock below.
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for a
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			/*
			 * Per SPC, an explicit STPG does not generate a UA
			 * on the I_T nexus that issued the command itself.
			 */
			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			   (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
			   (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_port == port))
				continue;

			/* 0x2A/0x06: ASYMMETRIC ACCESS STATE CHANGED */
			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   tg_pt_gp->tg_pt_gp_alua_pending_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id,
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
	/*
	 * Drop the reference taken for this work item in
	 * core_alua_do_transition_tg_pt().
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	/* Wake a caller synchronously waiting on this transition, if any */
	if (tg_pt_gp->tg_pt_gp_transition_complete)
		complete(tg_pt_gp->tg_pt_gp_transition_complete);
}
1079
/*
 * Start a primary ALUA access state transition for @tg_pt_gp to
 * @new_state. The group is first placed in TRANSITION, and the actual
 * completion (UA establishment, metadata write, final state set) is
 * performed by core_alua_do_transition_tg_pt_work() on the device tmr_wq
 * workqueue — delayed for implicit transitions when
 * tg_pt_gp_implicit_trans_secs is set, otherwise executed and waited on
 * synchronously.
 *
 * Returns 0 on success, -EAGAIN when TRANSITION itself is requested.
 */
static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Nothing to be done here */
	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
		return 0;

	/* TRANSITION may not be requested directly */
	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
		return -EAGAIN;

	/*
	 * Flush any pending transitions
	 */
	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
	    ALUA_ACCESS_STATE_TRANSITION) {
		/* Just in case */
		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
		return 0;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	tg_pt_gp->tg_pt_gp_alua_previous_state =
		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Take a reference for workqueue item
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
		/*
		 * Implicit transition with a configured delay: queue the
		 * work to complete later and return without waiting.
		 */
		unsigned long transition_tmo;

		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work,
				   transition_tmo);
	} else {
		/* Run the transition work immediately and wait for it */
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
	}

	return 0;
}
1155
/*
 * Perform a primary ALUA state transition to @new_state, fanning it out
 * to every target port group with the same ID across all devices in the
 * local LU group (for the 'default_lu_gp', only the passed group itself).
 *
 * @l_tg_pt_gp: the target port group on which the transition was requested
 * @l_dev/@l_port/@l_nacl: local device, port and node ACL of the request
 * (port/nacl identify the STPG I_T nexus and may be NULL for implicit)
 * @explicit: non-zero for an explicit SET TARGET PORT GROUPS request
 *
 * Returns 0 on success, -EINVAL for an invalid/unsupported state, or the
 * first error from core_alua_do_transition_tg_pt().
 */
int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_port *l_port,
	struct se_node_acl *l_nacl,
	int new_state,
	int explicit)
{
	struct se_device *dev;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int primary, valid_states, rc = 0;

	/* Reject states not advertised as supported by the local group */
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
		return -EINVAL;

	/* Pin the LU group so it cannot be released during the walk */
	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tp_pt_gp, and not
	 * on all of the matching target port groups IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
						   new_state, explicit);
		atomic_dec(&lu_gp->lu_gp_ref_cnt);
		smp_mb__after_atomic_dec();
		return rc;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		/*
		 * Pin this member device, then drop lu_gp_lock so the
		 * per-device tg_pt_gps_lock can be taken below.
		 */
		dev = lu_gp_mem->lu_gp_mem_dev;
		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target behavior port asymmetric access state
			 * is changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * behavior group asymmetric access states for the same
			 * target port group accessible via other logical units
			 * in that LU group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			/*
			 * Record the requesting I_T nexus only on the local
			 * group; the UA-exemption check in the transition
			 * work uses these pointers.
			 */
			if (l_tg_pt_gp == tg_pt_gp) {
				tg_pt_gp->tg_pt_gp_alua_port = l_port;
				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
			} else {
				tg_pt_gp->tg_pt_gp_alua_port = NULL;
				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
			}
			/* Pin the group across the unlocked transition call */
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_inc();
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
					new_state, explicit);

			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_dec();
			if (rc)
				break;
		}
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	if (!rc) {
		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
			 " Group IDs: %hu %s transition to primary state: %s\n",
			 config_item_name(&lu_gp->lu_gp_group.cg_item),
			 l_tg_pt_gp->tg_pt_gp_id,
			 (explicit) ? "explicit" : "implicit",
			 core_alua_dump_state(new_state));
	}

	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	return rc;
}
1274
1275 /*
1276  * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
1277  */
1278 static int core_alua_update_tpg_secondary_metadata(
1279         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1280         struct se_port *port)
1281 {
1282         unsigned char *md_buf;
1283         struct se_portal_group *se_tpg = port->sep_tpg;
1284         char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
1285         int len, rc;
1286
1287         md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1288         if (!md_buf) {
1289                 pr_err("Unable to allocate buf for ALUA metadata\n");
1290                 return -ENOMEM;
1291         }
1292
1293         memset(path, 0, ALUA_METADATA_PATH_LEN);
1294         memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1295
1296         len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1297                         se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1298
1299         if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1300                 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1301                                 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1302
1303         len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1304                         "alua_tg_pt_status=0x%02x\n",
1305                         atomic_read(&port->sep_tg_pt_secondary_offline),
1306                         port->sep_tg_pt_secondary_stat);
1307
1308         snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1309                         se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1310                         port->sep_lun->unpacked_lun);
1311
1312         rc = core_alua_write_tpg_metadata(path, md_buf, len);
1313         kfree(md_buf);
1314
1315         return rc;
1316 }
1317
/*
 * Set or clear the secondary ALUA (OFFLINE) state for @port, update the
 * secondary access status, apply the optional transition delay and
 * optionally persist the new state via the fabric port metadata.
 *
 * @explicit: non-zero when the change comes from an explicit STPG
 * @offline:  non-zero to set OFFLINE, zero to release it
 *
 * Returns 0 on success, -EINVAL if the member has no target port group.
 */
static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	/* Snapshot the delay while tg_pt_gp is known valid under the lock */
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	port->sep_tg_pt_secondary_stat = (explicit) ?
			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
		mutex_unlock(&port->sep_tg_pt_md_mutex);
	}

	return 0;
}
1373
1374 struct t10_alua_lba_map *
1375 core_alua_allocate_lba_map(struct list_head *list,
1376                            u64 first_lba, u64 last_lba)
1377 {
1378         struct t10_alua_lba_map *lba_map;
1379
1380         lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1381         if (!lba_map) {
1382                 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1383                 return ERR_PTR(-ENOMEM);
1384         }
1385         INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1386         lba_map->lba_map_first_lba = first_lba;
1387         lba_map->lba_map_last_lba = last_lba;
1388
1389         list_add_tail(&lba_map->lba_map_list, list);
1390         return lba_map;
1391 }
1392
1393 int
1394 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1395                                int pg_id, int state)
1396 {
1397         struct t10_alua_lba_map_member *lba_map_mem;
1398
1399         list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1400                             lba_map_mem_list) {
1401                 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1402                         pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1403                         return -EINVAL;
1404                 }
1405         }
1406
1407         lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1408         if (!lba_map_mem) {
1409                 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1410                 return -ENOMEM;
1411         }
1412         lba_map_mem->lba_map_mem_alua_state = state;
1413         lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1414
1415         list_add_tail(&lba_map_mem->lba_map_mem_list,
1416                       &lba_map->lba_map_mem_list);
1417         return 0;
1418 }
1419
1420 void
1421 core_alua_free_lba_map(struct list_head *lba_list)
1422 {
1423         struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1424         struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1425
1426         list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1427                                  lba_map_list) {
1428                 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1429                                          &lba_map->lba_map_mem_list,
1430                                          lba_map_mem_list) {
1431                         list_del(&lba_map_mem->lba_map_mem_list);
1432                         kmem_cache_free(t10_alua_lba_map_mem_cache,
1433                                         lba_map_mem);
1434                 }
1435                 list_del(&lba_map->lba_map_list);
1436                 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1437         }
1438 }
1439
1440 void
1441 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1442                       int segment_size, int segment_mult)
1443 {
1444         struct list_head old_lba_map_list;
1445         struct t10_alua_tg_pt_gp *tg_pt_gp;
1446         int activate = 0, supported;
1447
1448         INIT_LIST_HEAD(&old_lba_map_list);
1449         spin_lock(&dev->t10_alua.lba_map_lock);
1450         dev->t10_alua.lba_map_segment_size = segment_size;
1451         dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1452         list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1453         if (lba_map_list) {
1454                 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1455                 activate = 1;
1456         }
1457         spin_unlock(&dev->t10_alua.lba_map_lock);
1458         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1459         list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1460                             tg_pt_gp_list) {
1461
1462                 if (!tg_pt_gp->tg_pt_gp_valid_id)
1463                         continue;
1464                 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1465                 if (activate)
1466                         supported |= ALUA_LBD_SUP;
1467                 else
1468                         supported &= ~ALUA_LBD_SUP;
1469                 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1470         }
1471         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1472         core_alua_free_lba_map(&old_lba_map_list);
1473 }
1474
1475 struct t10_alua_lu_gp *
1476 core_alua_allocate_lu_gp(const char *name, int def_group)
1477 {
1478         struct t10_alua_lu_gp *lu_gp;
1479
1480         lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1481         if (!lu_gp) {
1482                 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1483                 return ERR_PTR(-ENOMEM);
1484         }
1485         INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1486         INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1487         spin_lock_init(&lu_gp->lu_gp_lock);
1488         atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1489
1490         if (def_group) {
1491                 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1492                 lu_gp->lu_gp_valid_id = 1;
1493                 alua_lu_gps_count++;
1494         }
1495
1496         return lu_gp;
1497 }
1498
/*
 * Assign an ID to @lu_gp and add it to the global lu_gps_list. A
 * caller-supplied non-zero @lu_gp_id is used as-is (and must be unique);
 * with @lu_gp_id == 0 the next free counter value is picked.
 *
 * Returns 0 on success, -EINVAL if the group already has an ID or the
 * requested ID is taken, -ENOSPC when 0xffff groups already exist.
 * NOTE: on -ENOSPC the lu_gp itself is freed here; on -EINVAL it is not.
 */
int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	/* Auto-assign from the counter when no explicit ID was requested */
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			/* Counter collision: retry with the next value */
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	/* Commit the ID and publish the group while still holding the lock */
	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}
1545
1546 static struct t10_alua_lu_gp_member *
1547 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1548 {
1549         struct t10_alua_lu_gp_member *lu_gp_mem;
1550
1551         lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1552         if (!lu_gp_mem) {
1553                 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1554                 return ERR_PTR(-ENOMEM);
1555         }
1556         INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1557         spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1558         atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1559
1560         lu_gp_mem->lu_gp_mem_dev = dev;
1561         dev->dev_alua_lu_gp_mem = lu_gp_mem;
1562
1563         return lu_gp_mem;
1564 }
1565
/*
 * Tear down a LU group: unhook it from the global list, wait for
 * outstanding name-lookup references to drain, re-home (or clear) every
 * member's association, and free the group.
 */
void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 *
	 * Busy-wait: lookups only hold the ref briefly under lu_gps_lock.
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		/*
		 * Drop lu_gp_lock before taking lu_gp_mem_lock: the lock
		 * order elsewhere is mem_lock -> group lock (see
		 * __core_alua_attach_lu_gp_mem()), so nesting the other
		 * way here would deadlock.
		 */
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 *
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}
1624
/*
 * Release @dev's LU group member structure at device teardown time,
 * dropping any remaining association with its lu_gp first.
 */
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	/* Busy-wait for transient references to drain before freeing. */
	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/* mem_lock -> group lock, matching the attach/drop order. */
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}
1653
1654 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1655 {
1656         struct t10_alua_lu_gp *lu_gp;
1657         struct config_item *ci;
1658
1659         spin_lock(&lu_gps_lock);
1660         list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1661                 if (!lu_gp->lu_gp_valid_id)
1662                         continue;
1663                 ci = &lu_gp->lu_gp_group.cg_item;
1664                 if (!strcmp(config_item_name(ci), name)) {
1665                         atomic_inc(&lu_gp->lu_gp_ref_cnt);
1666                         spin_unlock(&lu_gps_lock);
1667                         return lu_gp;
1668                 }
1669         }
1670         spin_unlock(&lu_gps_lock);
1671
1672         return NULL;
1673 }
1674
/*
 * Drop the reference taken by core_alua_get_lu_gp_by_name().  Done
 * under lu_gps_lock so core_alua_free_lu_gp()'s drain loop observes a
 * consistent count.
 */
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}
1681
1682 /*
1683  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1684  */
/*
 * Associate @lu_gp_mem with @lu_gp and account the new member.
 *
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held;
 * lu_gp->lu_gp_lock is taken here, establishing the mem-lock ->
 * group-lock ordering used throughout this file.
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}
1696
1697 /*
1698  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1699  */
/*
 * Sever the association between @lu_gp_mem and @lu_gp, the inverse of
 * __core_alua_attach_lu_gp_mem().
 *
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held.
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}
1711
/*
 * Allocate and initialize a target port group for @dev with default
 * ALUA settings (explicit+implicit access, active/optimized state, all
 * primary states supported).
 *
 * @name: group alias; not referenced here (configfs names the group).
 * @def_group: non-zero for the per-device default group, which is
 *	       assigned an ID and linked onto tg_pt_gps_list immediately.
 *
 * Returns the new group or NULL on allocation failure.
 * NOTE(review): returns NULL rather than ERR_PTR(-ENOMEM) like the
 * lu_gp allocator — callers of this function test for NULL, so this
 * asymmetry must be preserved.
 */
struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
			  core_alua_do_transition_tg_pt_work);
	tg_pt_gp->tg_pt_gp_dev = dev;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
	    ALUA_T_SUP | ALUA_O_SUP |
	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		/* Default group: claim an ID and publish it right away. */
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}
1764
/*
 * Assign a Target Port Group ID — either the caller-requested
 * @tg_pt_gp_id (non-zero) or the next auto-generated one — and link
 * the group onto the device's tg_pt_gps_list.
 *
 * Returns 0 on success, -EINVAL if the group already has a valid ID or
 * a requested ID is in use, or -ENOSPC when all 0xffff IDs are taken.
 * NOTE(review): the -ENOSPC path frees @tg_pt_gp here; the caller must
 * not free it again — verify against the configfs caller.
 */
int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;

	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	/* Auto-generated IDs retry here until an unused one is found. */
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			/*
			 * Collision: an auto-generated ID just tries the
			 * next value; a caller-requested one is an error.
			 */
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	/* ID is unique: publish it and make the group discoverable. */
	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}
1816
1817 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1818         struct se_port *port)
1819 {
1820         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1821
1822         tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1823                                 GFP_KERNEL);
1824         if (!tg_pt_gp_mem) {
1825                 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1826                 return ERR_PTR(-ENOMEM);
1827         }
1828         INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1829         spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1830         atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1831
1832         tg_pt_gp_mem->tg_pt = port;
1833         port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1834
1835         return tg_pt_gp_mem;
1836 }
1837
1838 void core_alua_free_tg_pt_gp(
1839         struct t10_alua_tg_pt_gp *tg_pt_gp)
1840 {
1841         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1842         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1843
1844         /*
1845          * Once we have reached this point, config_item_put() has already
1846          * been called from target_core_alua_drop_tg_pt_gp().
1847          *
1848          * Here we remove *tg_pt_gp from the global list so that
1849          * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1850          * can be made while we are releasing struct t10_alua_tg_pt_gp.
1851          */
1852         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1853         list_del(&tg_pt_gp->tg_pt_gp_list);
1854         dev->t10_alua.alua_tg_pt_gps_counter--;
1855         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1856
1857         flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1858
1859         /*
1860          * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1861          * core_alua_get_tg_pt_gp_by_name() in
1862          * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1863          * to be released with core_alua_put_tg_pt_gp_from_name().
1864          */
1865         while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1866                 cpu_relax();
1867
1868         /*
1869          * Release reference to struct t10_alua_tg_pt_gp from all associated
1870          * struct se_port.
1871          */
1872         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1873         list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1874                         &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1875                 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1876                         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1877                         tg_pt_gp->tg_pt_gp_members--;
1878                         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1879                 }
1880                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1881                 /*
1882                  * tg_pt_gp_mem is associated with a single
1883                  * se_port->sep_alua_tg_pt_gp_mem, and is released via
1884                  * core_alua_free_tg_pt_gp_mem().
1885                  *
1886                  * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1887                  * assume we want to re-associate a given tg_pt_gp_mem with
1888                  * default_tg_pt_gp.
1889                  */
1890                 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1891                 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1892                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1893                                         dev->t10_alua.default_tg_pt_gp);
1894                 } else
1895                         tg_pt_gp_mem->tg_pt_gp = NULL;
1896                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1897
1898                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1899         }
1900         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1901
1902         kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1903 }
1904
/*
 * Release @port's target port group member structure at port teardown
 * time, dropping any remaining association with its tg_pt_gp first.
 */
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	/* Busy-wait for transient references to drain before freeing. */
	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/* mem_lock -> group lock, matching the attach/drop order. */
		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		tg_pt_gp_mem->tg_pt_gp = NULL;
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}
1933
1934 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1935                 struct se_device *dev, const char *name)
1936 {
1937         struct t10_alua_tg_pt_gp *tg_pt_gp;
1938         struct config_item *ci;
1939
1940         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1941         list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1942                         tg_pt_gp_list) {
1943                 if (!tg_pt_gp->tg_pt_gp_valid_id)
1944                         continue;
1945                 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1946                 if (!strcmp(config_item_name(ci), name)) {
1947                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1948                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1949                         return tg_pt_gp;
1950                 }
1951         }
1952         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1953
1954         return NULL;
1955 }
1956
/*
 * Drop the reference taken by core_alua_get_tg_pt_gp_by_name().  Done
 * under tg_pt_gps_lock so core_alua_free_tg_pt_gp()'s drain loop
 * observes a consistent count.
 */
static void core_alua_put_tg_pt_gp_from_name(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
1966
1967 /*
1968  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1969  */
/*
 * Associate @tg_pt_gp_mem with @tg_pt_gp and account the new member.
 *
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held;
 * tg_pt_gp->tg_pt_gp_lock is taken here, establishing the mem-lock ->
 * group-lock ordering used throughout this file.
 */
void __core_alua_attach_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
			&tg_pt_gp->tg_pt_gp_mem_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
1982
1983 /*
1984  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1985  */
/*
 * Sever the association between @tg_pt_gp_mem and @tg_pt_gp, the
 * inverse of __core_alua_attach_tg_pt_gp_mem().
 *
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held.
 */
static void __core_alua_drop_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	tg_pt_gp_mem->tg_pt_gp = NULL;
	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
1997
/*
 * configfs "show" handler: format @port's current target port group
 * association (alias, ID, primary/secondary access state and status)
 * into @page.  Returns the number of bytes written, or 0 when the port
 * has no member structure or no group association.
 */
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return len;

	/* Hold mem_lock so the group cannot be re-associated mid-format. */
	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}
2030
2031 ssize_t core_alua_store_tg_pt_gp_info(
2032         struct se_port *port,
2033         const char *page,
2034         size_t count)
2035 {
2036         struct se_portal_group *tpg;
2037         struct se_lun *lun;
2038         struct se_device *dev = port->sep_lun->lun_se_dev;
2039         struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
2040         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2041         unsigned char buf[TG_PT_GROUP_NAME_BUF];
2042         int move = 0;
2043
2044         tpg = port->sep_tpg;
2045         lun = port->sep_lun;
2046
2047         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
2048         if (!tg_pt_gp_mem)
2049                 return 0;
2050
2051         if (count > TG_PT_GROUP_NAME_BUF) {
2052                 pr_err("ALUA Target Port Group alias too large!\n");
2053                 return -EINVAL;
2054         }
2055         memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2056         memcpy(buf, page, count);
2057         /*
2058          * Any ALUA target port group alias besides "NULL" means we will be
2059          * making a new group association.
2060          */
2061         if (strcmp(strstrip(buf), "NULL")) {
2062                 /*
2063                  * core_alua_get_tg_pt_gp_by_name() will increment reference to
2064                  * struct t10_alua_tg_pt_gp.  This reference is released with
2065                  * core_alua_put_tg_pt_gp_from_name() below.
2066                  */
2067                 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
2068                                         strstrip(buf));
2069                 if (!tg_pt_gp_new)
2070                         return -ENODEV;
2071         }
2072
2073         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2074         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2075         if (tg_pt_gp) {
2076                 /*
2077                  * Clearing an existing tg_pt_gp association, and replacing
2078                  * with the default_tg_pt_gp.
2079                  */
2080                 if (!tg_pt_gp_new) {
2081                         pr_debug("Target_Core_ConfigFS: Moving"
2082                                 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2083                                 " alua/%s, ID: %hu back to"
2084                                 " default_tg_pt_gp\n",
2085                                 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2086                                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2087                                 config_item_name(&lun->lun_group.cg_item),
2088                                 config_item_name(
2089                                         &tg_pt_gp->tg_pt_gp_group.cg_item),
2090                                 tg_pt_gp->tg_pt_gp_id);
2091
2092                         __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2093                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
2094                                         dev->t10_alua.default_tg_pt_gp);
2095                         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2096
2097                         return count;
2098                 }
2099                 /*
2100                  * Removing existing association of tg_pt_gp_mem with tg_pt_gp
2101                  */
2102                 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2103                 move = 1;
2104         }
2105         /*
2106          * Associate tg_pt_gp_mem with tg_pt_gp_new.
2107          */
2108         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
2109         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2110         pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2111                 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2112                 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2113                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2114                 config_item_name(&lun->lun_group.cg_item),
2115                 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2116                 tg_pt_gp_new->tg_pt_gp_id);
2117
2118         core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2119         return count;
2120 }
2121
2122 ssize_t core_alua_show_access_type(
2123         struct t10_alua_tg_pt_gp *tg_pt_gp,
2124         char *page)
2125 {
2126         if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2127             (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2128                 return sprintf(page, "Implicit and Explicit\n");
2129         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2130                 return sprintf(page, "Implicit\n");
2131         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2132                 return sprintf(page, "Explicit\n");
2133         else
2134                 return sprintf(page, "None\n");
2135 }
2136
2137 ssize_t core_alua_store_access_type(
2138         struct t10_alua_tg_pt_gp *tg_pt_gp,
2139         const char *page,
2140         size_t count)
2141 {
2142         unsigned long tmp;
2143         int ret;
2144
2145         ret = kstrtoul(page, 0, &tmp);
2146         if (ret < 0) {
2147                 pr_err("Unable to extract alua_access_type\n");
2148                 return ret;
2149         }
2150         if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2151                 pr_err("Illegal value for alua_access_type:"
2152                                 " %lu\n", tmp);
2153                 return -EINVAL;
2154         }
2155         if (tmp == 3)
2156                 tg_pt_gp->tg_pt_gp_alua_access_type =
2157                         TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2158         else if (tmp == 2)
2159                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2160         else if (tmp == 1)
2161                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2162         else
2163                 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2164
2165         return count;
2166 }
2167
/*
 * configfs "show" handler for the Active/NonOptimized delay (msecs).
 */
ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}
2174
2175 ssize_t core_alua_store_nonop_delay_msecs(
2176         struct t10_alua_tg_pt_gp *tg_pt_gp,
2177         const char *page,
2178         size_t count)
2179 {
2180         unsigned long tmp;
2181         int ret;
2182
2183         ret = kstrtoul(page, 0, &tmp);
2184         if (ret < 0) {
2185                 pr_err("Unable to extract nonop_delay_msecs\n");
2186                 return ret;
2187         }
2188         if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2189                 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2190                         " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2191                         ALUA_MAX_NONOP_DELAY_MSECS);
2192                 return -EINVAL;
2193         }
2194         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2195
2196         return count;
2197 }
2198
/*
 * configfs "show" handler for the state-transition delay (msecs).
 */
ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}
2205
2206 ssize_t core_alua_store_trans_delay_msecs(
2207         struct t10_alua_tg_pt_gp *tg_pt_gp,
2208         const char *page,
2209         size_t count)
2210 {
2211         unsigned long tmp;
2212         int ret;
2213
2214         ret = kstrtoul(page, 0, &tmp);
2215         if (ret < 0) {
2216                 pr_err("Unable to extract trans_delay_msecs\n");
2217                 return ret;
2218         }
2219         if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2220                 pr_err("Passed trans_delay_msecs: %lu, exceeds"
2221                         " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2222                         ALUA_MAX_TRANS_DELAY_MSECS);
2223                 return -EINVAL;
2224         }
2225         tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2226
2227         return count;
2228 }
2229
/*
 * configfs "show" handler for the implicit transition time (secs).
 */
ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}
2236
2237 ssize_t core_alua_store_implicit_trans_secs(
2238         struct t10_alua_tg_pt_gp *tg_pt_gp,
2239         const char *page,
2240         size_t count)
2241 {
2242         unsigned long tmp;
2243         int ret;
2244
2245         ret = kstrtoul(page, 0, &tmp);
2246         if (ret < 0) {
2247                 pr_err("Unable to extract implicit_trans_secs\n");
2248                 return ret;
2249         }
2250         if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2251                 pr_err("Passed implicit_trans_secs: %lu, exceeds"
2252                         " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2253                         ALUA_MAX_IMPLICIT_TRANS_SECS);
2254                 return  -EINVAL;
2255         }
2256         tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2257
2258         return count;
2259 }
2260
/*
 * configfs "show" handler for the group's PREFERRED bit.
 */
ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}
2267
2268 ssize_t core_alua_store_preferred_bit(
2269         struct t10_alua_tg_pt_gp *tg_pt_gp,
2270         const char *page,
2271         size_t count)
2272 {
2273         unsigned long tmp;
2274         int ret;
2275
2276         ret = kstrtoul(page, 0, &tmp);
2277         if (ret < 0) {
2278                 pr_err("Unable to extract preferred ALUA value\n");
2279                 return ret;
2280         }
2281         if ((tmp != 0) && (tmp != 1)) {
2282                 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2283                 return -EINVAL;
2284         }
2285         tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2286
2287         return count;
2288 }
2289
/*
 * configfs "show" handler for the secondary (per-port) offline bit.
 * Returns -ENODEV when the LUN has no active port.
 */
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}
2298
2299 ssize_t core_alua_store_offline_bit(
2300         struct se_lun *lun,
2301         const char *page,
2302         size_t count)
2303 {
2304         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2305         unsigned long tmp;
2306         int ret;
2307
2308         if (!lun->lun_sep)
2309                 return -ENODEV;
2310
2311         ret = kstrtoul(page, 0, &tmp);
2312         if (ret < 0) {
2313                 pr_err("Unable to extract alua_tg_pt_offline value\n");
2314                 return ret;
2315         }
2316         if ((tmp != 0) && (tmp != 1)) {
2317                 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2318                                 tmp);
2319                 return -EINVAL;
2320         }
2321         tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
2322         if (!tg_pt_gp_mem) {
2323                 pr_err("Unable to locate *tg_pt_gp_mem\n");
2324                 return -EINVAL;
2325         }
2326
2327         ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
2328                         lun->lun_sep, 0, (int)tmp);
2329         if (ret < 0)
2330                 return -EINVAL;
2331
2332         return count;
2333 }
2334
2335 ssize_t core_alua_show_secondary_status(
2336         struct se_lun *lun,
2337         char *page)
2338 {
2339         return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
2340 }
2341
2342 ssize_t core_alua_store_secondary_status(
2343         struct se_lun *lun,
2344         const char *page,
2345         size_t count)
2346 {
2347         unsigned long tmp;
2348         int ret;
2349
2350         ret = kstrtoul(page, 0, &tmp);
2351         if (ret < 0) {
2352                 pr_err("Unable to extract alua_tg_pt_status\n");
2353                 return ret;
2354         }
2355         if ((tmp != ALUA_STATUS_NONE) &&
2356             (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2357             (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2358                 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2359                                 tmp);
2360                 return -EINVAL;
2361         }
2362         lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
2363
2364         return count;
2365 }
2366
2367 ssize_t core_alua_show_secondary_write_metadata(
2368         struct se_lun *lun,
2369         char *page)
2370 {
2371         return sprintf(page, "%d\n",
2372                         lun->lun_sep->sep_tg_pt_secondary_write_md);
2373 }
2374
2375 ssize_t core_alua_store_secondary_write_metadata(
2376         struct se_lun *lun,
2377         const char *page,
2378         size_t count)
2379 {
2380         unsigned long tmp;
2381         int ret;
2382
2383         ret = kstrtoul(page, 0, &tmp);
2384         if (ret < 0) {
2385                 pr_err("Unable to extract alua_tg_pt_write_md\n");
2386                 return ret;
2387         }
2388         if ((tmp != 0) && (tmp != 1)) {
2389                 pr_err("Illegal value for alua_tg_pt_write_md:"
2390                                 " %lu\n", tmp);
2391                 return -EINVAL;
2392         }
2393         lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
2394
2395         return count;
2396 }
2397
2398 int core_setup_alua(struct se_device *dev)
2399 {
2400         if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
2401             !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2402                 struct t10_alua_lu_gp_member *lu_gp_mem;
2403
2404                 /*
2405                  * Associate this struct se_device with the default ALUA
2406                  * LUN Group.
2407                  */
2408                 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2409                 if (IS_ERR(lu_gp_mem))
2410                         return PTR_ERR(lu_gp_mem);
2411
2412                 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2413                 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2414                                 default_lu_gp);
2415                 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2416
2417                 pr_debug("%s: Adding to default ALUA LU Group:"
2418                         " core/alua/lu_gps/default_lu_gp\n",
2419                         dev->transport->name);
2420         }
2421
2422         return 0;
2423 }