/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_mem_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
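
/*
 * Usage sketch (illustrative, not new functionality): callers pass one of
 * the scsi_index_t enum values; the device setup path later in this file
 * does exactly this:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */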

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
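
/*
 * Example (a sketch of assumed fabric-module usage, not code from this
 * file): transport_init_session() is normally paired with
 * transport_register_session() while building an I_T nexus:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 */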

/*
 * Called with spin_lock_irqsave(&se_tpg->session_lock, flags) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O,
	 * e.g. *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

int target_put_session(struct se_session *se_sess)
{
	return kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
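
/*
 * Note: target_get_session()/target_put_session() wrap the sess_kref
 * initialized in transport_init_session(); the final put invokes the
 * fabric's ->close_session() callback via target_release_session().
 */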

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is being dropped now for an explicit NodeACL,
	 * wake the sleeping ->acl_free_comp waiter in the configfs
	 * se_node_acl->acl_group removal context.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
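
/*
 * Teardown sketch (assumed fabric usage, not shown in this file): a fabric
 * module typically calls transport_deregister_session_configfs() first to
 * unlink the session from its se_node_acl, and then
 * transport_deregister_session() to drop it from the TPG and free it:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */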

/*
 * Called with cmd->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	if (!dev)
		return;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (task->task_flags & TF_ACTIVE)
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		if (task->t_state_active) {
			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
				cmd->se_tfo->get_task_tag(cmd), dev, task);

			list_del(&task->t_state_list);
			atomic_dec(&cmd->t_task_cdbs_ex_left);
			task->t_state_active = false;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	}
}

/*	transport_cmd_check_stop():
 *
 *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 *	'transport_off = 2' determines if task_dev_state should be removed.
 *
 *	A non-zero u8 t_state sets cmd->t_state.
 *	Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if an IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2
		 * handoff to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to the fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release their
			 * internally allocated I/O reference and struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		transport_all_task_dev_remove_state(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(cmd->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_se_cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

/*	transport_complete_task():
 *
 *	Called from interrupt and non-interrupt context depending
 *	on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags &= ~TF_ACTIVE;

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_flags |= TF_HAS_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (task->task_flags & TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
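
/*
 * Example: a backend's ->do_task() completion path calls this once per
 * struct se_task, e.g. transport_complete_task(task, 1) on success;
 * transport_complete_sync_cache() above is one such wrapper.
 */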

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_device->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * the head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks, once they have been
	 * transitioned from Dormant -> Active state, are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/*	__transport_add_task_to_execute_queue():
 *
 *	Called with se_device->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	task->t_state_active = true;

	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		spin_lock(&dev->execute_task_lock);
		if (!task->t_state_active) {
			list_add_tail(&task->t_state_list,
				      &dev->state_task_list);
			task->t_state_active = true;

			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
				task->task_se_cmd->se_tfo->get_task_tag(
				task->task_se_cmd), task, dev);
		}
		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_task *task, *task_prev = NULL;

	list_for_each_entry(task, &cmd->t_task_list, t_list) {
		if (!list_empty(&task->t_execute_list))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		task_prev = task;
	}
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_add_tasks_from_cmd(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (WARN_ON(list_empty(&task->t_execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__transport_remove_task_from_execute_queue(task, dev);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set,
	 * from spc3r23.pdf section 7.5.1.
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association,
	 * from spc3r23.pdf Section 7.6.3.1 Table 297.
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type,
	 * from spc3r23.pdf Section 7.6.3.1 Table 298.
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding),
	 * from spc3r23.pdf Section 7.6.3.1 Table 296.
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
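
/*
 * Parsing sketch (assumed caller, e.g. a backend walking the designator
 * list of an INQUIRY EVPD 0x83 response): each descriptor is decoded with
 * the transport_set_vpd_*() helpers in turn:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */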
1259
1260 static void core_setup_task_attr_emulation(struct se_device *dev)
1261 {
1262         /*
1263          * If this device is from Target_Core_Mod/pSCSI, disable the
1264          * SAM Task Attribute emulation.
1265          *
1266          * This is currently not available in upsream Linux/SCSI Target
1267          * mode code, and is assumed to be disabled while using TCM/pSCSI.
1268          */
1269         if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1270                 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1271                 return;
1272         }
1273
1274         dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1275         pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1276                 " device\n", dev->transport->name,
1277                 dev->transport->get_device_rev(dev));
1278 }
1279
1280 static void scsi_dump_inquiry(struct se_device *dev)
1281 {
1282         struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1283         char buf[17];
1284         int i, device_type;
1285         /*
1286          * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1287          */
1288         for (i = 0; i < 8; i++)
1289                 if (wwn->vendor[i] >= 0x20)
1290                         buf[i] = wwn->vendor[i];
1291                 else
1292                         buf[i] = ' ';
1293         buf[i] = '\0';
1294         pr_debug("  Vendor: %s\n", buf);
1295
1296         for (i = 0; i < 16; i++)
1297                 if (wwn->model[i] >= 0x20)
1298                         buf[i] = wwn->model[i];
1299                 else
1300                         buf[i] = ' ';
1301         buf[i] = '\0';
1302         pr_debug("  Model: %s\n", buf);
1303
1304         for (i = 0; i < 4; i++)
1305                 if (wwn->revision[i] >= 0x20)
1306                         buf[i] = wwn->revision[i];
1307                 else
1308                         buf[i] = ' ';
1309         buf[i] = '\0';
1310         pr_debug("  Revision: %s\n", buf);
1311
1312         device_type = dev->transport->get_device_type(dev);
1313         pr_debug("  Type:   %s ", scsi_device_type(device_type));
1314         pr_debug("                 ANSI SCSI revision: %02x\n",
1315                                 dev->transport->get_device_rev(dev));
1316 }
1317
1318 struct se_device *transport_add_device_to_core_hba(
1319         struct se_hba *hba,
1320         struct se_subsystem_api *transport,
1321         struct se_subsystem_dev *se_dev,
1322         u32 device_flags,
1323         void *transport_dev,
1324         struct se_dev_limits *dev_limits,
1325         const char *inquiry_prod,
1326         const char *inquiry_rev)
1327 {
1328         int force_pt;
1329         struct se_device  *dev;
1330
1331         dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1332         if (!dev) {
1333                 pr_err("Unable to allocate memory for se_dev_t\n");
1334                 return NULL;
1335         }
1336
1337         transport_init_queue_obj(&dev->dev_queue_obj);
1338         dev->dev_flags          = device_flags;
1339         dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
1340         dev->dev_ptr            = transport_dev;
1341         dev->se_hba             = hba;
1342         dev->se_sub_dev         = se_dev;
1343         dev->transport          = transport;
1344         INIT_LIST_HEAD(&dev->dev_list);
1345         INIT_LIST_HEAD(&dev->dev_sep_list);
1346         INIT_LIST_HEAD(&dev->dev_tmr_list);
1347         INIT_LIST_HEAD(&dev->execute_task_list);
1348         INIT_LIST_HEAD(&dev->delayed_cmd_list);
1349         INIT_LIST_HEAD(&dev->state_task_list);
1350         INIT_LIST_HEAD(&dev->qf_cmd_list);
1351         spin_lock_init(&dev->execute_task_lock);
1352         spin_lock_init(&dev->delayed_cmd_lock);
1353         spin_lock_init(&dev->dev_reservation_lock);
1354         spin_lock_init(&dev->dev_status_lock);
1355         spin_lock_init(&dev->se_port_lock);
1356         spin_lock_init(&dev->se_tmr_lock);
1357         spin_lock_init(&dev->qf_cmd_lock);
1358         atomic_set(&dev->dev_ordered_id, 0);
1359
1360         se_dev_set_default_attribs(dev, dev_limits);
1361
1362         dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1363         dev->creation_time = get_jiffies_64();
1364         spin_lock_init(&dev->stats_lock);
1365
1366         spin_lock(&hba->device_lock);
1367         list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1368         hba->dev_count++;
1369         spin_unlock(&hba->device_lock);
1370         /*
1371          * Setup the SAM Task Attribute emulation for struct se_device
1372          */
1373         core_setup_task_attr_emulation(dev);
1374         /*
1375          * Force PR and ALUA passthrough emulation with internal object use.
1376          */
1377         force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1378         /*
1379          * Setup the Reservations infrastructure for struct se_device
1380          */
1381         core_setup_reservations(dev, force_pt);
1382         /*
1383          * Setup the Asymmetric Logical Unit Assignment for struct se_device
1384          */
1385         if (core_setup_alua(dev, force_pt) < 0)
1386                 goto out;
1387
1388         /*
1389          * Startup the struct se_device processing thread
1390          */
1391         dev->process_thread = kthread_run(transport_processing_thread, dev,
1392                                           "LIO_%s", dev->transport->name);
1393         if (IS_ERR(dev->process_thread)) {
1394                 pr_err("Unable to create kthread: LIO_%s\n",
1395                         dev->transport->name);
1396                 goto out;
1397         }
1398         /*
1399          * Setup work_queue for QUEUE_FULL
1400          */
1401         INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1402         /*
1403          * Preload the initial INQUIRY const values if we are doing
1404          * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1405          * passthrough because this is being provided by the backend LLD.
1406          * This is required so that transport_get_inquiry() copies these
1407          * originals once back into DEV_T10_WWN(dev) for the virtual device
1408          * setup.
1409          */
1410         if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1411                 if (!inquiry_prod || !inquiry_rev) {
1412                         pr_err("All non TCM/pSCSI plugins require"
1413                                 " INQUIRY consts\n");
1414                         goto out;
1415                 }
1416
1417                 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1418                 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1419                 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1420         }
1421         scsi_dump_inquiry(dev);
1422
1423         return dev;
1424 out:
1425         kthread_stop(dev->process_thread);
1426
1427         spin_lock(&hba->device_lock);
1428         list_del(&dev->dev_list);
1429         hba->dev_count--;
1430         spin_unlock(&hba->device_lock);
1431
1432         se_release_vpd_for_dev(dev);
1433
1434         kfree(dev);
1435
1436         return NULL;
1437 }
1438 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1439
1440 /*      transport_generic_prepare_cdb():
1441  *
1442  *      Since the Initiator sees iSCSI devices as LUNs,  the SCSI CDB will
1443  *      contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1444  *      The point of this is since we are mapping iSCSI LUNs to
1445  *      SCSI Target IDs having a non-zero LUN in the CDB will throw the
1446  *      devices and HBAs for a loop.
1447  */
1448 static inline void transport_generic_prepare_cdb(
1449         unsigned char *cdb)
1450 {
1451         switch (cdb[0]) {
1452         case READ_10: /* SBC - RDProtect */
1453         case READ_12: /* SBC - RDProtect */
1454         case READ_16: /* SBC - RDProtect */
1455         case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1456         case VERIFY: /* SBC - VRProtect */
1457         case VERIFY_16: /* SBC - VRProtect */
1458         case WRITE_VERIFY: /* SBC - VRProtect */
1459         case WRITE_VERIFY_12: /* SBC - VRProtect */
1460                 break;
1461         default:
1462                 cdb[1] &= 0x1f; /* clear logical unit number */
1463                 break;
1464         }
1465 }
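
/*
 * Illustrative sketch (not part of the driver): for a READ_6 CDB carrying
 * SAM-2 LUN bits in byte 1, only the five high-order LBA bits survive:
 *
 *	unsigned char cdb[6] = { READ_6, 0x55, 0x00, 0x10, 0x08, 0x00 };
 *	transport_generic_prepare_cdb(cdb);
 *	// cdb[1] == 0x15: bits 7-5 (LUN) cleared, bits 4-0 (LBA) kept
 */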
1466
1467 static struct se_task *
1468 transport_generic_get_task(struct se_cmd *cmd,
1469                 enum dma_data_direction data_direction)
1470 {
1471         struct se_task *task;
1472         struct se_device *dev = cmd->se_dev;
1473
1474         task = dev->transport->alloc_task(cmd->t_task_cdb);
1475         if (!task) {
1476                 pr_err("Unable to allocate struct se_task\n");
1477                 return NULL;
1478         }
1479
1480         INIT_LIST_HEAD(&task->t_list);
1481         INIT_LIST_HEAD(&task->t_execute_list);
1482         INIT_LIST_HEAD(&task->t_state_list);
1483         init_completion(&task->task_stop_comp);
1484         task->task_se_cmd = cmd;
1485         task->task_data_direction = data_direction;
1486
1487         return task;
1488 }
1489
1490 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1491
1492 /*
1493  * Used by fabric modules containing a local struct se_cmd within their
1494  * fabric dependent per I/O descriptor.
1495  */
1496 void transport_init_se_cmd(
1497         struct se_cmd *cmd,
1498         struct target_core_fabric_ops *tfo,
1499         struct se_session *se_sess,
1500         u32 data_length,
1501         int data_direction,
1502         int task_attr,
1503         unsigned char *sense_buffer)
1504 {
1505         INIT_LIST_HEAD(&cmd->se_lun_node);
1506         INIT_LIST_HEAD(&cmd->se_delayed_node);
1507         INIT_LIST_HEAD(&cmd->se_qf_node);
1508         INIT_LIST_HEAD(&cmd->se_queue_node);
1509         INIT_LIST_HEAD(&cmd->se_cmd_list);
1510         INIT_LIST_HEAD(&cmd->t_task_list);
1511         init_completion(&cmd->transport_lun_fe_stop_comp);
1512         init_completion(&cmd->transport_lun_stop_comp);
1513         init_completion(&cmd->t_transport_stop_comp);
1514         init_completion(&cmd->cmd_wait_comp);
1515         spin_lock_init(&cmd->t_state_lock);
1516         cmd->transport_state = CMD_T_DEV_ACTIVE;
1517
1518         cmd->se_tfo = tfo;
1519         cmd->se_sess = se_sess;
1520         cmd->data_length = data_length;
1521         cmd->data_direction = data_direction;
1522         cmd->sam_task_attr = task_attr;
1523         cmd->sense_buffer = sense_buffer;
1524 }
1525 EXPORT_SYMBOL(transport_init_se_cmd);
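
/*
 * Hypothetical usage sketch (the fabric names below are illustrative, not
 * a real fabric module): a fabric embeds struct se_cmd inside its per-I/O
 * descriptor and initializes it before submitting the CDB:
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	transport_init_se_cmd(&fcmd->se_cmd, &my_fabric_ops, se_sess,
 *			      data_len, DMA_FROM_DEVICE, MSG_SIMPLE_TAG,
 *			      fcmd->sense_buf);
 */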
1526
1527 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1528 {
1529         /*
1530          * Check if SAM Task Attribute emulation is enabled for this
1531          * struct se_device storage object
1532          */
1533         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1534                 return 0;
1535
1536         if (cmd->sam_task_attr == MSG_ACA_TAG) {
1537                 pr_debug("SAM Task Attribute ACA"
1538                         " emulation is not supported\n");
1539                 return -EINVAL;
1540         }
1541         /*
1542          * Used to determine when ORDERED commands should go from
1543          * Dormant to Active status.
1544          */
1545         cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1546         smp_mb__after_atomic_inc();
1547         pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1548                         cmd->se_ordered_id, cmd->sam_task_attr,
1549                         cmd->se_dev->transport->name);
1550         return 0;
1551 }
1552
1553 /*      transport_generic_allocate_tasks():
1554  *
1555  *      Called from fabric RX Thread.
1556  */
1557 int transport_generic_allocate_tasks(
1558         struct se_cmd *cmd,
1559         unsigned char *cdb)
1560 {
1561         int ret;
1562
1563         transport_generic_prepare_cdb(cdb);
1564         /*
1565          * Ensure that the received CDB does not exceed the max
1566          * (252 + 8 = 260) bytes for VARIABLE_LENGTH_CMD
1567          */
1568         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1569                 pr_err("Received SCSI CDB with command_size: %d that"
1570                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1571                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1572                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1573                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1574                 return -EINVAL;
1575         }
1576         /*
1577          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1578          * allocate the additional extended CDB buffer now.  Otherwise
1579          * set up the pointer from __t_task_cdb to t_task_cdb.
1580          */
1581         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1582                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1583                                                 GFP_KERNEL);
1584                 if (!cmd->t_task_cdb) {
1585                         pr_err("Unable to allocate cmd->t_task_cdb"
1586                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1587                                 scsi_command_size(cdb),
1588                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1589                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1590                         cmd->scsi_sense_reason =
1591                                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1592                         return -ENOMEM;
1593                 }
1594         } else
1595                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1596         /*
1597          * Copy the original CDB into cmd->t_task_cdb
1598          */
1599         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1600         /*
1601          * Setup the received CDB based on SCSI defined opcodes and
1602          * perform unit attention, persistent reservations and ALUA
1603          * checks for virtual device backends.  The cmd->t_task_cdb
1604          * pointer is expected to be setup before we reach this point.
1605          */
1606         ret = transport_generic_cmd_sequencer(cmd, cdb);
1607         if (ret < 0)
1608                 return ret;
1609         /*
1610          * Check for SAM Task Attribute Emulation
1611          */
1612         if (transport_check_alloc_task_attr(cmd) < 0) {
1613                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1614                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1615                 return -EINVAL;
1616         }
1617         spin_lock(&cmd->se_lun->lun_sep_lock);
1618         if (cmd->se_lun->lun_sep)
1619                 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1620         spin_unlock(&cmd->se_lun->lun_sep_lock);
1621         return 0;
1622 }
1623 EXPORT_SYMBOL(transport_generic_allocate_tasks);
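
/*
 * Sizing note (illustrative): __t_task_cdb is TCM_MAX_COMMAND_SIZE bytes,
 * so ordinary 6/10/12/16 byte CDBs use the embedded buffer above, while a
 * longer VARIABLE_LENGTH_CMD (up to 260 bytes) takes the kzalloc() path.
 */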
1624
1625 /*
1626  * Used by fabric module frontends to queue tasks directly.
1627  * May only be called from process context.
1628  */
1629 int transport_handle_cdb_direct(
1630         struct se_cmd *cmd)
1631 {
1632         int ret;
1633
1634         if (!cmd->se_lun) {
1635                 dump_stack();
1636                 pr_err("cmd->se_lun is NULL\n");
1637                 return -EINVAL;
1638         }
1639         if (in_interrupt()) {
1640                 dump_stack();
1641                 pr_err("transport_generic_handle_cdb cannot be called"
1642                                 " from interrupt context\n");
1643                 return -EINVAL;
1644         }
1645         /*
1646          * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
1647          * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1648          * in existing usage to ensure that outstanding descriptors are handled
1649          * correctly during shutdown via transport_wait_for_tasks()
1650          *
1651          * Also, we don't take cmd->t_state_lock here as we only expect
1652          * this to be called for initial descriptor submission.
1653          */
1654         cmd->t_state = TRANSPORT_NEW_CMD;
1655         cmd->transport_state |= CMD_T_ACTIVE;
1656
1657         /*
1658          * transport_generic_new_cmd() is already handling QUEUE_FULL,
1659          * so follow TRANSPORT_NEW_CMD processing thread context usage
1660          * and call transport_generic_request_failure() if necessary..
1661          */
1662         ret = transport_generic_new_cmd(cmd);
1663         if (ret < 0)
1664                 transport_generic_request_failure(cmd);
1665
1666         return 0;
1667 }
1668 EXPORT_SYMBOL(transport_handle_cdb_direct);
1669
1670 /**
1671  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1672  *
1673  * @se_cmd: command descriptor to submit
1674  * @se_sess: associated se_sess for endpoint
1675  * @cdb: pointer to SCSI CDB
1676  * @sense: pointer to SCSI sense buffer
1677  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1678  * @data_length: fabric expected data transfer length
1679  * @task_attr: SAM task attribute
1680  * @data_dir: DMA data direction
1681  * @flags: flags for command submission from target_sc_flags_table
1682  *
1683  * This may only be called from process context, and also currently
1684  * assumes internal allocation of fabric payload buffer by target-core.
1685  **/
1686 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1687                 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1688                 u32 data_length, int task_attr, int data_dir, int flags)
1689 {
1690         struct se_portal_group *se_tpg;
1691         int rc;
1692
1693         se_tpg = se_sess->se_tpg;
1694         BUG_ON(!se_tpg);
1695         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1696         BUG_ON(in_interrupt());
1697         /*
1698          * Initialize se_cmd for target operation.  From this point
1699          * exceptions are handled by sending exception status via
1700          * target_core_fabric_ops->queue_status() callback
1701          */
1702         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1703                                 data_length, data_dir, task_attr, sense);
1704         if (flags & TARGET_SCF_UNKNOWN_SIZE)
1705                 se_cmd->unknown_data_length = 1;
1706         /*
1707          * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1708          * se_sess->sess_cmd_list.  A second kref_get here is necessary
1709          * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1710          * kref_put() to happen during fabric packet acknowledgement.
1711          */
1712         target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1713         /*
1714          * Signal bidirectional data payloads to target-core
1715          */
1716         if (flags & TARGET_SCF_BIDI_OP)
1717                 se_cmd->se_cmd_flags |= SCF_BIDI;
1718         /*
1719          * Locate se_lun pointer and attach it to struct se_cmd
1720          */
1721         if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
1722                 transport_send_check_condition_and_sense(se_cmd,
1723                                 se_cmd->scsi_sense_reason, 0);
1724                 target_put_sess_cmd(se_sess, se_cmd);
1725                 return;
1726         }
1727         /*
1728          * Sanitize CDBs via transport_generic_cmd_sequencer() and
1729          * allocate the necessary tasks to complete the received CDB+data
1730          */
1731         rc = transport_generic_allocate_tasks(se_cmd, cdb);
1732         if (rc != 0) {
1733                 transport_generic_request_failure(se_cmd);
1734                 return;
1735         }
1736         /*
1737          * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1738          * for immediate execution of READs, otherwise wait for
1739          * transport_generic_handle_data() to be called for WRITEs
1740          * when fabric has filled the incoming buffer.
1741          */
1742         transport_handle_cdb_direct(se_cmd);
1743         return;
1744 }
1745 EXPORT_SYMBOL(target_submit_cmd);
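
/*
 * Hypothetical caller sketch (variable names are illustrative): a fabric
 * RX path handing a received READ to target-core might do:
 *
 *	target_submit_cmd(&fcmd->se_cmd, se_sess, cdb, fcmd->sense_buf,
 *			  unpacked_lun, transfer_len, MSG_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, 0);
 */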
1746
1747 static void target_complete_tmr_failure(struct work_struct *work)
1748 {
1749         struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1750
1751         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1752         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1753         transport_generic_free_cmd(se_cmd, 0);
1754 }
1755
1756 /**
1757  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1758  *                     for TMR CDBs
1759  *
1760  * @se_cmd: command descriptor to submit
1761  * @se_sess: associated se_sess for endpoint
1762  * @sense: pointer to SCSI sense buffer
1763  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1764  * @fabric_tmr_ptr: fabric context for TMR req
1765  * @tm_type: Type of TM request
1766  * @gfp: gfp type for caller
1767  * @tag: referenced task tag for TMR_ABORT_TASK
1768  * @flags: submit cmd flags
1769  *
1770  * Callable from all contexts.
1771  **/
1772
1773 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1774                 unsigned char *sense, u32 unpacked_lun,
1775                 void *fabric_tmr_ptr, unsigned char tm_type,
1776                 gfp_t gfp, unsigned int tag, int flags)
1777 {
1778         struct se_portal_group *se_tpg;
1779         int ret;
1780
1781         se_tpg = se_sess->se_tpg;
1782         BUG_ON(!se_tpg);
1783
1784         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1785                               0, DMA_NONE, MSG_SIMPLE_TAG, sense);
1786         /*
1787          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1788          * allocation failure.
1789          */
1790         ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1791         if (ret < 0)
1792                 return -ENOMEM;
1793
1794         if (tm_type == TMR_ABORT_TASK)
1795                 se_cmd->se_tmr_req->ref_task_tag = tag;
1796
1797         /* See target_submit_cmd for commentary */
1798         target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1799
1800         ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1801         if (ret) {
1802                 /*
1803                  * For callback during failure handling, push this work off
1804                  * to process context with TMR_LUN_DOES_NOT_EXIST status.
1805                  */
1806                 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1807                 schedule_work(&se_cmd->work);
1808                 return 0;
1809         }
1810         transport_generic_handle_tmr(se_cmd);
1811         return 0;
1812 }
1813 EXPORT_SYMBOL(target_submit_tmr);
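
/*
 * Hypothetical caller sketch (variable names are illustrative): submitting
 * a LUN_RESET from a fabric; the tag argument only matters for
 * TMR_ABORT_TASK:
 *
 *	ret = target_submit_tmr(&fcmd->se_cmd, se_sess, fcmd->sense_buf,
 *				unpacked_lun, fabric_tmr, TMR_LUN_RESET,
 *				GFP_KERNEL, 0, 0);
 */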
1814
1815 /*
1816  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1817  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1818  * complete setup in TCM process context w/ TFO->new_cmd_map().
1819  */
1820 int transport_generic_handle_cdb_map(
1821         struct se_cmd *cmd)
1822 {
1823         if (!cmd->se_lun) {
1824                 dump_stack();
1825                 pr_err("cmd->se_lun is NULL\n");
1826                 return -EINVAL;
1827         }
1828
1829         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1830         return 0;
1831 }
1832 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1833
1834 /*      transport_generic_handle_data():
1835  *
1836  *      Called by the fabric module once incoming WRITE data has been
1836  *      received, to queue the command for backend processing.
1837  */
1838 int transport_generic_handle_data(
1839         struct se_cmd *cmd)
1840 {
1841         /*
1842          * For the software fabric case, we assume the nexus is being
1843          * failed/shutdown when signals are pending from the kthread context
1844          * caller, so we return a failure.  For the HW target mode case running
1845          * in interrupt code, the signal_pending() check is skipped.
1846          */
1847         if (!in_interrupt() && signal_pending(current))
1848                 return -EPERM;
1849         /*
1850          * If the received CDB has already been ABORTED by the generic
1851          * target engine, we now call transport_check_aborted_status()
1852          * to queue any delayed TASK_ABORTED status for the received CDB to the
1853          * fabric module as we are expecting no further incoming DATA OUT
1854          * sequences at this point.
1855          */
1856         if (transport_check_aborted_status(cmd, 1) != 0)
1857                 return 0;
1858
1859         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1860         return 0;
1861 }
1862 EXPORT_SYMBOL(transport_generic_handle_data);
1863
1864 /*      transport_generic_handle_tmr():
1865  *
1866  *      Called by the fabric module to queue a received TMR for processing.
1867  */
1868 int transport_generic_handle_tmr(
1869         struct se_cmd *cmd)
1870 {
1871         transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1872         return 0;
1873 }
1874 EXPORT_SYMBOL(transport_generic_handle_tmr);
1875
1876 /*
1877  * If the task is active, request it to be stopped and sleep until it
1878  * has completed.
1879  */
1880 bool target_stop_task(struct se_task *task, unsigned long *flags)
1881 {
1882         struct se_cmd *cmd = task->task_se_cmd;
1883         bool was_active = false;
1884
1885         if (task->task_flags & TF_ACTIVE) {
1886                 task->task_flags |= TF_REQUEST_STOP;
1887                 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1888
1889                 pr_debug("Task %p waiting to complete\n", task);
1890                 wait_for_completion(&task->task_stop_comp);
1891                 pr_debug("Task %p stopped successfully\n", task);
1892
1893                 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1894                 atomic_dec(&cmd->t_task_cdbs_left);
1895                 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1896                 was_active = true;
1897         }
1898
1899         return was_active;
1900 }
1901
1902 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1903 {
1904         struct se_task *task, *task_tmp;
1905         unsigned long flags;
1906         int ret = 0;
1907
1908         pr_debug("ITT[0x%08x] - Stopping tasks\n",
1909                 cmd->se_tfo->get_task_tag(cmd));
1910
1911         /*
1912          * No tasks remain in the execution queue
1913          */
1914         spin_lock_irqsave(&cmd->t_state_lock, flags);
1915         list_for_each_entry_safe(task, task_tmp,
1916                                 &cmd->t_task_list, t_list) {
1917                 pr_debug("Processing task %p\n", task);
1918                 /*
1919                  * If the struct se_task has not been sent and is not active,
1920                  * remove the struct se_task from the execution queue.
1921                  */
1922                 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1923                         spin_unlock_irqrestore(&cmd->t_state_lock,
1924                                         flags);
1925                         transport_remove_task_from_execute_queue(task,
1926                                         cmd->se_dev);
1927
1928                         pr_debug("Task %p removed from execute queue\n", task);
1929                         spin_lock_irqsave(&cmd->t_state_lock, flags);
1930                         continue;
1931                 }
1932
1933                 if (!target_stop_task(task, &flags)) {
1934                         pr_debug("Task %p - did nothing\n", task);
1935                         ret++;
1936                 }
1937         }
1938         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1939
1940         return ret;
1941 }
1942
1943 /*
1944  * Handle SAM-esque emulation for generic transport request failures.
1945  */
1946 void transport_generic_request_failure(struct se_cmd *cmd)
1947 {
1948         int ret = 0;
1949
1950         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1951                 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1952                 cmd->t_task_cdb[0]);
1953         pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1954                 cmd->se_tfo->get_cmd_state(cmd),
1955                 cmd->t_state, cmd->scsi_sense_reason);
1956         pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1957                 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1958                 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1959                 cmd->t_task_list_num,
1960                 atomic_read(&cmd->t_task_cdbs_left),
1961                 atomic_read(&cmd->t_task_cdbs_sent),
1962                 atomic_read(&cmd->t_task_cdbs_ex_left),
1963                 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1964                 (cmd->transport_state & CMD_T_STOP) != 0,
1965                 (cmd->transport_state & CMD_T_SENT) != 0);
1966
1967         /*
1968          * For SAM Task Attribute emulation for failed struct se_cmd
1969          */
1970         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1971                 transport_complete_task_attr(cmd);
1972
1973         switch (cmd->scsi_sense_reason) {
1974         case TCM_NON_EXISTENT_LUN:
1975         case TCM_UNSUPPORTED_SCSI_OPCODE:
1976         case TCM_INVALID_CDB_FIELD:
1977         case TCM_INVALID_PARAMETER_LIST:
1978         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1979         case TCM_UNKNOWN_MODE_PAGE:
1980         case TCM_WRITE_PROTECTED:
1981         case TCM_CHECK_CONDITION_ABORT_CMD:
1982         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1983         case TCM_CHECK_CONDITION_NOT_READY:
1984                 break;
1985         case TCM_RESERVATION_CONFLICT:
1986                 /*
1987                  * No SENSE Data payload for this case, set SCSI Status
1988                  * and queue the response to $FABRIC_MOD.
1989                  *
1990                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1991                  */
1992                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1993                 /*
1994                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1995                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1996                  * CONFLICT STATUS.
1997                  *
1998                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1999                  */
2000                 if (cmd->se_sess &&
2001                     cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2002                         core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2003                                 cmd->orig_fe_lun, 0x2C,
2004                                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2005
2006                 ret = cmd->se_tfo->queue_status(cmd);
2007                 if (ret == -EAGAIN || ret == -ENOMEM)
2008                         goto queue_full;
2009                 goto check_stop;
2010         default:
2011                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2012                         cmd->t_task_cdb[0], cmd->scsi_sense_reason);
2013                 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2014                 break;
2015         }
2016         /*
2017          * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
2018          * make the call to transport_send_check_condition_and_sense()
2019          * directly.  Otherwise expect the fabric to make the call to
2020          * transport_send_check_condition_and_sense() after handling
2021          * possible unsolicited write data payloads.
2022          */
2023         ret = transport_send_check_condition_and_sense(cmd,
2024                         cmd->scsi_sense_reason, 0);
2025         if (ret == -EAGAIN || ret == -ENOMEM)
2026                 goto queue_full;
2027
2028 check_stop:
2029         transport_lun_remove_cmd(cmd);
2030         transport_cmd_check_stop_to_fabric(cmd);
2032         return;
2033
2034 queue_full:
2035         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2036         transport_handle_queue_full(cmd, cmd->se_dev);
2037 }
2038 EXPORT_SYMBOL(transport_generic_request_failure);
2039
2040 static inline u32 transport_lba_21(unsigned char *cdb)
2041 {
2042         return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2043 }
2044
2045 static inline u32 transport_lba_32(unsigned char *cdb)
2046 {
2047         return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2048 }
2049
2050 static inline unsigned long long transport_lba_64(unsigned char *cdb)
2051 {
2052         unsigned int __v1, __v2;
2053
2054         __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2055         __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2056
2057         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2058 }
2059
2060 /*
2061  * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2062  */
2063 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2064 {
2065         unsigned int __v1, __v2;
2066
2067         __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2068         __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2069
2070         return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2071 }
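
/*
 * Equivalence note (sketch): the open-coded shifts above match
 * get_unaligned_be64(&cdb[2]) for READ_16/WRITE_16 and
 * get_unaligned_be64(&cdb[12]) for the 32-byte variants.
 */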
2072
2073 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2074 {
2075         unsigned long flags;
2076
2077         spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2078         se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2079         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2080 }
2081
2082 /*
2083  * Called from Fabric Module context from transport_execute_tasks()
2084  *
2085  * The return of this function determines whether the tasks from struct se_cmd
2086  * get added to the execution queue in transport_execute_tasks(),
2087  * or are added to the delayed command list here.
2088  */
2089 static inline int transport_execute_task_attr(struct se_cmd *cmd)
2090 {
2091         if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2092                 return 1;
2093         /*
2094          * Check for the HEAD_OF_QUEUE attribute, and if set return 1 so the
2095          * tasks of the passed struct se_cmd go to the front of the execute list.
2096          */
2097         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2098                 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2099                         " 0x%02x, se_ordered_id: %u\n",
2100                         cmd->t_task_cdb[0],
2101                         cmd->se_ordered_id);
2102                 return 1;
2103         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2104                 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2105                 smp_mb__after_atomic_inc();
2106
2107                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
2108                                 " list, se_ordered_id: %u\n",
2109                                 cmd->t_task_cdb[0],
2110                                 cmd->se_ordered_id);
2111                 /*
2112                  * Add ORDERED command to tail of execution queue if
2113                  * no other older commands exist that need to be
2114                  * completed first.
2115                  */
2116                 if (!atomic_read(&cmd->se_dev->simple_cmds))
2117                         return 1;
2118         } else {
2119                 /*
2120                  * For SIMPLE and UNTAGGED Task Attribute commands
2121                  */
2122                 atomic_inc(&cmd->se_dev->simple_cmds);
2123                 smp_mb__after_atomic_inc();
2124         }
2125         /*
2126          * Otherwise, if one or more commands with an ORDERED task attribute
2127          * are still outstanding, the task(s) built for the passed struct
2128          * se_cmd must remain dormant until those ORDERED commands complete.
2129          */
2130         if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
2131                 /*
2132                  * Add cmd w/ tasks to the delayed cmd queue, which will be
2133                  * drained upon completion of the outstanding ORDERED task(s).
2134                  */
2135                 spin_lock(&cmd->se_dev->delayed_cmd_lock);
2136                 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2137                 list_add_tail(&cmd->se_delayed_node,
2138                                 &cmd->se_dev->delayed_cmd_list);
2139                 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
2140
2141                 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
2142                         " delayed CMD list, se_ordered_id: %u\n",
2143                         cmd->t_task_cdb[0], cmd->sam_task_attr,
2144                         cmd->se_ordered_id);
2145                 /*
2146                  * Return zero to let transport_execute_tasks() know
2147                  * not to add the delayed tasks to the execution list.
2148                  */
2149                 return 0;
2150         }
2151         /*
2152          * Otherwise, no ORDERED task attributes exist.
2153          */
2154         return 1;
2155 }
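
/*
 * Summary of the decision above (illustrative, assuming SAM Task Attribute
 * emulation is enabled):
 *
 *	HEAD_OF_QUEUE -> 1 (execute now, front of the execution list)
 *	ORDERED       -> 1 only if no SIMPLE commands are outstanding,
 *	                 otherwise 0 (delayed)
 *	SIMPLE        -> 0 (delayed) while ORDERED commands are outstanding,
 *	                 otherwise 1
 */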
2156
2157 /*
2158  * Called from fabric module context in transport_generic_new_cmd() and
2159  * transport_generic_process_write()
2160  */
2161 static int transport_execute_tasks(struct se_cmd *cmd)
2162 {
2163         int add_tasks;
2164         struct se_device *se_dev = cmd->se_dev;
2165         /*
2166          * Call transport_cmd_check_stop() to see if a fabric exception
2167          * has occurred that prevents execution.
2168          */
2169         if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
2170                 /*
2171                  * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2172                  * attribute for the tasks of the received struct se_cmd CDB
2173                  */
2174                 add_tasks = transport_execute_task_attr(cmd);
2175                 if (!add_tasks)
2176                         goto execute_tasks;
2177                 /*
2178                  * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
2179                  * adds associated se_tasks while holding dev->execute_task_lock
2180          * before I/O dispatch to avoid a double spinlock access.
2181                  */
2182                 __transport_execute_tasks(se_dev, cmd);
2183                 return 0;
2184         }
2185
2186 execute_tasks:
2187         __transport_execute_tasks(se_dev, NULL);
2188         return 0;
2189 }
2190
2191 /*
2192  * Called to check the struct se_device tcq depth window, and once open, pull
2193  * struct se_task entries from struct se_device->execute_task_list for dispatch.
2194  *
2195  * Called from transport_processing_thread()
2196  */
2197 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
2198 {
2199         int error;
2200         struct se_cmd *cmd = NULL;
2201         struct se_task *task = NULL;
2202         unsigned long flags;
2203
2204 check_depth:
2205         spin_lock_irq(&dev->execute_task_lock);
2206         if (new_cmd != NULL)
2207                 __transport_add_tasks_from_cmd(new_cmd);
2208
2209         if (list_empty(&dev->execute_task_list)) {
2210                 spin_unlock_irq(&dev->execute_task_lock);
2211                 return 0;
2212         }
2213         task = list_first_entry(&dev->execute_task_list,
2214                                 struct se_task, t_execute_list);
2215         __transport_remove_task_from_execute_queue(task, dev);
2216         spin_unlock_irq(&dev->execute_task_lock);
2217
2218         cmd = task->task_se_cmd;
2219         spin_lock_irqsave(&cmd->t_state_lock, flags);
2220         task->task_flags |= (TF_ACTIVE | TF_SENT);
2221         atomic_inc(&cmd->t_task_cdbs_sent);
2222
2223         if (atomic_read(&cmd->t_task_cdbs_sent) ==
2224             cmd->t_task_list_num)
2225                 cmd->transport_state |= CMD_T_SENT;
2226
2227         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2228
2229         if (cmd->execute_task)
2230                 error = cmd->execute_task(task);
2231         else
2232                 error = dev->transport->do_task(task);
2233         if (error != 0) {
2234                 spin_lock_irqsave(&cmd->t_state_lock, flags);
2235                 task->task_flags &= ~TF_ACTIVE;
2236                 cmd->transport_state &= ~CMD_T_SENT;
2237                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2238
2239                 transport_stop_tasks_for_cmd(cmd);
2240                 transport_generic_request_failure(cmd);
2241         }
2242
2243         new_cmd = NULL;
2244         goto check_depth;
2247 }
2248
2249 static inline u32 transport_get_sectors_6(
2250         unsigned char *cdb,
2251         struct se_cmd *cmd,
2252         int *ret)
2253 {
2254         struct se_device *dev = cmd->se_dev;
2255
2256         /*
2257          * Assume TYPE_DISK for non struct se_device objects.
2258          * Use 8-bit sector value.
2259          */
2260         if (!dev)
2261                 goto type_disk;
2262
2263         /*
2264          * Use 24-bit allocation length for TYPE_TAPE.
2265          */
2266         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2267                 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2268
2269         /*
2270          * Everything else assume TYPE_DISK Sector CDB location.
2271          * Use 8-bit sector value.  SBC-3 says:
2272          *
2273          *   A TRANSFER LENGTH field set to zero specifies that 256
2274          *   logical blocks shall be written.  Any other value
2275          *   specifies the number of logical blocks that shall be
2276          *   written.
2277          */
2278 type_disk:
2279         return cdb[4] ? : 256;
2280 }
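
/*
 * Example (per the SBC-3 note above): a READ_6 TRANSFER LENGTH byte of
 * zero means 256 blocks, so transport_get_sectors_6() returns 256, not 0.
 */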
2281
2282 static inline u32 transport_get_sectors_10(
2283         unsigned char *cdb,
2284         struct se_cmd *cmd,
2285         int *ret)
2286 {
2287         struct se_device *dev = cmd->se_dev;
2288
2289         /*
2290          * Assume TYPE_DISK for non struct se_device objects.
2291          * Use 16-bit sector value.
2292          */
2293         if (!dev)
2294                 goto type_disk;
2295
2296         /*
2297          * XXX_10 is not defined in SSC, throw an exception
2298          */
2299         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2300                 *ret = -EINVAL;
2301                 return 0;
2302         }
2303
2304         /*
2305          * Everything else assume TYPE_DISK Sector CDB location.
2306          * Use 16-bit sector value.
2307          */
2308 type_disk:
2309         return (u32)(cdb[7] << 8) + cdb[8];
2310 }
2311
2312 static inline u32 transport_get_sectors_12(
2313         unsigned char *cdb,
2314         struct se_cmd *cmd,
2315         int *ret)
2316 {
2317         struct se_device *dev = cmd->se_dev;
2318
2319         /*
2320          * Assume TYPE_DISK for non struct se_device objects.
2321          * Use 32-bit sector value.
2322          */
2323         if (!dev)
2324                 goto type_disk;
2325
2326         /*
2327          * XXX_12 is not defined in SSC, throw an exception
2328          */
2329         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2330                 *ret = -EINVAL;
2331                 return 0;
2332         }
2333
2334         /*
2335          * Everything else assume TYPE_DISK Sector CDB location.
2336          * Use 32-bit sector value.
2337          */
2338 type_disk:
2339         return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2340 }
2341
2342 static inline u32 transport_get_sectors_16(
2343         unsigned char *cdb,
2344         struct se_cmd *cmd,
2345         int *ret)
2346 {
2347         struct se_device *dev = cmd->se_dev;
2348
2349         /*
2350          * Assume TYPE_DISK for non struct se_device objects.
2351          * Use 32-bit sector value.
2352          */
2353         if (!dev)
2354                 goto type_disk;
2355
2356         /*
2357          * Use 24-bit allocation length for TYPE_TAPE.
2358          */
2359         if (dev->transport->get_device_type(dev) == TYPE_TAPE)
2360                 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2361
2362 type_disk:
2363         return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2364                     (cdb[12] << 8) + cdb[13];
2365 }
2366
2367 /*
2368  * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2369  */
2370 static inline u32 transport_get_sectors_32(
2371         unsigned char *cdb,
2372         struct se_cmd *cmd,
2373         int *ret)
2374 {
2375         /*
2376          * Assume TYPE_DISK for non struct se_device objects.
2377          * Use 32-bit sector value.
2378          */
2379         return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2380                     (cdb[30] << 8) + cdb[31];
2381
2382 }
2383
2384 static inline u32 transport_get_size(
2385         u32 sectors,
2386         unsigned char *cdb,
2387         struct se_cmd *cmd)
2388 {
2389         struct se_device *dev = cmd->se_dev;
2390
2391         if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2392                 if (cdb[1] & 1) { /* sectors */
2393                         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2394                 } else /* bytes */
2395                         return sectors;
2396         }
2397
2398         pr_debug("Returning block_size: %u, sectors: %u == %u for"
2399                 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
2400                 sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2401                 dev->transport->name);
2402
2403         return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
2404 }
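
/*
 * Worked example (illustrative): with a 512-byte block_size, a TYPE_DISK
 * command for 8 sectors yields transport_get_size(8, cdb, cmd) == 4096.
 */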
2405
2406 static void transport_xor_callback(struct se_cmd *cmd)
2407 {
2408         unsigned char *buf, *addr;
2409         struct scatterlist *sg;
2410         unsigned int offset;
2411         int i;
2412         int count;
2413         /*
2414          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2415          *
2416          * 1) read the specified logical block(s);
2417          * 2) transfer logical blocks from the data-out buffer;
2418          * 3) XOR the logical blocks transferred from the data-out buffer with
2419          *    the logical blocks read, storing the resulting XOR data in a buffer;
2420          * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2421          *    blocks transferred from the data-out buffer; and
2422          * 5) transfer the resulting XOR data to the data-in buffer.
2423          */
2424         buf = kmalloc(cmd->data_length, GFP_KERNEL);
2425         if (!buf) {
2426                 pr_err("Unable to allocate xor_callback buf\n");
2427                 return;
2428         }
2429         /*
2430          * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
2431          * into the locally allocated *buf
2432          */
2433         sg_copy_to_buffer(cmd->t_data_sg,
2434                           cmd->t_data_nents,
2435                           buf,
2436                           cmd->data_length);
2437
2438         /*
2439          * Now perform the XOR against the BIDI read memory located at
2440          * cmd->t_mem_bidi_list
2441          */
2442
2443         offset = 0;
2444         for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2445                 addr = kmap_atomic(sg_page(sg));
2446                 if (!addr)
2447                         goto out;
2448
2449                 for (i = 0; i < sg->length; i++)
2450                         *(addr + sg->offset + i) ^= *(buf + offset + i);
2451
2452                 offset += sg->length;
2453                 kunmap_atomic(addr);
2454         }
2455
2456 out:
2457         kfree(buf);
2458 }
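
/*
 * Sketch of the XOR step above: each data-in byte becomes
 * (data-out byte) ^ (byte read from media), e.g. 0x5A ^ 0xFF == 0xA5.
 */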
2459
2460 /*
2461  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2462  */
2463 static int transport_get_sense_data(struct se_cmd *cmd)
2464 {
2465         unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2466         struct se_device *dev = cmd->se_dev;
2467         struct se_task *task = NULL, *task_tmp;
2468         unsigned long flags;
2469         u32 offset = 0;
2470
2471         WARN_ON(!cmd->se_lun);
2472
2473         if (!dev)
2474                 return 0;
2475
2476         spin_lock_irqsave(&cmd->t_state_lock, flags);
2477         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2478                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2479                 return 0;
2480         }
2481
2482         list_for_each_entry_safe(task, task_tmp,
2483                                 &cmd->t_task_list, t_list) {
2484                 if (!(task->task_flags & TF_HAS_SENSE))
2485                         continue;
2486
2487                 if (!dev->transport->get_sense_buffer) {
2488                         pr_err("dev->transport->get_sense_buffer"
2489                                         " is NULL\n");
2490                         continue;
2491                 }
2492
2493                 sense_buffer = dev->transport->get_sense_buffer(task);
2494                 if (!sense_buffer) {
2495                         pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
2496                                 " sense buffer for task with sense\n",
2497                                 cmd->se_tfo->get_task_tag(cmd), task);
2498                         continue;
2499                 }
2500                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2501
2502                 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
2503                                 TRANSPORT_SENSE_BUFFER);
2504
2505                 memcpy(&buffer[offset], sense_buffer,
2506                                 TRANSPORT_SENSE_BUFFER);
2507                 cmd->scsi_status = task->task_scsi_status;
2508                 /* Automatically padded */
2509                 cmd->scsi_sense_length =
2510                                 (TRANSPORT_SENSE_BUFFER + offset);
2511
2512                 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
2513                                 " and sense\n",
2514                         dev->se_hba->hba_id, dev->transport->name,
2515                                 cmd->scsi_status);
2516                 return 0;
2517         }
2518         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2519
2520         return -1;
2521 }
2522
2523 static inline long long transport_dev_end_lba(struct se_device *dev)
2524 {
2525         return dev->transport->get_blocks(dev) + 1;
2526 }
2527
2528 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2529 {
2530         struct se_device *dev = cmd->se_dev;
2531         u32 sectors;
2532
2533         if (dev->transport->get_device_type(dev) != TYPE_DISK)
2534                 return 0;
2535
2536         sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2537
2538         if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2539                 pr_err("LBA: %llu Sectors: %u exceeds"
2540                         " transport_dev_end_lba(): %llu\n",
2541                         cmd->t_task_lba, sectors,
2542                         transport_dev_end_lba(dev));
2543                 return -EINVAL;
2544         }
2545
2546         return 0;
2547 }
2548
2549 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2550 {
2551         /*
2552          * Determine if the received WRITE_SAME is used for direct
2553          * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
2554          * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
2555          * emulation for Linux/BLOCK discard with the TCM/IBLOCK code.
2556          */
2557         int passthrough = (dev->transport->transport_type ==
2558                                 TRANSPORT_PLUGIN_PHBA_PDEV);
2559
2560         if (!passthrough) {
2561                 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2562                         pr_err("WRITE_SAME PBDATA and LBDATA"
2563                                 " bits not supported for Block Discard"
2564                                 " Emulation\n");
2565                         return -ENOSYS;
2566                 }
2567                 /*
2568                  * Currently for the emulated case we only accept
2569                  * tpws with the UNMAP=1 bit set.
2570                  * WRITE_SAME requests with the UNMAP=1 bit set.
2571                 if (!(flags[0] & 0x08)) {
2572                         pr_err("WRITE_SAME w/o UNMAP bit not"
2573                                 " supported for Block Discard Emulation\n");
2574                         return -ENOSYS;
2575                 }
2576         }
2577
2578         return 0;
2579 }
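
/*
 * Flags byte sketch (illustrative): callers pass the CDB byte carrying
 * UNMAP (0x08), PBDATA (0x04) and LBDATA (0x02).  For the emulated
 * (non-passthrough) path only UNMAP=1 with PBDATA=0 and LBDATA=0 passes.
 */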
2580
2581 /*      transport_generic_cmd_sequencer():
2582  *
2583  *      Generic Command Sequencer that should work for most DAS transport
2584  *      drivers.
2585  *
2586  *      Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2587  *      RX Thread.
2588  *
2589  *      FIXME: Need to support other SCSI OPCODEs as well.
2590  */
2591 static int transport_generic_cmd_sequencer(
2592         struct se_cmd *cmd,
2593         unsigned char *cdb)
2594 {
2595         struct se_device *dev = cmd->se_dev;
2596         struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2597         int ret = 0, sector_ret = 0, passthrough;
2598         u32 sectors = 0, size = 0, pr_reg_type = 0;
2599         u16 service_action;
2600         u8 alua_ascq = 0;
2601         /*
2602          * Check for an existing UNIT ATTENTION condition
2603          */
2604         if (core_scsi3_ua_check(cmd, cdb) < 0) {
2605                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2606                 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2607                 return -EINVAL;
2608         }
2609         /*
2610          * Check status of Asymmetric Logical Unit Assignment port
2611          */
2612         ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2613         if (ret != 0) {
2614                 /*
2615                  * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2616                  * The ALUA additional sense code qualifier (ASCQ) is determined
2617                  * by the ALUA primary or secondary access state.
2618                  */
2619                 if (ret > 0) {
2620                         pr_debug("[%s]: ALUA TG Port not available,"
2621                                 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2622                                 cmd->se_tfo->get_fabric_name(), alua_ascq);
2623
2624                         transport_set_sense_codes(cmd, 0x04, alua_ascq);
2625                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2626                         cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2627                         return -EINVAL;
2628                 }
2629                 goto out_invalid_cdb_field;
2630         }
2631         /*
2632          * Check status for SPC-3 Persistent Reservations
2633          */
2634         if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2635                 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2636                                         cmd, cdb, pr_reg_type) != 0) {
2637                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2638                         cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2639                         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2640                         cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2641                         return -EBUSY;
2642                 }
2643                 /*
2644                  * This means the CDB is allowed for the SCSI Initiator port
2645                  * when said port is *NOT* holding the legacy SPC-2 or
2646                  * SPC-3 Persistent Reservation.
2647                  */
2648         }
2649
2650         /*
2651          * If we operate in passthrough mode we skip most CDB emulation and
2652          * instead hand the commands down to the physical SCSI device.
2653          */
2654         passthrough =
2655                 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2656
2657         switch (cdb[0]) {
2658         case READ_6:
2659                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2660                 if (sector_ret)
2661                         goto out_unsupported_cdb;
2662                 size = transport_get_size(sectors, cdb, cmd);
2663                 cmd->t_task_lba = transport_lba_21(cdb);
2664                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2665                 break;
2666         case READ_10:
2667                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2668                 if (sector_ret)
2669                         goto out_unsupported_cdb;
2670                 size = transport_get_size(sectors, cdb, cmd);
2671                 cmd->t_task_lba = transport_lba_32(cdb);
2672                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2673                 break;
2674         case READ_12:
2675                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2676                 if (sector_ret)
2677                         goto out_unsupported_cdb;
2678                 size = transport_get_size(sectors, cdb, cmd);
2679                 cmd->t_task_lba = transport_lba_32(cdb);
2680                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2681                 break;
2682         case READ_16:
2683                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2684                 if (sector_ret)
2685                         goto out_unsupported_cdb;
2686                 size = transport_get_size(sectors, cdb, cmd);
2687                 cmd->t_task_lba = transport_lba_64(cdb);
2688                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2689                 break;
2690         case WRITE_6:
2691                 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2692                 if (sector_ret)
2693                         goto out_unsupported_cdb;
2694                 size = transport_get_size(sectors, cdb, cmd);
2695                 cmd->t_task_lba = transport_lba_21(cdb);
2696                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2697                 break;
2698         case WRITE_10:
2699                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2700                 if (sector_ret)
2701                         goto out_unsupported_cdb;
2702                 size = transport_get_size(sectors, cdb, cmd);
2703                 cmd->t_task_lba = transport_lba_32(cdb);
2704                 if (cdb[1] & 0x8)
2705                         cmd->se_cmd_flags |= SCF_FUA;
2706                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2707                 break;
2708         case WRITE_12:
2709                 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2710                 if (sector_ret)
2711                         goto out_unsupported_cdb;
2712                 size = transport_get_size(sectors, cdb, cmd);
2713                 cmd->t_task_lba = transport_lba_32(cdb);
2714                 if (cdb[1] & 0x8)
2715                         cmd->se_cmd_flags |= SCF_FUA;
2716                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2717                 break;
2718         case WRITE_16:
2719                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2720                 if (sector_ret)
2721                         goto out_unsupported_cdb;
2722                 size = transport_get_size(sectors, cdb, cmd);
2723                 cmd->t_task_lba = transport_lba_64(cdb);
2724                 if (cdb[1] & 0x8)
2725                         cmd->se_cmd_flags |= SCF_FUA;
2726                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2727                 break;
2728         case XDWRITEREAD_10:
2729                 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2730                     !(cmd->se_cmd_flags & SCF_BIDI))
2731                         goto out_invalid_cdb_field;
2732                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2733                 if (sector_ret)
2734                         goto out_unsupported_cdb;
2735                 size = transport_get_size(sectors, cdb, cmd);
2736                 cmd->t_task_lba = transport_lba_32(cdb);
2737                 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2738
2739                 /*
2740                  * Do not allow BIDI commands for passthrough mode.
2741                  */
2742                 if (passthrough)
2743                         goto out_unsupported_cdb;
2744
2745                 /*
2746                  * Setup BIDI XOR callback to be run after I/O completion.
2747                  */
2748                 cmd->transport_complete_callback = &transport_xor_callback;
2749                 if (cdb[1] & 0x8)
2750                         cmd->se_cmd_flags |= SCF_FUA;
2751                 break;
2752         case VARIABLE_LENGTH_CMD:
2753                 service_action = get_unaligned_be16(&cdb[8]);
2754                 switch (service_action) {
2755                 case XDWRITEREAD_32:
2756                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2757                         if (sector_ret)
2758                                 goto out_unsupported_cdb;
2759                         size = transport_get_size(sectors, cdb, cmd);
2760                         /*
2761                          * Use WRITE_32 and READ_32 opcodes for the emulated
2762                          * XDWRITE_READ_32 logic.
2763                          */
2764                         cmd->t_task_lba = transport_lba_64_ext(cdb);
2765                         cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2766
2767                         /*
2768                          * Do not allow BIDI commands for passthrough mode.
2769                          */
2770                         if (passthrough)
2771                                 goto out_unsupported_cdb;
2772
2773                         /*
2774                          * Setup BIDI XOR callback to be run after I/O
2775                          * completion.
2776                          */
2777                         cmd->transport_complete_callback = &transport_xor_callback;
2778                         if (cdb[1] & 0x8)
2779                                 cmd->se_cmd_flags |= SCF_FUA;
2780                         break;
2781                 case WRITE_SAME_32:
2782                         sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2783                         if (sector_ret)
2784                                 goto out_unsupported_cdb;
2785
2786                         if (sectors)
2787                                 size = transport_get_size(1, cdb, cmd);
2788                         else {
2789                                 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2790                                        " supported\n");
2791                                 goto out_invalid_cdb_field;
2792                         }
2793
2794                         cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2795                         cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2796
2797                         if (target_check_write_same_discard(&cdb[10], dev) < 0)
2798                                 goto out_unsupported_cdb;
2799                         if (!passthrough)
2800                                 cmd->execute_task = target_emulate_write_same;
2801                         break;
2802                 default:
2803                         pr_err("VARIABLE_LENGTH_CMD service action"
2804                                 " 0x%04x not supported\n", service_action);
2805                         goto out_unsupported_cdb;
2806                 }
2807                 break;
2808         case MAINTENANCE_IN:
2809                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2810                         /* MAINTENANCE_IN from SCC-2 */
2811                         /*
2812                          * Check for emulated MI_REPORT_TARGET_PGS.
2813                          */
2814                         if (cdb[1] == MI_REPORT_TARGET_PGS &&
2815                             su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2816                                 cmd->execute_task =
2817                                         target_emulate_report_target_port_groups;
2818                         }
2819                         size = (cdb[6] << 24) | (cdb[7] << 16) |
2820                                (cdb[8] << 8) | cdb[9];
2821                 } else {
2822                         /* GPCMD_SEND_KEY from multi media commands */
2823                         size = (cdb[8] << 8) + cdb[9];
2824                 }
2825                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2826                 break;
2827         case MODE_SELECT:
2828                 size = cdb[4];
2829                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2830                 break;
2831         case MODE_SELECT_10:
2832                 size = (cdb[7] << 8) + cdb[8];
2833                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2834                 break;
2835         case MODE_SENSE:
2836                 size = cdb[4];
2837                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2838                 if (!passthrough)
2839                         cmd->execute_task = target_emulate_modesense;
2840                 break;
2841         case MODE_SENSE_10:
2842                 size = (cdb[7] << 8) + cdb[8];
2843                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2844                 if (!passthrough)
2845                         cmd->execute_task = target_emulate_modesense;
2846                 break;
2847         case GPCMD_READ_BUFFER_CAPACITY:
2848         case GPCMD_SEND_OPC:
2849         case LOG_SELECT:
2850         case LOG_SENSE:
2851                 size = (cdb[7] << 8) + cdb[8];
2852                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2853                 break;
2854         case READ_BLOCK_LIMITS:
2855                 size = READ_BLOCK_LEN;
2856                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2857                 break;
2858         case GPCMD_GET_CONFIGURATION:
2859         case GPCMD_READ_FORMAT_CAPACITIES:
2860         case GPCMD_READ_DISC_INFO:
2861         case GPCMD_READ_TRACK_RZONE_INFO:
2862                 size = (cdb[7] << 8) + cdb[8];
2863                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2864                 break;
2865         case PERSISTENT_RESERVE_IN:
2866                 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2867                         cmd->execute_task = target_scsi3_emulate_pr_in;
2868                 size = (cdb[7] << 8) + cdb[8];
2869                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2870                 break;
2871         case PERSISTENT_RESERVE_OUT:
2872                 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2873                         cmd->execute_task = target_scsi3_emulate_pr_out;
2874                 size = (cdb[7] << 8) + cdb[8];
2875                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2876                 break;
2877         case GPCMD_MECHANISM_STATUS:
2878         case GPCMD_READ_DVD_STRUCTURE:
2879                 size = (cdb[8] << 8) + cdb[9];
2880                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2881                 break;
2882         case READ_POSITION:
2883                 size = READ_POSITION_LEN;
2884                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2885                 break;
2886         case MAINTENANCE_OUT:
2887                 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2888                         /* MAINTENANCE_OUT from SCC-2
2889                          *
2890                          * Check for emulated MO_SET_TARGET_PGS.
2891                          */
2892                         if (cdb[1] == MO_SET_TARGET_PGS &&
2893                             su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2894                                 cmd->execute_task =
2895                                         target_emulate_set_target_port_groups;
2896                         }
2897
2898                         size = (cdb[6] << 24) | (cdb[7] << 16) |
2899                                (cdb[8] << 8) | cdb[9];
2900                 } else {
2901                         /* GPCMD_REPORT_KEY from multi media commands */
2902                         size = (cdb[8] << 8) + cdb[9];
2903                 }
2904                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2905                 break;
2906         case INQUIRY:
2907                 size = (cdb[3] << 8) + cdb[4];
2908                 /*
2909                  * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2910                  * See spc4r17 section 5.3
2911                  */
2912                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2913                         cmd->sam_task_attr = MSG_HEAD_TAG;
2914                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2915                 if (!passthrough)
2916                         cmd->execute_task = target_emulate_inquiry;
2917                 break;
2918         case READ_BUFFER:
2919                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2920                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2921                 break;
2922         case READ_CAPACITY:
2923                 size = READ_CAP_LEN;
2924                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2925                 if (!passthrough)
2926                         cmd->execute_task = target_emulate_readcapacity;
2927                 break;
2928         case READ_MEDIA_SERIAL_NUMBER:
2929         case SECURITY_PROTOCOL_IN:
2930         case SECURITY_PROTOCOL_OUT:
2931                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2932                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2933                 break;
2934         case SERVICE_ACTION_IN:
2935                 switch (cmd->t_task_cdb[1] & 0x1f) {
2936                 case SAI_READ_CAPACITY_16:
2937                         if (!passthrough)
2938                                 cmd->execute_task =
2939                                         target_emulate_readcapacity_16;
2940                         break;
2941                 default:
2942                         if (passthrough)
2943                                 break;
2944
2945                         pr_err("Unsupported SA: 0x%02x\n",
2946                                 cmd->t_task_cdb[1] & 0x1f);
2947                         goto out_invalid_cdb_field;
2948                 }
2949                 /* FALLTHROUGH */
2950         case ACCESS_CONTROL_IN:
2951         case ACCESS_CONTROL_OUT:
2952         case EXTENDED_COPY:
2953         case READ_ATTRIBUTE:
2954         case RECEIVE_COPY_RESULTS:
2955         case WRITE_ATTRIBUTE:
2956                 size = (cdb[10] << 24) | (cdb[11] << 16) |
2957                        (cdb[12] << 8) | cdb[13];
2958                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2959                 break;
2960         case RECEIVE_DIAGNOSTIC:
2961         case SEND_DIAGNOSTIC:
2962                 size = (cdb[3] << 8) | cdb[4];
2963                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2964                 break;
2965 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2966 #if 0
2967         case GPCMD_READ_CD:
2968                 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2969                 size = (2336 * sectors);
2970                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2971                 break;
2972 #endif
2973         case READ_TOC:
2974                 size = cdb[8];
2975                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2976                 break;
2977         case REQUEST_SENSE:
2978                 size = cdb[4];
2979                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2980                 if (!passthrough)
2981                         cmd->execute_task = target_emulate_request_sense;
2982                 break;
2983         case READ_ELEMENT_STATUS:
2984                 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2985                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2986                 break;
2987         case WRITE_BUFFER:
2988                 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2989                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2990                 break;
2991         case RESERVE:
2992         case RESERVE_10:
2993                 /*
2994                  * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2995                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
2996                  */
2997                 if (cdb[0] == RESERVE_10)
2998                         size = (cdb[7] << 8) | cdb[8];
2999                 else
3000                         size = cmd->data_length;
3001
3002                 /*
3003                  * Setup the legacy emulated handler for SPC-2 and
3004                  * >= SPC-3 compatible reservation handling (CRH=1).
3005                  * Otherwise, we assume the underlying SCSI logic is
3006                  * running in SPC_PASSTHROUGH, and wants reservation
3007                  * emulation disabled.
3008                  */
3009                 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
3010                         cmd->execute_task = target_scsi2_reservation_reserve;
3011                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3012                 break;
3013         case RELEASE:
3014         case RELEASE_10:
3015                 /*
3016                  * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3017                  * Assume the passthrough or $FABRIC_MOD will tell us about it.
3018                  */
3019                 if (cdb[0] == RELEASE_10)
3020                         size = (cdb[7] << 8) | cdb[8];
3021                 else
3022                         size = cmd->data_length;
3023
3024                 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
3025                         cmd->execute_task = target_scsi2_reservation_release;
3026                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3027                 break;
3028         case SYNCHRONIZE_CACHE:
3029         case SYNCHRONIZE_CACHE_16:
3030                 /*
3031                  * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3032                  */
3033                 if (cdb[0] == SYNCHRONIZE_CACHE) {
3034                         sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3035                         cmd->t_task_lba = transport_lba_32(cdb);
3036                 } else {
3037                         sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3038                         cmd->t_task_lba = transport_lba_64(cdb);
3039                 }
3040                 if (sector_ret)
3041                         goto out_unsupported_cdb;
3042
3043                 size = transport_get_size(sectors, cdb, cmd);
3044                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3045
3046                 if (passthrough)
3047                         break;
3048
3049                 /*
3050                  * Check to ensure that LBA + Range does not extend past the end
3051                  * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3052                  */
3053                 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3054                         if (transport_cmd_get_valid_sectors(cmd) < 0)
3055                                 goto out_invalid_cdb_field;
3056                 }
3057                 cmd->execute_task = target_emulate_synchronize_cache;
3058                 break;
3059         case UNMAP:
3060                 size = get_unaligned_be16(&cdb[7]);
3061                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3062                 if (!passthrough)
3063                         cmd->execute_task = target_emulate_unmap;
3064                 break;
3065         case WRITE_SAME_16:
3066                 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3067                 if (sector_ret)
3068                         goto out_unsupported_cdb;
3069
3070                 if (sectors) {
3071                         size = transport_get_size(1, cdb, cmd);
3072                 } else {
3073                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3074                         goto out_invalid_cdb_field;
3075                 }
3076
3077                 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3078                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3079
3080                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3081                         goto out_unsupported_cdb;
3082                 if (!passthrough)
3083                         cmd->execute_task = target_emulate_write_same;
3084                 break;
3085         case WRITE_SAME:
3086                 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3087                 if (sector_ret)
3088                         goto out_unsupported_cdb;
3089
3090                 if (sectors) {
3091                         size = transport_get_size(1, cdb, cmd);
3092                 } else {
3093                         pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3094                         goto out_invalid_cdb_field;
3095                 }
3096
3097                 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3098                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3099                 /*
3100                  * Follow sbc3r26 with WRITE_SAME (10) and check for the existence
3101                  * of byte 1 bit 3 UNMAP instead of the original reserved field
3102                  */
3103                 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3104                         goto out_unsupported_cdb;
3105                 if (!passthrough)
3106                         cmd->execute_task = target_emulate_write_same;
3107                 break;
3108         case ALLOW_MEDIUM_REMOVAL:
3109         case ERASE:
3110         case REZERO_UNIT:
3111         case SEEK_10:
3112         case SPACE:
3113         case START_STOP:
3114         case TEST_UNIT_READY:
3115         case VERIFY:
3116         case WRITE_FILEMARKS:
3117                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3118                 if (!passthrough)
3119                         cmd->execute_task = target_emulate_noop;
3120                 break;
3121         case GPCMD_CLOSE_TRACK:
3122         case INITIALIZE_ELEMENT_STATUS:
3123         case GPCMD_LOAD_UNLOAD:
3124         case GPCMD_SET_SPEED:
3125         case MOVE_MEDIUM:
3126                 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3127                 break;
3128         case REPORT_LUNS:
3129                 cmd->execute_task = target_report_luns;
3130                 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3131                 /*
3132                  * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3133                  * See spc4r17 section 5.3
3134                  */
3135                 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3136                         cmd->sam_task_attr = MSG_HEAD_TAG;
3137                 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3138                 break;
3139         default:
3140                 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3141                         " 0x%02x, sending CHECK_CONDITION.\n",
3142                         cmd->se_tfo->get_fabric_name(), cdb[0]);
3143                 goto out_unsupported_cdb;
3144         }
3145
3146         if (cmd->unknown_data_length)
3147                 cmd->data_length = size;
3148
3149         if (size != cmd->data_length) {
3150                 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3151                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3152                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3153                                 cmd->data_length, size, cdb[0]);
3154
3155                 cmd->cmd_spdtl = size;
3156
3157                 if (cmd->data_direction == DMA_TO_DEVICE) {
3158                         pr_err("Rejecting underflow/overflow"
3159                                         " WRITE data\n");
3160                         goto out_invalid_cdb_field;
3161                 }
3162                 /*
3163                  * Reject READ_* or WRITE_* with overflow/underflow for
3164                  * type SCF_SCSI_DATA_SG_IO_CDB.
3165                  */
3166                 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3167                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3168                                 " CDB on non 512-byte sector setup subsystem"
3169                                 " plugin: %s\n", dev->transport->name);
3170                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3171                         goto out_invalid_cdb_field;
3172                 }
3173
3174                 if (size > cmd->data_length) {
3175                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3176                         cmd->residual_count = (size - cmd->data_length);
3177                 } else {
3178                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3179                         cmd->residual_count = (cmd->data_length - size);
3180                 }
3181                 cmd->data_length = size;
3182         }
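        /*
         * Worked example of the residual handling above (illustrative):
         * if the CDB-derived size is 4096 but the fabric expected
         * data_length = 512, SCF_OVERFLOW_BIT is set with
         * residual_count = 3584 and data_length is corrected to 4096;
         * the reverse case sets SCF_UNDERFLOW_BIT instead.
         */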
3183
3184         if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
3185             (sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors ||
3186              sectors > dev->se_sub_dev->se_dev_attrib.max_sectors)) {
3187                 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
3188                                    cdb[0], sectors);
3189                 goto out_invalid_cdb_field;
3190         }
3191
3192         /* reject any command that we don't have a handler for */
3193         if (!(passthrough || cmd->execute_task ||
3194              (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3195                 goto out_unsupported_cdb;
3196
3197         transport_set_supported_SAM_opcode(cmd);
3198         return ret;
3199
3200 out_unsupported_cdb:
3201         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3202         cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3203         return -EINVAL;
3204 out_invalid_cdb_field:
3205         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3206         cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3207         return -EINVAL;
3208 }
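/*
 * Note on the allocation length decoding above (illustrative, no
 * functional change implied): the open-coded big-endian shifts are
 * equivalent to the asm/unaligned.h helpers already used in this file:
 *
 *	(cdb[7] << 8) + cdb[8]			== get_unaligned_be16(&cdb[7])
 *	(cdb[6] << 24) | (cdb[7] << 16) |
 *	(cdb[8] << 8) | cdb[9]			== get_unaligned_be32(&cdb[6])
 */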
3209
3210 /*
3211  * Called from I/O completion to determine which dormant/delayed
3212  * and ordered cmds need to have their tasks added to the execution queue.
3213  */
3214 static void transport_complete_task_attr(struct se_cmd *cmd)
3215 {
3216         struct se_device *dev = cmd->se_dev;
3217         struct se_cmd *cmd_p, *cmd_tmp;
3218         int new_active_tasks = 0;
3219
3220         if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3221                 atomic_dec(&dev->simple_cmds);
3222                 smp_mb__after_atomic_dec();
3223                 dev->dev_cur_ordered_id++;
3224                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3225                         " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3226                         cmd->se_ordered_id);
3227         } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3228                 dev->dev_cur_ordered_id++;
3229                 pr_debug("Incremented dev_cur_ordered_id: %u for"
3230                         " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3231                         cmd->se_ordered_id);
3232         } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3233                 atomic_dec(&dev->dev_ordered_sync);
3234                 smp_mb__after_atomic_dec();
3235
3236                 dev->dev_cur_ordered_id++;
3237                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3238                         " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3239         }
3240         /*
3241          * Process all commands up to the last received
3242          * ORDERED task attribute, which requires another blocking
3243          * boundary.
3244          */
3245         spin_lock(&dev->delayed_cmd_lock);
3246         list_for_each_entry_safe(cmd_p, cmd_tmp,
3247                         &dev->delayed_cmd_list, se_delayed_node) {
3248
3249                 list_del(&cmd_p->se_delayed_node);
3250                 spin_unlock(&dev->delayed_cmd_lock);
3251
3252                 pr_debug("Calling add_tasks() for"
3253                         " cmd_p: 0x%02x Task Attr: 0x%02x"
3254                         " Dormant -> Active, se_ordered_id: %u\n",
3255                         cmd_p->t_task_cdb[0],
3256                         cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3257
3258                 transport_add_tasks_from_cmd(cmd_p);
3259                 new_active_tasks++;
3260
3261                 spin_lock(&dev->delayed_cmd_lock);
3262                 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3263                         break;
3264         }
3265         spin_unlock(&dev->delayed_cmd_lock);
3266         /*
3267          * If new tasks have become active, wake up the transport thread
3268          * to do the processing of the Active tasks.
3269          */
3270         if (new_active_tasks != 0)
3271                 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3272 }
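/*
 * Example of the barrier semantics above (illustrative): with a delayed
 * list of [SIMPLE, SIMPLE, ORDERED, SIMPLE], one completion releases the
 * two SIMPLE commands plus the ORDERED command and then stops, leaving
 * the trailing SIMPLE queued behind the new ORDERED boundary.
 */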
3273
3274 static void transport_complete_qf(struct se_cmd *cmd)
3275 {
3276         int ret = 0;
3277
3278         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3279                 transport_complete_task_attr(cmd);
3280
3281         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3282                 ret = cmd->se_tfo->queue_status(cmd);
3283                 if (ret)
3284                         goto out;
3285         }
3286
3287         switch (cmd->data_direction) {
3288         case DMA_FROM_DEVICE:
3289                 ret = cmd->se_tfo->queue_data_in(cmd);
3290                 break;
3291         case DMA_TO_DEVICE:
3292                 if (cmd->t_bidi_data_sg) {
3293                         ret = cmd->se_tfo->queue_data_in(cmd);
3294                         if (ret < 0)
3295                                 break;
3296                 }
3297                 /* Fall through for DMA_TO_DEVICE */
3298         case DMA_NONE:
3299                 ret = cmd->se_tfo->queue_status(cmd);
3300                 break;
3301         default:
3302                 break;
3303         }
3304
3305 out:
3306         if (ret < 0) {
3307                 transport_handle_queue_full(cmd, cmd->se_dev);
3308                 return;
3309         }
3310         transport_lun_remove_cmd(cmd);
3311         transport_cmd_check_stop_to_fabric(cmd);
3312 }
3313
3314 static void transport_handle_queue_full(
3315         struct se_cmd *cmd,
3316         struct se_device *dev)
3317 {
3318         spin_lock_irq(&dev->qf_cmd_lock);
3319         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3320         atomic_inc(&dev->dev_qf_count);
3321         smp_mb__after_atomic_inc();
3322         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3323
3324         schedule_work(&cmd->se_dev->qf_work_queue);
3325 }
3326
3327 static void target_complete_ok_work(struct work_struct *work)
3328 {
3329         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3330         int reason = 0, ret;
3331
3332         /*
3333          * Check if we need to move delayed/dormant tasks from cmds on the
3334          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3335          * Attribute.
3336          */
3337         if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3338                 transport_complete_task_attr(cmd);
3339         /*
3340          * Check whether QUEUE_FULL work needs to be scheduled for
3341          * commands still waiting on this device's qf_cmd_list.
3342          */
3343         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3344                 schedule_work(&cmd->se_dev->qf_work_queue);
3345
3346         /*
3347          * Check if we need to retrieve a sense buffer from
3348          * the struct se_cmd in question.
3349          */
3350         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3351                 if (transport_get_sense_data(cmd) < 0)
3352                         reason = TCM_NON_EXISTENT_LUN;
3353
3354                 /*
3355                  * Only set when a struct se_task->task_scsi_status returned
3356                  * a non-GOOD status.
3357                  */
3358                 if (cmd->scsi_status) {
3359                         ret = transport_send_check_condition_and_sense(
3360                                         cmd, reason, 1);
3361                         if (ret == -EAGAIN || ret == -ENOMEM)
3362                                 goto queue_full;
3363
3364                         transport_lun_remove_cmd(cmd);
3365                         transport_cmd_check_stop_to_fabric(cmd);
3366                         return;
3367                 }
3368         }
3369         /*
3370          * Check for a callback, used by, amongst other things,
3371          * XDWRITE_READ_10 emulation.
3372          */
3373         if (cmd->transport_complete_callback)
3374                 cmd->transport_complete_callback(cmd);
3375
3376         switch (cmd->data_direction) {
3377         case DMA_FROM_DEVICE:
3378                 spin_lock(&cmd->se_lun->lun_sep_lock);
3379                 if (cmd->se_lun->lun_sep) {
3380                         cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3381                                         cmd->data_length;
3382                 }
3383                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3384
3385                 ret = cmd->se_tfo->queue_data_in(cmd);
3386                 if (ret == -EAGAIN || ret == -ENOMEM)
3387                         goto queue_full;
3388                 break;
3389         case DMA_TO_DEVICE:
3390                 spin_lock(&cmd->se_lun->lun_sep_lock);
3391                 if (cmd->se_lun->lun_sep) {
3392                         cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3393                                 cmd->data_length;
3394                 }
3395                 spin_unlock(&cmd->se_lun->lun_sep_lock);
3396                 /*
3397                  * Check if we need to send READ payload for BIDI-COMMAND
3398                  */
3399                 if (cmd->t_bidi_data_sg) {
3400                         spin_lock(&cmd->se_lun->lun_sep_lock);
3401                         if (cmd->se_lun->lun_sep) {
3402                                 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3403                                         cmd->data_length;
3404                         }
3405                         spin_unlock(&cmd->se_lun->lun_sep_lock);
3406                         ret = cmd->se_tfo->queue_data_in(cmd);
3407                         if (ret == -EAGAIN || ret == -ENOMEM)
3408                                 goto queue_full;
3409                         break;
3410                 }
3411                 /* Fall through for DMA_TO_DEVICE */
3412         case DMA_NONE:
3413                 ret = cmd->se_tfo->queue_status(cmd);
3414                 if (ret == -EAGAIN || ret == -ENOMEM)
3415                         goto queue_full;
3416                 break;
3417         default:
3418                 break;
3419         }
3420
3421         transport_lun_remove_cmd(cmd);
3422         transport_cmd_check_stop_to_fabric(cmd);
3423         return;
3424
3425 queue_full:
3426         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3427                 " data_direction: %d\n", cmd, cmd->data_direction);
3428         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3429         transport_handle_queue_full(cmd, cmd->se_dev);
3430 }
3431
3432 static void transport_free_dev_tasks(struct se_cmd *cmd)
3433 {
3434         struct se_task *task, *task_tmp;
3435         unsigned long flags;
3436         LIST_HEAD(dispose_list);
3437
3438         spin_lock_irqsave(&cmd->t_state_lock, flags);
3439         list_for_each_entry_safe(task, task_tmp,
3440                                 &cmd->t_task_list, t_list) {
3441                 if (!(task->task_flags & TF_ACTIVE))
3442                         list_move_tail(&task->t_list, &dispose_list);
3443         }
3444         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3445
3446         while (!list_empty(&dispose_list)) {
3447                 task = list_first_entry(&dispose_list, struct se_task, t_list);
3448
3449                 list_del(&task->t_list);
3450                 cmd->se_dev->transport->free_task(task);
3451         }
3452 }
3453
3454 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3455 {
3456         struct scatterlist *sg;
3457         int count;
3458
3459         for_each_sg(sgl, sg, nents, count)
3460                 __free_page(sg_page(sg));
3461
3462         kfree(sgl);
3463 }
3464
3465 static inline void transport_free_pages(struct se_cmd *cmd)
3466 {
3467         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3468                 return;
3469
3470         transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3471         cmd->t_data_sg = NULL;
3472         cmd->t_data_nents = 0;
3473
3474         transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3475         cmd->t_bidi_data_sg = NULL;
3476         cmd->t_bidi_data_nents = 0;
3477 }
3478
3479 /**
3480  * transport_release_cmd - free a command
3481  * @cmd:       command to free
3482  *
3483  * This routine unconditionally frees a command, and reference counting
3484  * or list removal must be done in the caller.
3485  */
3486 static void transport_release_cmd(struct se_cmd *cmd)
3487 {
3488         BUG_ON(!cmd->se_tfo);
3489
3490         if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
3491                 core_tmr_release_req(cmd->se_tmr_req);
3492         if (cmd->t_task_cdb != cmd->__t_task_cdb)
3493                 kfree(cmd->t_task_cdb);
3494         /*
3495          * If this cmd has been set up with target_get_sess_cmd(), drop
3496          * the kref and call ->release_cmd() in the kref callback.
3497          */
3498         if (cmd->check_release != 0) {
3499                 target_put_sess_cmd(cmd->se_sess, cmd);
3500                 return;
3501         }
3502         cmd->se_tfo->release_cmd(cmd);
3503 }
3504
3505 /**
3506  * transport_put_cmd - release a reference to a command
3507  * @cmd:       command to release
3508  *
3509  * This routine releases our reference to the command and frees it if possible.
3510  */
3511 static void transport_put_cmd(struct se_cmd *cmd)
3512 {
3513         unsigned long flags;
3514         int free_tasks = 0;
3515
3516         spin_lock_irqsave(&cmd->t_state_lock, flags);
3517         if (atomic_read(&cmd->t_fe_count)) {
3518                 if (!atomic_dec_and_test(&cmd->t_fe_count))
3519                         goto out_busy;
3520         }
3521
3522         if (atomic_read(&cmd->t_se_count)) {
3523                 if (!atomic_dec_and_test(&cmd->t_se_count))
3524                         goto out_busy;
3525         }
3526
3527         if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
3528                 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
3529                 transport_all_task_dev_remove_state(cmd);
3530                 free_tasks = 1;
3531         }
3532         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3533
3534         if (free_tasks != 0)
3535                 transport_free_dev_tasks(cmd);
3536
3537         transport_free_pages(cmd);
3538         transport_release_cmd(cmd);
3539         return;
3540 out_busy:
3541         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3542 }
3543
3544 /*
3545  * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
3546  * allocating in the core.
3547  * @cmd:  Associated se_cmd descriptor
3548  * @sgl:  SGL style memory for TCM WRITE / READ
3549  * @sgl_count: Number of SGL elements
3550  * @sgl_bidi: SGL style memory for TCM BIDI READ
3551  * @sgl_bidi_count: Number of BIDI READ SGL elements
3552  *
3553  * Return: nonzero if the cmd was rejected due to -ENOMEM or improper
3554  * usage of parameters.
3555  */
3556 int transport_generic_map_mem_to_cmd(
3557         struct se_cmd *cmd,
3558         struct scatterlist *sgl,
3559         u32 sgl_count,
3560         struct scatterlist *sgl_bidi,
3561         u32 sgl_bidi_count)
3562 {
3563         if (!sgl || !sgl_count)
3564                 return 0;
3565
3566         if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3567             (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3568                 /*
3569                  * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3570                  * scatterlists already have been set to follow what the fabric
3571                  * passes for the original expected data transfer length.
3572                  */
3573                 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3574                         pr_warn("Rejecting SCSI DATA overflow for fabric using"
3575                                 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3576                         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3577                         cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3578                         return -EINVAL;
3579                 }
3580
3581                 cmd->t_data_sg = sgl;
3582                 cmd->t_data_nents = sgl_count;
3583
3584                 if (sgl_bidi && sgl_bidi_count) {
3585                         cmd->t_bidi_data_sg = sgl_bidi;
3586                         cmd->t_bidi_data_nents = sgl_bidi_count;
3587                 }
3588                 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3589         }
3590
3591         return 0;
3592 }
3593 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
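/*
 * Illustrative sketch of a caller (hypothetical fabric code, not part of
 * this driver): a fabric that already owns pages for a command can hand
 * its scatterlist to the core instead of having transport_generic_get_mem()
 * allocate one; SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC then keeps
 * transport_free_pages() from freeing fabric-owned memory.
 */
#if 0
static int example_fabric_map_sgl(struct se_cmd *se_cmd,
                                  struct scatterlist *sgl, u32 sgl_count)
{
        /* No BIDI READ payload in this example, so pass NULL/0 */
        return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                                NULL, 0);
}
#endif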
3594
3595 void *transport_kmap_data_sg(struct se_cmd *cmd)
3596 {
3597         struct scatterlist *sg = cmd->t_data_sg;
3598         struct page **pages;
3599         int i;
3600
3601         BUG_ON(!sg);
3602         /*
3603          * We need to take into account a possible offset here for fabrics like
3604          * tcm_loop which may be using a contig buffer from the SCSI midlayer for
3605          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3606          */
3607         if (!cmd->t_data_nents)
3608                 return NULL;
3609         else if (cmd->t_data_nents == 1)
3610                 return kmap(sg_page(sg)) + sg->offset;
3611
3612         /* >1 page. use vmap */
3613         pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3614         if (!pages)
3615                 return NULL;
3616
3617         /* convert sg[] to pages[] */
3618         for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3619                 pages[i] = sg_page(sg);
3620         }
3621
3622         cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3623         kfree(pages);
3624         if (!cmd->t_data_vmap)
3625                 return NULL;
3626
3627         return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
3628 }
3629 EXPORT_SYMBOL(transport_kmap_data_sg);
3630
3631 void transport_kunmap_data_sg(struct se_cmd *cmd)
3632 {
3633         if (!cmd->t_data_nents) {
3634                 return;
3635         } else if (cmd->t_data_nents == 1) {
3636                 kunmap(sg_page(cmd->t_data_sg));
3637                 return;
3638         }
3639
3640         vunmap(cmd->t_data_vmap);
3641         cmd->t_data_vmap = NULL;
3642 }
3643 EXPORT_SYMBOL(transport_kunmap_data_sg);
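/*
 * Illustrative sketch (hypothetical handler, not part of this driver):
 * CDB emulation code typically brackets payload access with the two
 * helpers above, and must handle the NULL return for zero-length or
 * failed mappings.
 */
#if 0
static int example_fill_payload(struct se_cmd *cmd)
{
        unsigned char *buf = transport_kmap_data_sg(cmd);

        if (!buf)
                return -ENOMEM;

        buf[0] = 0x00;  /* fill in response payload bytes here */
        transport_kunmap_data_sg(cmd);
        return 0;
}
#endif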
3644
3645 static int
3646 transport_generic_get_mem(struct se_cmd *cmd)
3647 {
3648         u32 length = cmd->data_length;
3649         unsigned int nents;
3650         struct page *page;
3651         gfp_t zero_flag;
3652         int i = 0;
3653
3654         nents = DIV_ROUND_UP(length, PAGE_SIZE);
3655         cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3656         if (!cmd->t_data_sg)
3657                 return -ENOMEM;
3658
3659         cmd->t_data_nents = nents;
3660         sg_init_table(cmd->t_data_sg, nents);
3661
3662         zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3663
3664         while (length) {
3665                 u32 page_len = min_t(u32, length, PAGE_SIZE);
3666                 page = alloc_page(GFP_KERNEL | zero_flag);
3667                 if (!page)
3668                         goto out;
3669
3670                 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3671                 length -= page_len;
3672                 i++;
3673         }
3674         return 0;
3675
3676 out:
3677         while (i > 0) {         /* free only the pages already allocated */
3678                 i--;
3679                 __free_page(sg_page(&cmd->t_data_sg[i]));
3680         }
3681         kfree(cmd->t_data_sg);
3682         cmd->t_data_sg = NULL;
3683         return -ENOMEM;
3684 }
3685
3686 /* Reduce sectors if they are too big for the device */
3687 static inline sector_t transport_limit_task_sectors(
3688         struct se_device *dev,
3689         unsigned long long lba,
3690         sector_t sectors)
3691 {
3692         sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3693
3694         if (dev->transport->get_device_type(dev) == TYPE_DISK)
3695                 if ((lba + sectors) > transport_dev_end_lba(dev))
3696                         sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3697
3698         return sectors;
3699 }
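/*
 * Worked example (illustrative): with max_sectors = 1024, a request for
 * lba = 100, sectors = 4096 is first clamped to 1024; if a TYPE_DISK
 * device ends at LBA 600, it is clamped again to (600 - 100) + 1 = 501.
 */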
3700
3701 /*
3702  * Break up cmd into chunks transport can handle
3703  */
3704 static int
3705 transport_allocate_data_tasks(struct se_cmd *cmd,
3706         enum dma_data_direction data_direction,
3707         struct scatterlist *cmd_sg, unsigned int sgl_nents)
3708 {
3709         struct se_device *dev = cmd->se_dev;
3710         struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
3711         sector_t sectors;
3712         struct se_task *task;
3713         unsigned long flags;
3714
3715         if (transport_cmd_get_valid_sectors(cmd) < 0)
3716                 return -EINVAL;
3717
3718         sectors = DIV_ROUND_UP(cmd->data_length, attr->block_size);
3719
3720         BUG_ON(cmd->data_length % attr->block_size);
3721         BUG_ON(sectors > attr->max_sectors);
3722
3723         task = transport_generic_get_task(cmd, data_direction);
3724         if (!task)
3725                 return -ENOMEM;
3726
3727         task->task_sg = cmd_sg;
3728         task->task_sg_nents = sgl_nents;
3729         task->task_size = cmd->data_length;
3730
3731         task->task_lba = cmd->t_task_lba;
3732         task->task_sectors = sectors;
3733
3734         spin_lock_irqsave(&cmd->t_state_lock, flags);
3735         list_add_tail(&task->t_list, &cmd->t_task_list);
3736         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3737
3738         return 1;
3739 }
3740
3741 static int
3742 transport_allocate_control_task(struct se_cmd *cmd)
3743 {
3744         struct se_task *task;
3745         unsigned long flags;
3746
3747         /* Workaround for handling zero-length control CDBs */
3748         if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3749             !cmd->data_length)
3750                 return 0;
3751
3752         task = transport_generic_get_task(cmd, cmd->data_direction);
3753         if (!task)
3754                 return -ENOMEM;
3755
3756         task->task_sg = cmd->t_data_sg;
3757         task->task_size = cmd->data_length;
3758         task->task_sg_nents = cmd->t_data_nents;
3759
3760         spin_lock_irqsave(&cmd->t_state_lock, flags);
3761         list_add_tail(&task->t_list, &cmd->t_task_list);
3762         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3763
3764         /* Success! Return number of tasks allocated */
3765         return 1;
3766 }
3767
3768 /*
3769  * Allocate any required resources to execute the command, and place
3770  * it on the execution queue if possible.  For writes we might not have the
3771  * payload yet, thus notify the fabric via a call to ->write_pending instead.
3772  */
3773 int transport_generic_new_cmd(struct se_cmd *cmd)
3774 {
3775         struct se_device *dev = cmd->se_dev;
3776         int task_cdbs, task_cdbs_bidi = 0;
3777         int set_counts = 1;
3778         int ret = 0;
3779
3780         /*
3781          * Determine if the TCM fabric module has already allocated physical
3782          * memory, and is directly calling transport_generic_map_mem_to_cmd()
3783          * beforehand.
3784          */
3785         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3786             cmd->data_length) {
3787                 ret = transport_generic_get_mem(cmd);
3788                 if (ret < 0)
3789                         goto out_fail;
3790         }
3791
3792         /*
3793          * For BIDI command set up the read tasks first.
3794          */
3795         if (cmd->t_bidi_data_sg &&
3796             dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3797                 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3798
3799                 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3800                                 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3801                                 cmd->t_bidi_data_nents);
3802                 if (task_cdbs_bidi <= 0)
3803                         goto out_fail;
3804
3805                 atomic_inc(&cmd->t_fe_count);
3806                 atomic_inc(&cmd->t_se_count);
3807                 set_counts = 0;
3808         }
3809
3810         if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3811                 task_cdbs = transport_allocate_data_tasks(cmd,
3812                                         cmd->data_direction, cmd->t_data_sg,
3813                                         cmd->t_data_nents);
3814         } else {
3815                 task_cdbs = transport_allocate_control_task(cmd);
3816         }
3817
3818         if (task_cdbs < 0)
3819                 goto out_fail;
3820         else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3821                 spin_lock_irq(&cmd->t_state_lock);
3822                 cmd->t_state = TRANSPORT_COMPLETE;
3823                 cmd->transport_state |= CMD_T_ACTIVE;
3824                 spin_unlock_irq(&cmd->t_state_lock);
3825
3826                 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3827                         u8 ua_asc = 0, ua_ascq = 0;
3828
3829                         core_scsi3_ua_clear_for_request_sense(cmd,
3830                                         &ua_asc, &ua_ascq);
3831                 }
3832
3833                 INIT_WORK(&cmd->work, target_complete_ok_work);
3834                 queue_work(target_completion_wq, &cmd->work);
3835                 return 0;
3836         }
3837
3838         if (set_counts) {
3839                 atomic_inc(&cmd->t_fe_count);
3840                 atomic_inc(&cmd->t_se_count);
3841         }
3842
3843         cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3844         atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3845         atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3846
3847         /*
3848          * For WRITEs, let the fabric know its buffer is ready.
3849          * This WRITE struct se_cmd (and all of its associated struct se_task's)
3850          * will be added to the struct se_device execution queue after its WRITE
3851          * data has arrived. (i.e. it gets handled by the transport processing
3852          * thread a second time)
3853          */
3854         if (cmd->data_direction == DMA_TO_DEVICE) {
3855                 transport_add_tasks_to_state_queue(cmd);
3856                 return transport_generic_write_pending(cmd);
3857         }
3858         /*
3859          * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3860          * to the execution queue.
3861          */
3862         transport_execute_tasks(cmd);
3863         return 0;
3864
3865 out_fail:
3866         cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3867         cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3868         return -EINVAL;
3869 }
3870 EXPORT_SYMBOL(transport_generic_new_cmd);
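/*
 * Illustrative sketch of a caller (hypothetical fabric dispatch path, not
 * part of this driver): after the CDB has been sequenced, the fabric hands
 * the command to the core and converts a setup failure into a
 * CHECK_CONDITION response using the sense reason set in out_fail above.
 */
#if 0
static void example_fabric_submit(struct se_cmd *se_cmd)
{
        if (transport_generic_new_cmd(se_cmd) < 0)
                transport_send_check_condition_and_sense(se_cmd,
                                se_cmd->scsi_sense_reason, 0);
}
#endif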
3871
3872 /*      transport_generic_process_write():
3873  *
3874  *
3875  */
3876 void transport_generic_process_write(struct se_cmd *cmd)
3877 {
3878         transport_execute_tasks(cmd);
3879 }
3880 EXPORT_SYMBOL(transport_generic_process_write);
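/*
 * Illustrative usage (hypothetical fabric data-out completion, not part
 * of this driver): once all WRITE payload promised via ->write_pending
 * has arrived, the fabric hands the command back for execution.
 */
#if 0
static void example_fabric_data_out_done(struct se_cmd *se_cmd)
{
        transport_generic_process_write(se_cmd);
}
#endif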
3881
3882 static void transport_write_pending_qf(struct se_cmd *cmd)
3883 {
3884         int ret;
3885
3886         ret = cmd->se_tfo->write_pending(cmd);
3887         if (ret == -EAGAIN || ret == -ENOMEM) {
3888                 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3889                          cmd);
3890                 transport_handle_queue_full(cmd, cmd->se_dev);
3891         }
3892 }
3893
3894 static int transport_generic_write_pending(struct se_cmd *cmd)
3895 {
3896         unsigned long flags;
3897         int ret;
3898
3899         spin_lock_irqsave(&cmd->t_state_lock, flags);
3900         cmd->t_state = TRANSPORT_WRITE_PENDING;
3901         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3902
3903         /*
3904          * Clear the se_cmd for WRITE_PENDING status in order to set
3905          * CMD_T_ACTIVE so that transport_generic_handle_data can be called
3906          * from HW target mode interrupt code.  This is safe to be called
3907          * with transport_off=1 before the cmd->se_tfo->write_pending
3908          * because the se_cmd->se_lun pointer is not being cleared.
3909          */
3910         transport_cmd_check_stop(cmd, 1, 0);
3911
3912         /*
3913          * Call the fabric write_pending function here to let the
3914          * frontend know that WRITE buffers are ready.
3915          */
3916         ret = cmd->se_tfo->write_pending(cmd);
3917         if (ret == -EAGAIN || ret == -ENOMEM)
3918                 goto queue_full;
3919         else if (ret < 0)
3920                 return ret;
3921
3922         return 1;
3923
3924 queue_full:
3925         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3926         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3927         transport_handle_queue_full(cmd, cmd->se_dev);
3928         return 0;
3929 }
3930
3931 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3932 {
3933         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3934                 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
3935                         transport_wait_for_tasks(cmd);
3936
3937                 transport_release_cmd(cmd);
3938         } else {
3939                 if (wait_for_tasks)
3940                         transport_wait_for_tasks(cmd);
3941
3942                 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3943
3944                 if (cmd->se_lun)
3945                         transport_lun_remove_cmd(cmd);
3946
3947                 transport_free_dev_tasks(cmd);
3948
3949                 transport_put_cmd(cmd);
3950         }
3951 }
3952 EXPORT_SYMBOL(transport_generic_free_cmd);
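/*
 * Illustrative usage (hypothetical fabric completion path, not part of
 * this driver): wait_for_tasks = 1 quiesces any still-running tasks
 * before the descriptor is torn down.
 */
#if 0
static void example_fabric_release(struct se_cmd *se_cmd)
{
        transport_generic_free_cmd(se_cmd, 1);
}
#endif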
3953
3954 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3955  * @se_sess:    session to reference
3956  * @se_cmd:     command descriptor to add
3957  * @ack_kref:   Signal that fabric will perform an ack target_put_sess_cmd()
3958  */
3959 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3960                         bool ack_kref)
3961 {
3962         unsigned long flags;
3963
3964         kref_init(&se_cmd->cmd_kref);
3965         /*
3966          * Add a second kref if the fabric caller is expecting to handle
3967          * fabric acknowledgement that requires two target_put_sess_cmd()
3968          * invocations before se_cmd descriptor release.
3969          */
3970         if (ack_kref) {
3971                 kref_get(&se_cmd->cmd_kref);
3972                 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
3973         }
3974
3975         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3976         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3977         se_cmd->check_release = 1;
3978         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3979 }
3980 EXPORT_SYMBOL(target_get_sess_cmd);
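/*
 * Illustrative sketch (hypothetical fabric setup, not part of this
 * driver): with ack_kref == true the fabric must balance the extra
 * reference with a second target_put_sess_cmd() when the initiator
 * acknowledges the response.
 */
#if 0
static void example_fabric_track_cmd(struct se_session *se_sess,
                                     struct se_cmd *se_cmd)
{
        target_get_sess_cmd(se_sess, se_cmd, true);
        /*
         * ... later: target_put_sess_cmd() once on command completion,
         * and once again on fabric-level acknowledgement.
         */
}
#endif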
3981
3982 static void target_release_cmd_kref(struct kref *kref)
3983 {
3984         struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3985         struct se_session *se_sess = se_cmd->se_sess;
3986         unsigned long flags;
3987
3988         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3989         if (list_empty(&se_cmd->se_cmd_list)) {
3990                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3991                 se_cmd->se_tfo->release_cmd(se_cmd);
3992                 return;
3993         }
3994         if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3995                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3996                 complete(&se_cmd->cmd_wait_comp);
3997                 return;
3998         }
3999         list_del(&se_cmd->se_cmd_list);
4000         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4001
4002         se_cmd->se_tfo->release_cmd(se_cmd);
4003 }
4004
4005 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
4006  * @se_sess:    session to reference
4007  * @se_cmd:     command descriptor to drop
4008  */
4009 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4010 {
4011         return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
4012 }
4013 EXPORT_SYMBOL(target_put_sess_cmd);
4014
4015 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
4016  * @se_sess:    session to split
4017  */
4018 void target_splice_sess_cmd_list(struct se_session *se_sess)
4019 {
4020         struct se_cmd *se_cmd;
4021         unsigned long flags;
4022
4023         WARN_ON(!list_empty(&se_sess->sess_wait_list));
4024         INIT_LIST_HEAD(&se_sess->sess_wait_list);
4025
4026         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
4027         se_sess->sess_tearing_down = 1;
4028
4029         list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
4030
4031         list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
4032                 se_cmd->cmd_wait_set = 1;
4033
4034         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
4035 }
4036 EXPORT_SYMBOL(target_splice_sess_cmd_list);
4037
4038 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
4039  * @se_sess:    session to wait for active I/O
4040  * @wait_for_tasks:     Make extra transport_wait_for_tasks call
4041  */
4042 void target_wait_for_sess_cmds(
4043         struct se_session *se_sess,
4044         int wait_for_tasks)
4045 {
4046         struct se_cmd *se_cmd, *tmp_cmd;
4047         bool rc = false;
4048
4049         list_for_each_entry_safe(se_cmd, tmp_cmd,
4050                                 &se_sess->sess_wait_list, se_cmd_list) {
4051                 list_del(&se_cmd->se_cmd_list);
4052
4053                 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4054                         " %d\n", se_cmd, se_cmd->t_state,
4055                         se_cmd->se_tfo->get_cmd_state(se_cmd));
4056
4057                 if (wait_for_tasks) {
4058                         pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4059                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4060                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
4061
4062                         rc = transport_wait_for_tasks(se_cmd);
4063
4064                         pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4065                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4066                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
4067                 }
4068
4069                 if (!rc) {
4070                         wait_for_completion(&se_cmd->cmd_wait_comp);
4071                         pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4072                                 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4073                                 se_cmd->se_tfo->get_cmd_state(se_cmd));
4074                 }
4075
4076                 se_cmd->se_tfo->release_cmd(se_cmd);
4077         }
4078 }
4079 EXPORT_SYMBOL(target_wait_for_sess_cmds);
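/*
 * Illustrative teardown ordering (hypothetical fabric session close, not
 * part of this driver): splice the active list first so no descriptors
 * are lost, then drain them before deregistering the session.
 */
#if 0
static void example_fabric_close_session(struct se_session *se_sess)
{
        target_splice_sess_cmd_list(se_sess);
        target_wait_for_sess_cmds(se_sess, 0);
        transport_deregister_session(se_sess);
}
#endif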
4080
4081 /*      transport_lun_wait_for_tasks():
4082  *
4083  *      Called from ConfigFS context to stop the passed struct se_cmd to allow
4084  *      a struct se_lun to be successfully shut down.
4085  */
4086 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4087 {
4088         unsigned long flags;
4089         int ret;
4090         /*
4091          * If the frontend has already requested this struct se_cmd to
4092          * be stopped, we can safely ignore this struct se_cmd.
4093          */
4094         spin_lock_irqsave(&cmd->t_state_lock, flags);
4095         if (cmd->transport_state & CMD_T_STOP) {
4096                 cmd->transport_state &= ~CMD_T_LUN_STOP;
4097
4098                 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
4099                          cmd->se_tfo->get_task_tag(cmd));
4100                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4101                 transport_cmd_check_stop(cmd, 1, 0);
4102                 return -EPERM;
4103         }
4104         cmd->transport_state |= CMD_T_LUN_FE_STOP;
4105         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4106
4107         wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4108
4109         ret = transport_stop_tasks_for_cmd(cmd);
4110
4111         pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4112                         " %d\n", cmd, cmd->t_task_list_num, ret);
4113         if (!ret) {
4114                 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4115                                 cmd->se_tfo->get_task_tag(cmd));
4116                 wait_for_completion(&cmd->transport_lun_stop_comp);
4117                 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4118                                 cmd->se_tfo->get_task_tag(cmd));
4119         }
4120         transport_remove_cmd_from_queue(cmd);
4121
4122         return 0;
4123 }
4124
4125 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4126 {
4127         struct se_cmd *cmd = NULL;
4128         unsigned long lun_flags, cmd_flags;
4129         /*
4130          * Do exception processing and return CHECK_CONDITION status to the
4131          * Initiator Port.
4132          */
4133         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4134         while (!list_empty(&lun->lun_cmd_list)) {
4135                 cmd = list_first_entry(&lun->lun_cmd_list,
4136                        struct se_cmd, se_lun_node);
4137                 list_del_init(&cmd->se_lun_node);
4138
4139                 /*
4140                  * This will notify iscsi_target_transport.c:
4141                  * transport_cmd_check_stop() that a LUN shutdown is in
4142                  * progress for the iscsi_cmd_t.
4143                  */
4144                 spin_lock(&cmd->t_state_lock);
4145                 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4146                         "_lun_stop for ITT: 0x%08x\n",
4147                         cmd->se_lun->unpacked_lun,
4148                         cmd->se_tfo->get_task_tag(cmd));
4149                 cmd->transport_state |= CMD_T_LUN_STOP;
4150                 spin_unlock(&cmd->t_state_lock);
4151
4152                 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4153
4154                 if (!cmd->se_lun) {
4155                         pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4156                                 cmd->se_tfo->get_task_tag(cmd),
4157                                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4158                         BUG();
4159                 }
4160                 /*
4161                  * If the Storage engine still owns the iscsi_cmd_t, determine
4162                  * and/or stop its context.
4163                  */
4164                 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4165                         "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4166                         cmd->se_tfo->get_task_tag(cmd));
4167
4168                 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4169                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4170                         continue;
4171                 }
4172
4173                 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4174                         "_wait_for_tasks(): SUCCESS\n",
4175                         cmd->se_lun->unpacked_lun,
4176                         cmd->se_tfo->get_task_tag(cmd));
4177
4178                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4179                 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
4180                         spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4181                         goto check_cond;
4182                 }
4183                 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
4184                 transport_all_task_dev_remove_state(cmd);
4185                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4186
4187                 transport_free_dev_tasks(cmd);
4188                 /*
4189                  * The Storage engine stopped this struct se_cmd before it was
4190                  * send to the fabric frontend for delivery back to the
4191                  * Initiator Node.  Return this SCSI CDB back with an
4192                  * CHECK_CONDITION status.
4193                  */
4194 check_cond:
4195                 transport_send_check_condition_and_sense(cmd,
4196                                 TCM_NON_EXISTENT_LUN, 0);
4197                 /*
4198                  *  If the fabric frontend is waiting for this iscsi_cmd_t to
4199                  * be released, notify the waiting thread now that LU has
4200                  * finished accessing it.
4201                  */
4202                 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4203                 if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
4204                         pr_debug("SE_LUN[%d] - Detected FE stop for"
4205                                 " struct se_cmd: %p ITT: 0x%08x\n",
4206                                 lun->unpacked_lun,
4207                                 cmd, cmd->se_tfo->get_task_tag(cmd));
4208
4209                         spin_unlock_irqrestore(&cmd->t_state_lock,
4210                                         cmd_flags);
4211                         transport_cmd_check_stop(cmd, 1, 0);
4212                         complete(&cmd->transport_lun_fe_stop_comp);
4213                         spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4214                         continue;
4215                 }
4216                 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4217                         lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4218
4219                 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4220                 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4221         }
4222         spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4223 }
4224
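/*
 * kthread entry point for LUN shutdown; signals lun_shutdown_comp once all
 * commands have been cleared from the LUN's command list.
 */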
static int transport_clear_lun_thread(void *p)
{
        struct se_lun *lun = p;

        __transport_clear_lun_from_sessions(lun);
        complete(&lun->lun_shutdown_comp);

        return 0;
}

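/*
 * Called from ConfigFS LUN shutdown to clear a struct se_lun from all active
 * sessions.  Spawns a one-shot "tcm_cl_%u" kthread and blocks until it has
 * finished __transport_clear_lun_from_sessions().
 */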
int transport_clear_lun_from_sessions(struct se_lun *lun)
{
        struct task_struct *kt;

        kt = kthread_run(transport_clear_lun_thread, lun,
                        "tcm_cl_%u", lun->unpacked_lun);
        if (IS_ERR(kt)) {
                pr_err("Unable to start clear_lun thread\n");
                return PTR_ERR(kt);
        }
        wait_for_completion(&lun->lun_shutdown_comp);

        return 0;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:        command to wait on
 *
 * Called from frontend fabric context to wait for the storage engine
 * to pause and/or release a frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
        /*
         * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
         * has been set in transport_set_supported_SAM_opcode().
         */
        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
        /*
         * If we are already stopped due to an external event (i.e. LUN
         * shutdown), sleep until the connection can have the passed struct
         * se_cmd back.  cmd->transport_lun_fe_stop_comp will be completed by
         * transport_clear_lun_from_sessions() once the ConfigFS context caller
         * has completed its operation on the struct se_cmd.
         */
        if (cmd->transport_state & CMD_T_LUN_STOP) {
                pr_debug("wait_for_tasks: Stopping"
                        " wait_for_completion(&cmd->transport_lun_fe"
                        "_stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));
                /*
                 * There is a special case for WRITEs where a FE exception +
                 * LUN shutdown means the ConfigFS context is still sleeping on
                 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
                 * We go ahead and complete transport_lun_stop_comp just to be
                 * sure here.
                 */
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                complete(&cmd->transport_lun_stop_comp);
                wait_for_completion(&cmd->transport_lun_fe_stop_comp);
                spin_lock_irqsave(&cmd->t_state_lock, flags);

                transport_all_task_dev_remove_state(cmd);
                /*
                 * At this point, the frontend who was the originator of this
                 * struct se_cmd, now owns the structure and can be released
                 * through normal means below.
                 */
                pr_debug("wait_for_tasks: Stopped"
                        " wait_for_completion(&cmd->transport_lun_fe_"
                        "stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));

                cmd->transport_state &= ~CMD_T_LUN_STOP;
        }

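        /*
         * If the command is no longer active there is nothing to wait for.
         * Otherwise flag CMD_T_STOP, kick the device processing thread, and
         * sleep on t_transport_stop_comp until the storage engine has
         * quiesced the command.
         */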
        if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }

        cmd->transport_state |= CMD_T_STOP;

        pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
                " i_state: %d, t_state: %d, CMD_T_STOP\n",
                cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

        wait_for_completion(&cmd->t_transport_stop_comp);

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

        pr_debug("wait_for_tasks: Stopped wait_for_completion("
                "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
                cmd->se_tfo->get_task_tag(cmd));

        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

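/*
 * Trivial accessors for the additional sense code (ASC) and additional sense
 * code qualifier (ASCQ) stashed in struct se_cmd by backend/fabric code.
 */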
static int transport_get_sense_codes(
        struct se_cmd *cmd,
        u8 *asc,
        u8 *ascq)
{
        *asc = cmd->scsi_asc;
        *ascq = cmd->scsi_ascq;

        return 0;
}

static int transport_set_sense_codes(
        struct se_cmd *cmd,
        u8 asc,
        u8 ascq)
{
        cmd->scsi_asc = asc;
        cmd->scsi_ascq = ascq;

        return 0;
}

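/*
 * Build fixed format sense data for @reason and queue CHECK_CONDITION status
 * back to the fabric.  As a rough sketch of the buffer filled in below,
 * assuming the usual SPC fixed-format offsets for these constants:
 *
 *   byte 0:   0x70 (current error, fixed format)
 *   byte 2:   SENSE KEY                (SPC_SENSE_KEY_OFFSET)
 *   byte 7:   additional length = 10   (SPC_ADD_SENSE_LEN_OFFSET)
 *   byte 12:  ASC                      (SPC_ASC_KEY_OFFSET)
 *   byte 13:  ASCQ                     (SPC_ASCQ_KEY_OFFSET)
 */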
int transport_send_check_condition_and_sense(
        struct se_cmd *cmd,
        u8 reason,
        int from_transport)
{
        unsigned char *buffer = cmd->sense_buffer;
        unsigned long flags;
        int offset;
        u8 asc = 0, ascq = 0;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (!reason && from_transport)
                goto after_reason;

        if (!from_transport)
                cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
        /*
         * Data Segment and SenseLength of the fabric response PDU.
         *
         * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
         * from include/scsi/scsi_cmnd.h
         */
        offset = cmd->se_tfo->set_fabric_sense_len(cmd,
                                TRANSPORT_SENSE_BUFFER);
        /*
         * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
         * SENSE KEY values from include/scsi/scsi.h
         */
        switch (reason) {
        case TCM_NON_EXISTENT_LUN:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT NOT SUPPORTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
                break;
        case TCM_UNSUPPORTED_SCSI_OPCODE:
        case TCM_SECTOR_COUNT_TOO_MANY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID COMMAND OPERATION CODE */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
                break;
        case TCM_UNKNOWN_MODE_PAGE:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_CHECK_CONDITION_ABORT_CMD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* BUS DEVICE RESET FUNCTION OCCURRED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
                break;
        case TCM_INCORRECT_AMOUNT_OF_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
                /* NOT ENOUGH UNSOLICITED DATA */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
                break;
        case TCM_INVALID_CDB_FIELD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_INVALID_PARAMETER_LIST:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN PARAMETER LIST */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
                break;
        case TCM_UNEXPECTED_UNSOLICITED_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
                /* UNEXPECTED_UNSOLICITED_DATA */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
                break;
        case TCM_SERVICE_CRC_ERROR:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* PROTOCOL SERVICE CRC ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
                /* N/A */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
                break;
        case TCM_SNACK_REJECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* READ ERROR */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
                /* FAILED RETRANSMISSION REQUEST */
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
                break;
        case TCM_WRITE_PROTECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* DATA PROTECT */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
                /* WRITE PROTECTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
                break;
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* UNIT ATTENTION */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
                buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
                break;
        case TCM_CHECK_CONDITION_NOT_READY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* Not Ready */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
                transport_get_sense_codes(cmd, &asc, &ascq);
                buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
                buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
                break;
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        default:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
                buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
                break;
        }
        /*
         * This code uses linux/include/scsi/scsi.h SAM status codes!
         */
        cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        /*
         * Automatically padded, this value is encoded in the fabric's
         * data_length response PDU containing the SCSI defined sense data.
         */
        cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
        return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

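/*
 * Returns 1 if the command has been aborted, queueing delayed
 * SAM_STAT_TASK_ABORTED status back to the fabric when @send_status is set
 * and it has not already been sent; returns 0 otherwise.
 */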
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
        int ret = 0;

        if (cmd->transport_state & CMD_T_ABORTED) {
                if (!send_status ||
                     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
                        return 1;

                pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
                        " status for CDB: 0x%02x ITT: 0x%08x\n",
                        cmd->t_task_cdb[0],
                        cmd->se_tfo->get_task_tag(cmd));

                cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
                cmd->se_tfo->queue_status(cmd);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

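/*
 * Queue SAM_STAT_TASK_ABORTED status back to the fabric for an aborted
 * command, unless a CHECK_CONDITION response has already been sent.
 */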
void transport_send_task_abort(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        /*
         * If there are still expected incoming fabric WRITEs, we wait
         * until they have completed before sending a TASK_ABORTED
         * response.  This response with TASK_ABORTED status will be
         * queued back to fabric module by transport_check_aborted_status().
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        cmd->transport_state |= CMD_T_ABORTED;
                        smp_mb__after_atomic_inc();
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;

        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
                " ITT: 0x%08x\n", cmd->t_task_cdb[0],
                cmd->se_tfo->get_task_tag(cmd));

        cmd->se_tfo->queue_status(cmd);
}

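/*
 * Execute a queued task management request (TMR) and queue the TMR response
 * back to the fabric via ->queue_tm_rsp().
 */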
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        int ret;

        switch (tmr->function) {
        case TMR_ABORT_TASK:
                core_tmr_abort_task(dev, tmr, cmd->se_sess);
                break;
        case TMR_ABORT_TASK_SET:
        case TMR_CLEAR_ACA:
        case TMR_CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
        case TMR_LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                break;
        case TMR_TARGET_WARM_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        case TMR_TARGET_COLD_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        default:
                pr_err("Unknown TMR function: 0x%02x.\n",
                                tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        }

        cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        cmd->se_tfo->queue_tm_rsp(cmd);

        transport_cmd_check_stop_to_fabric(cmd);
        return 0;
}

/*      transport_processing_thread():
 *
 *      Per-device kthread that dequeues struct se_cmd from the device queue
 *      object and dispatches each one based on cmd->t_state.
 */
static int transport_processing_thread(void *param)
{
        int ret;
        struct se_cmd *cmd;
        struct se_device *dev = param;

        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
                                kthread_should_stop());
                if (ret < 0)
                        goto out;

get_cmd:
                cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
                if (!cmd)
                        continue;

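                /*
                 * Dispatch on the queued command's transport state; anything
                 * unrecognized here indicates queue/state corruption, hence
                 * the BUG()s below.
                 */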
                switch (cmd->t_state) {
                case TRANSPORT_NEW_CMD:
                        BUG();
                        break;
                case TRANSPORT_NEW_CMD_MAP:
                        if (!cmd->se_tfo->new_cmd_map) {
                                pr_err("cmd->se_tfo->new_cmd_map is"
                                        " NULL for TRANSPORT_NEW_CMD_MAP\n");
                                BUG();
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
                                transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
                                transport_generic_request_failure(cmd);
                                break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
                        transport_generic_process_write(cmd);
                        break;
                case TRANSPORT_PROCESS_TMR:
                        transport_generic_do_tmr(cmd);
                        break;
                case TRANSPORT_COMPLETE_QF_WP:
                        transport_write_pending_qf(cmd);
                        break;
                case TRANSPORT_COMPLETE_QF_OK:
                        transport_complete_qf(cmd);
                        break;
                default:
                        pr_err("Unknown t_state: %d for ITT: 0x%08x "
                                "i_state: %d on SE LUN: %u\n",
                                cmd->t_state,
                                cmd->se_tfo->get_task_tag(cmd),
                                cmd->se_tfo->get_cmd_state(cmd),
                                cmd->se_lun->unpacked_lun);
                        BUG();
                }

                goto get_cmd;
        }

out:
        WARN_ON(!list_empty(&dev->state_task_list));
        WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
        dev->process_thread = NULL;
        return 0;
}