/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE         2

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        struct kmem_cache       *slab;
        mempool_t       *pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
        SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
        SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>

static bool acpi_scsi_bus_match(struct device *dev)
{
        return dev->bus == &scsi_bus_type;
}

int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
        bus->match = acpi_scsi_bus_match;
        return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);

void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
{
        unregister_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
#endif
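
/*
 * Illustrative sketch (hypothetical glue code, not from this tree): a
 * transport's ACPI glue would fill in an acpi_bus_type and register it
 * so its SCSI devices can be bound to ACPI handles.  The names below
 * are made up; the caller supplies ->name and ->find_device, while
 * scsi_register_acpi_bus_type() installs ->match itself:
 *
 *      static struct acpi_bus_type example_scsi_acpi_bus = {
 *              .name           = "example-scsi",
 *              .find_device    = example_find_device,
 *      };
 *
 *      ret = scsi_register_acpi_bus_type(&example_scsi_acpi_bus);
 */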

/*
 * When to reinvoke queueing after a resource shortage.  It's 3 msecs so
 * as not to change behaviour from the previous unplug mechanism;
 * experimentation may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY        3

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        blk_unprep_request(req);
        req->special = NULL;

        scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                host->host_blocked = host->max_host_blocked;
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                device->device_blocked = device->max_device_blocked;
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                starget->target_blocked = starget->max_target_blocked;
                break;
        }

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue. Schedule requeue work under
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        kblockd_schedule_work(q, &device->requeue_work);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        __scsi_queue_insert(cmd, reason, 1);
}
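
/*
 * Illustrative sketch (hypothetical LLD code, not from this file): a
 * low-level driver that cannot accept a command right now returns one
 * of the SCSI_MLQUEUE_* reasons from its ->queuecommand(), and the
 * midlayer then passes that reason to scsi_queue_insert() so the
 * matching busy bit above gets set:
 *
 *      static int example_queuecommand(struct Scsi_Host *shost,
 *                                      struct scsi_cmnd *cmd)
 *      {
 *              if (example_adapter_queue_full(shost))
 *                      return SCSI_MLQUEUE_HOST_BUSY;
 *              return example_issue(cmd);
 *      }
 */
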
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in jiffies
 * @retries:    number of times to retry request
 * @flags:      flags to OR into the request's cmd_flags
 * @resid:      optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags,
                 int *resid)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
        if (!req)
                return ret;

        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
                memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

        if (resid)
                *resid = req->resid_len;
        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);
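
/*
 * Example usage (illustrative sketch, not from this file): issue a
 * TEST UNIT READY, which transfers no data; the timeout is given in
 * jiffies and the call sleeps until the command completes:
 *
 *      unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int result;
 *
 *      result = scsi_execute(sdev, tur, DMA_NONE, NULL, 0, NULL,
 *                            10 * HZ, 3, 0, NULL);
 *
 * A non-zero result packs the host, driver and status bytes, which
 * callers usually decode with host_byte()/status_byte().
 */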

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
                     int *resid, int flags)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
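
/*
 * Example usage (illustrative sketch, not from this file): the sense
 * variant decodes sense data into a caller-provided scsi_sense_hdr,
 * e.g. issuing TEST UNIT READY and printing any sense returned:
 *
 *      struct scsi_sense_hdr sshdr;
 *      unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int result;
 *
 *      result = scsi_execute_req_flags(sdev, tur, DMA_NONE, NULL, 0,
 *                                      &sshdr, 10 * HZ, 3, NULL, 0);
 *      if (result && scsi_sense_valid(&sshdr))
 *              scsi_print_sense_hdr("example", &sshdr);
 */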

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        starget->target_busy--;
        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled)))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
                return 1;

        return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
        return ((starget->can_queue > 0 &&
                 starget->target_busy >= starget->can_queue) ||
                 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked)
                return 1;

        return 0;
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        LIST_HEAD(starved_list);
        unsigned long flags;

        shost = sdev->host;
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                /*
                 * Once we drop the host lock, a racing scsi_remove_device()
                 * call may remove the sdev from the starved list and destroy
                 * it and the queue.  Mitigate by taking a reference to the
                 * queue and never touching the sdev again after we drop the
                 * host lock.  Note: if __scsi_remove_device() invokes
                 * blk_cleanup_queue() before the queue is run from this
                 * function then blk_run_queue() will return immediately since
                 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(slq);
                blk_put_queue(slq);

                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can happen for a number of reasons - the main one
 *              being I/O errors in the middle of the request, in which
 *              case we need to request the blocks that come after the
 *              bad sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * We need to hold a reference on the device to avoid the queue being
         * killed after the unlock and before scsi_run_queue is invoked which
         * may happen because scsi_unprep_request() puts the command which
         * releases its reference on the device.
         */
        get_device(&sdev->sdev_gendev);

        spin_lock_irqsave(q->queue_lock, flags);
        scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        put_device(&sdev->sdev_gendev);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        /* need to hold a reference on the device before we let go of the cmd */
        get_device(&sdev->sdev_gendev);

        scsi_put_command(cmd);
        scsi_run_queue(q);

        /* ok to remove device now */
        put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 * Notes:       If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
                                          int bytes, int requeue)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
                        blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_release_buffers(cmd);
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        __scsi_release_buffers(cmd, 0);
        scsi_next_command(cmd);
        return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
        unsigned int index;

        BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

        if (nents <= 8)
                index = 0;
        else
                index = get_count_order(nents) - 3;

        return index;
}
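
/*
 * Worked example: for nents = 24, get_count_order(24) = 5 (24 rounds up
 * to 2^5 = 32), so the index is 5 - 3 = 2, selecting the "sgpool-32"
 * mempool declared above.  Any nents of 8 or less maps to index 0,
 * i.e. "sgpool-8".
 */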

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
                              gfp_t gfp_mask)
{
        int ret;

        BUG_ON(!nents);

        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
                               gfp_mask, scsi_sg_alloc);
        if (unlikely(ret))
                __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
                                scsi_sg_free);

        return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

        if (cmd->sdb.table.nents)
                scsi_free_sgtable(&cmd->sdb);

        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
                struct scsi_data_buffer *bidi_sdb =
                        cmd->request->next_rq->special;
                scsi_free_sgtable(bidi_sdb);
                kmem_cache_free(scsi_sdb_cache, bidi_sdb);
                cmd->request->next_rq->special = NULL;
        }

        if (scsi_prot_sg_count(cmd))
                scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        __scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:        SCSI command (unused)
 * @result:     scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK     temporary transport failure
 * -EREMOTEIO   permanent target failure, do not retry
 * -EBADE       permanent nexus failure, retry on other path
 * -ENOSPC      No write space available
 * -ENODATA     Medium error
 * -EIO         unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
        int error = 0;

        switch(host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
                error = -ENOLINK;
                break;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EREMOTEIO;
                break;
        case DID_NEXUS_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -EBADE;
                break;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
                error = -ENOSPC;
                break;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
                error = -ENODATA;
                break;
        default:
                error = -EIO;
                break;
        }

        return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must call scsi_end_request().  This will finish off
 *              the specified number of sectors.  If we are done, the
 *              command block will be released and the queue function
 *              will be goosed.  If we are not done then we have to
 *              figure out what to do next:
 *
 *              a) We can call scsi_requeue_command().  The request
 *                 will be unprepared and put back on the queue.  Then
 *                 a new command will be created for it.  This should
 *                 be used if we made forward progress, or if we want
 *                 to switch from READ(10) to READ(6) for example.
 *
 *              b) We can call scsi_queue_insert().  The request will
 *                 be put back on the queue and retried using the same
 *                 command as before, possibly after a delay.
 *
 *              c) We can call blk_end_request() with -EIO to fail
 *                 the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        char *description = NULL;

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer,  len);
                                req->sense_len = len;
                        }
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
                }
                /*
                 * __scsi_error_from_host_byte may have reset the host_byte
                 */
                req->errors = cmd->result;

                req->resid_len = scsi_get_resid(cmd);

                if (scsi_bidi_cmnd(cmd)) {
                        /*
                         * Bidi commands must be completed as a whole,
                         * both sides at once.
                         */
                        req->next_rq->resid_len = scsi_in(cmd)->resid;

                        scsi_release_buffers(cmd);
                        blk_end_request_all(req, 0);

                        scsi_next_command(cmd);
                        return;
                }
        }

        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
        BUG_ON(blk_bidi_rq(req));

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
                                      blk_rq_sectors(req), good_bytes));

        /*
         * Recovered errors need reporting, but they're always treated
         * as success, so fiddle the result code here.  For BLOCK_PC
         * we already took a copy of the original into rq->errors which
         * is what gets returned to the user
         */
        if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
                /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
                 * print since caller wants ATA registers. Only occurs on
                 * SCSI ATA PASS_THROUGH commands when CK_COND=1
                 */
                if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
                        ;
                else if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
                error = 0;
        }

        /*
         * A number of bytes were successfully read.  If there
         * are leftovers and there is some kind of error
         * (result != 0), retry the rest.
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;

        error = __scsi_error_from_host_byte(cmd, result);

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons.  Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change.  Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                description = "Media Changed";
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command.  The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported.  Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                            sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                description = "Host Data Integrity Failure";
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                switch (cmd->cmnd[0]) {
                                case UNMAP:
                                        description = "Discard failure";
                                        break;
                                case WRITE_SAME:
                                case WRITE_SAME_16:
                                        if (cmd->cmnd[1] & 0x8)
                                                description = "Discard failure";
                                        else
                                                description =
                                                        "Write same failure";
                                        break;
                                default:
                                        description = "Invalid command failure";
                                        break;
                                }
                                action = ACTION_FAIL;
                                error = -EREMOTEIO;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) { /* DIF */
                                description = "Target Data Integrity Failure";
                                error = -EILSEQ;
                        }
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                case 0x14: /* space allocation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
                                        description = "Device not ready";
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else {
                                description = "Device not ready";
                                action = ACTION_FAIL;
                        }
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                default:
                        description = "Unhandled sense code";
                        action = ACTION_FAIL;
                        break;
                }
        } else {
                description = "Unhandled error code";
                action = ACTION_FAIL;
        }

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                scsi_release_buffers(cmd);
                if (!(req->cmd_flags & REQ_QUIET)) {
                        if (description)
                                scmd_printk(KERN_INFO, cmd, "%s\n",
                                            description);
                        scsi_print_result(cmd);
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                        scsi_print_command(cmd);
                }
                if (blk_end_request_err(req, error))
                        scsi_requeue_command(q, cmd);
                else
                        scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
                /* Unprep the request and put it back at the head of the queue.
                 * A new command will be prepared and issued.
                 */
                scsi_release_buffers(cmd);
                scsi_requeue_command(q, cmd);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
                break;
        }
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
                             gfp_t gfp_mask)
{
        int count;

        /*
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
                                        gfp_mask))) {
                return BLKPREP_DEFER;
        }

        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
        sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
        struct request *rq = cmd->request;

        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
        if (error)
                goto err_exit;

        if (blk_bidi_rq(rq)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                        scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                rq->next_rq->special = bidi_sdb;
                error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
                if (error)
                        goto err_exit;
        }

        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;

                BUG_ON(prot_sdb == NULL);
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

                if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));
                BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLKPREP_OK;

err_exit:
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
        return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
                struct request *req)
{
        struct scsi_cmnd *cmd;

        if (!req->special) {
                cmd = scsi_get_command(sdev, GFP_ATOMIC);
                if (unlikely(!cmd))
                        return NULL;
                req->special = cmd;
        } else {
                cmd = req->special;
        }

        /* pull a tag out of the request if we have one */
        cmd->tag = req->tag;
        cmd->request = req;

        cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;

        return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        /*
         * BLOCK_PC requests may transfer data, in which case they must
         * have a bio attached to them.  Or they might contain a SCSI
         * command that does not transfer data, in which case they may
         * optionally submit a request without an attached bio.
         */
        if (req->bio) {
                int ret;

                BUG_ON(!req->nr_phys_segments);

                ret = scsi_init_io(cmd, GFP_ATOMIC);
                if (unlikely(ret))
                        return ret;
        } else {
                BUG_ON(blk_rq_bytes(req));

                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }

        cmd->cmd_len = req->cmd_len;
        if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;

        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                         && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
                ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
                if (ret != BLKPREP_OK)
                        return ret;
        }

        /*
         * Filesystem requests must transfer data.
         */
        BUG_ON(!req->nr_phys_segments);

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
        int ret = BLKPREP_OK;

        /*
         * If the device is not in running state we will reject some
         * or all commands.
         */
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                switch (sdev->sdev_state) {
                case SDEV_OFFLINE:
                case SDEV_TRANSPORT_OFFLINE:
                        /*
                         * If the device is offline we refuse to process any
                         * commands.  The device must be brought online
                         * before trying any recovery commands.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_DEL:
                        /*
                         * If the device is fully deleted, we refuse to
                         * process any commands as well.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to dead device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                case SDEV_CREATED_BLOCK:
                        /*
                         * If the device is blocked we defer normal commands.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_DEFER;
                        break;
                default:
                        /*
                         * For any other not fully online state we only allow
                         * special commands.  In particular any user initiated
                         * command is not allowed.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_KILL;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
        struct scsi_device *sdev = q->queuedata;

        switch (ret) {
        case BLKPREP_KILL:
                req->errors = DID_NO_CONNECT << 16;
                /* release the command and kill it */
                if (req->special) {
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        req->special = NULL;
                }
                break;
        case BLKPREP_DEFER:
                /*
                 * If we defer, the blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we schedule a callback to happen
                 * shortly.
                 */
                if (sdev->device_busy == 0)
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                break;
        default:
                req->cmd_flags |= REQ_DONTPREP;
        }

        return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
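
/*
 * Illustrative sketch (hypothetical ULD code, not from this file): an
 * upper level driver typically installs its own prep_fn that builds
 * CDBs for REQ_TYPE_FS requests and ends with scsi_prep_return(), so
 * deferral and kill handling stay uniform:
 *
 *      static int example_prep_fn(struct request_queue *q,
 *                                 struct request *rq)
 *      {
 *              int ret = BLKPREP_KILL;
 *
 *              if (rq->cmd_type == REQ_TYPE_FS)
 *                      ret = example_setup_rw_cmnd(q->queuedata, rq);
 *              else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 *                      ret = scsi_setup_blk_pc_cmnd(q->queuedata, rq);
 *              return scsi_prep_return(q, rq, ret);
 *      }
 */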

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                   sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
                } else {
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                        return 0;
                }
        }
        if (scsi_device_is_busy(sdev))
                return 0;

        return 1;
}


/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
1377 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1378                                            struct scsi_device *sdev)
1379 {
1380         struct scsi_target *starget = scsi_target(sdev);
1381
1382         if (starget->single_lun) {
1383                 if (starget->starget_sdev_user &&
1384                     starget->starget_sdev_user != sdev)
1385                         return 0;
1386                 starget->starget_sdev_user = sdev;
1387         }
1388
1389         if (starget->target_busy == 0 && starget->target_blocked) {
1390                 /*
1391                  * unblock after target_blocked iterates to zero
1392                  */
1393                 if (--starget->target_blocked == 0) {
1394                         SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1395                                          "unblocking target at zero depth\n"));
1396                 } else
1397                         return 0;
1398         }
1399
1400         if (scsi_target_is_busy(starget)) {
1401                 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1402                 return 0;
1403         }
1404
1405         return 1;
1406 }
1407
1408 /*
1409  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1410  * return 0. We must end up running the queue again whenever 0 is
1411  * returned, else IO can hang.
1412  *
1413  * Called with host_lock held.
1414  */
1415 static inline int scsi_host_queue_ready(struct request_queue *q,
1416                                    struct Scsi_Host *shost,
1417                                    struct scsi_device *sdev)
1418 {
1419         if (scsi_host_in_recovery(shost))
1420                 return 0;
1421         if (shost->host_busy == 0 && shost->host_blocked) {
1422                 /*
1423                  * unblock after host_blocked iterates to zero
1424                  */
1425                 if (--shost->host_blocked == 0) {
1426                         SCSI_LOG_MLQUEUE(3,
1427                                 printk("scsi%d unblocking host at zero depth\n",
1428                                         shost->host_no));
1429                 } else {
1430                         return 0;
1431                 }
1432         }
1433         if (scsi_host_is_busy(shost)) {
1434                 if (list_empty(&sdev->starved_entry))
1435                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1436                 return 0;
1437         }
1438
1439         /* We're OK to process the command, so we can't be starved */
1440         if (!list_empty(&sdev->starved_entry))
1441                 list_del_init(&sdev->starved_entry);
1442
1443         return 1;
1444 }
1445
1446 /*
1447  * Busy state exporting function for request stacking drivers.
1448  *
1449  * For efficiency, no lock is taken to check the busy state of
1450  * shost/starget/sdev, since the returned value is not guaranteed and
1451  * may be changed after request stacking drivers call the function,
1452  * regardless of taking lock or not.
1453  *
1454  * When scsi can't dispatch I/Os anymore and needs to kill I/Os, it
1455  * needs to return 'not busy'.  Otherwise, request stacking drivers
1456  * may hold requests forever.
1457  */
1458 static int scsi_lld_busy(struct request_queue *q)
1459 {
1460         struct scsi_device *sdev = q->queuedata;
1461         struct Scsi_Host *shost;
1462
1463         if (blk_queue_dying(q))
1464                 return 0;
1465
1466         shost = sdev->host;
1467
1468         /*
1469          * Ignore host/starget busy state.
1470          * Since the block layer does not have a concept of fairness across
1471          * multiple queues, congestion of the host/starget needs to be
1472          * handled at the SCSI layer.
1473          */
1474         if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1475                 return 1;
1476
1477         return 0;
1478 }
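
/*
 * Illustrative sketch, not part of the original file: request stacking
 * drivers (dm-multipath, for instance) reach scsi_lld_busy() through
 * blk_lld_busy() on the underlying queue.
 */
#if 0
static bool example_underlying_path_busy(struct request_queue *underlying_q)
{
        /* for a SCSI queue this ends up in scsi_lld_busy() above */
        return blk_lld_busy(underlying_q);
}
#endif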
1479
1480 /*
1481  * Kill a request for a dead device
1482  */
1483 static void scsi_kill_request(struct request *req, struct request_queue *q)
1484 {
1485         struct scsi_cmnd *cmd = req->special;
1486         struct scsi_device *sdev;
1487         struct scsi_target *starget;
1488         struct Scsi_Host *shost;
1489
1490         blk_start_request(req);
1491
1492         scmd_printk(KERN_INFO, cmd, "killing request\n");
1493
1494         sdev = cmd->device;
1495         starget = scsi_target(sdev);
1496         shost = sdev->host;
1497         scsi_init_cmd_errh(cmd);
1498         cmd->result = DID_NO_CONNECT << 16;
1499         atomic_inc(&cmd->device->iorequest_cnt);
1500
1501         /*
1502          * The SCSI completion path will do scsi_device_unbusy(), which
1503          * decrements these busy counters, so bump them here first, taking
1504          * the locks in the same order as the normal issue path.
1505          */
1506         sdev->device_busy++;
1507         spin_unlock(sdev->request_queue->queue_lock);
1508         spin_lock(shost->host_lock);
1509         shost->host_busy++;
1510         starget->target_busy++;
1511         spin_unlock(shost->host_lock);
1512         spin_lock(sdev->request_queue->queue_lock);
1513
1514         blk_complete_request(req);
1515 }
1516
1517 static void scsi_softirq_done(struct request *rq)
1518 {
1519         struct scsi_cmnd *cmd = rq->special;
1520         unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1521         int disposition;
1522
1523         INIT_LIST_HEAD(&cmd->eh_entry);
1524
1525         atomic_inc(&cmd->device->iodone_cnt);
1526         if (cmd->result)
1527                 atomic_inc(&cmd->device->ioerr_cnt);
1528
1529         disposition = scsi_decide_disposition(cmd);
1530         if (disposition != SUCCESS &&
1531             time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1532                 sdev_printk(KERN_ERR, cmd->device,
1533                             "timing out command, waited %lus\n",
1534                             wait_for/HZ);
1535                 disposition = SUCCESS;
1536         }
1537
1538         scsi_log_completion(cmd, disposition);
1539
1540         switch (disposition) {
1541                 case SUCCESS:
1542                         scsi_finish_command(cmd);
1543                         break;
1544                 case NEEDS_RETRY:
1545                         scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1546                         break;
1547                 case ADD_TO_MLQUEUE:
1548                         scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1549                         break;
1550                 default:
1551                         if (!scsi_eh_scmd_add(cmd, 0))
1552                                 scsi_finish_command(cmd);
1553         }
1554 }
1555
1556 /*
1557  * Function:    scsi_request_fn()
1558  *
1559  * Purpose:     Main strategy routine for SCSI.
1560  *
1561  * Arguments:   q       - Pointer to actual queue.
1562  *
1563  * Returns:     Nothing
1564  *
1565  * Lock status: IO request lock assumed to be held when called.
1566  */
1567 static void scsi_request_fn(struct request_queue *q)
1568 {
1569         struct scsi_device *sdev = q->queuedata;
1570         struct Scsi_Host *shost;
1571         struct scsi_cmnd *cmd;
1572         struct request *req;
1573
1574         if (!get_device(&sdev->sdev_gendev))
1575                 /* We must be tearing the block queue down already */
1576                 return;
1577
1578         /*
1579          * To start with, we keep looping until the queue is empty, or until
1580          * the host is no longer able to accept any more requests.
1581          */
1582         shost = sdev->host;
1583         for (;;) {
1584                 int rtn;
1585                 /*
1586                  * get next queueable request.  We do this early to make sure
1587                  * that the request is fully prepared even if we cannot
1588                  * accept it.
1589                  */
1590                 req = blk_peek_request(q);
1591                 if (!req || !scsi_dev_queue_ready(q, sdev))
1592                         break;
1593
1594                 if (unlikely(!scsi_device_online(sdev))) {
1595                         sdev_printk(KERN_ERR, sdev,
1596                                     "rejecting I/O to offline device\n");
1597                         scsi_kill_request(req, q);
1598                         continue;
1599                 }
1600
1601
1602                 /*
1603                  * Remove the request from the request list.
1604                  */
1605                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1606                         blk_start_request(req);
1607                 sdev->device_busy++;
1608
1609                 spin_unlock(q->queue_lock);
1610                 cmd = req->special;
1611                 if (unlikely(cmd == NULL)) {
1612                         printk(KERN_CRIT "impossible request in %s.\n"
1613                                          "please mail a stack trace to "
1614                                          "linux-scsi@vger.kernel.org\n",
1615                                          __func__);
1616                         blk_dump_rq_flags(req, "foo");
1617                         BUG();
1618                 }
1619                 spin_lock(shost->host_lock);
1620
1621                 /*
1622                  * We hit this when the driver is using a host wide
1623                  * tag map. For device level tag maps the queue_depth check
1624                  * in the device ready fn would prevent us from trying
1625                  * to allocate a tag. Since the map is a shared host resource
1626                  * we add the dev to the starved list so it eventually gets
1627                  * a run when a tag is freed.
1628                  */
1629                 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1630                         if (list_empty(&sdev->starved_entry))
1631                                 list_add_tail(&sdev->starved_entry,
1632                                               &shost->starved_list);
1633                         goto not_ready;
1634                 }
1635
1636                 if (!scsi_target_queue_ready(shost, sdev))
1637                         goto not_ready;
1638
1639                 if (!scsi_host_queue_ready(q, shost, sdev))
1640                         goto not_ready;
1641
1642                 scsi_target(sdev)->target_busy++;
1643                 shost->host_busy++;
1644
1645                 /*
1646                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1647                  *              take the lock again.
1648                  */
1649                 spin_unlock_irq(shost->host_lock);
1650
1651                 /*
1652                  * Finally, initialize any error handling parameters, and set up
1653                  * the timers for timeouts.
1654                  */
1655                 scsi_init_cmd_errh(cmd);
1656
1657                 /*
1658                  * Dispatch the command to the low-level driver.
1659                  */
1660                 rtn = scsi_dispatch_cmd(cmd);
1661                 spin_lock_irq(q->queue_lock);
1662                 if (rtn)
1663                         goto out_delay;
1664         }
1665
1666         goto out;
1667
1668  not_ready:
1669         spin_unlock_irq(shost->host_lock);
1670
1671         /*
1672          * lock q, handle tag, requeue req, and decrement device_busy. We
1673          * must return with queue_lock held.
1674          *
1675          * Decrementing device_busy without checking it is OK, as all such
1676          * cases (host limits or settings) should run the queue at some
1677          * later time.
1678          */
1679         spin_lock_irq(q->queue_lock);
1680         blk_requeue_request(q, req);
1681         sdev->device_busy--;
1682 out_delay:
1683         if (sdev->device_busy == 0)
1684                 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1685 out:
1686         /* must be careful here...if we trigger the ->remove() function
1687          * we cannot be holding the q lock */
1688         spin_unlock_irq(q->queue_lock);
1689         put_device(&sdev->sdev_gendev);
1690         spin_lock_irq(q->queue_lock);
1691 }
1692
1693 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1694 {
1695         struct device *host_dev;
1696         u64 bounce_limit = 0xffffffff;
1697
1698         if (shost->unchecked_isa_dma)
1699                 return BLK_BOUNCE_ISA;
1700         /*
1701          * Platforms with virtual-DMA translation
1702          * hardware have no practical limit.
1703          */
1704         if (!PCI_DMA_BUS_IS_PHYS)
1705                 return BLK_BOUNCE_ANY;
1706
1707         host_dev = scsi_get_device(shost);
1708         if (host_dev && host_dev->dma_mask)
1709                 bounce_limit = *host_dev->dma_mask;
1710
1711         return bounce_limit;
1712 }
1713 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1714
1715 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1716                                          request_fn_proc *request_fn)
1717 {
1718         struct request_queue *q;
1719         struct device *dev = shost->dma_dev;
1720
1721         q = blk_init_queue(request_fn, NULL);
1722         if (!q)
1723                 return NULL;
1724
1725         /*
1726          * this limit is imposed by hardware restrictions
1727          */
1728         blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1729                                         SCSI_MAX_SG_CHAIN_SEGMENTS));
1730
1731         if (scsi_host_prot_dma(shost)) {
1732                 shost->sg_prot_tablesize =
1733                         min_not_zero(shost->sg_prot_tablesize,
1734                                      (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1735                 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1736                 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1737         }
1738
1739         blk_queue_max_hw_sectors(q, shost->max_sectors);
1740         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1741         blk_queue_segment_boundary(q, shost->dma_boundary);
1742         dma_set_seg_boundary(dev, shost->dma_boundary);
1743
1744         blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1745
1746         if (!shost->use_clustering)
1747                 q->limits.cluster = 0;
1748
1749         /*
1750          * set a reasonable default alignment on word boundaries: the
1751          * host and device may alter it using
1752          * blk_queue_update_dma_alignment() later.
1753          */
1754         blk_queue_dma_alignment(q, 0x03);
1755
1756         return q;
1757 }
1758 EXPORT_SYMBOL(__scsi_alloc_queue);
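
/*
 * Illustrative sketch, not part of the original file: a transport class
 * that needs a request queue with the host's DMA limits applied but its
 * own request_fn (the FC transport does something similar for its bsg
 * queue) could build one like this.
 */
#if 0
static struct request_queue *example_alloc_private_queue(
                struct Scsi_Host *shost, request_fn_proc *fn)
{
        struct request_queue *q = __scsi_alloc_queue(shost, fn);

        if (!q)
                return NULL;
        /* further per-queue setup (prep_rq, timeout handler, ...) here */
        return q;
}
#endif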
1759
1760 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1761 {
1762         struct request_queue *q;
1763
1764         q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1765         if (!q)
1766                 return NULL;
1767
1768         blk_queue_prep_rq(q, scsi_prep_fn);
1769         blk_queue_softirq_done(q, scsi_softirq_done);
1770         blk_queue_rq_timed_out(q, scsi_times_out);
1771         blk_queue_lld_busy(q, scsi_lld_busy);
1772         return q;
1773 }
1774
1775 /*
1776  * Function:    scsi_block_requests()
1777  *
1778  * Purpose:     Utility function used by low-level drivers to prevent further
1779  *              commands from being queued to the device.
1780  *
1781  * Arguments:   shost       - Host in question
1782  *
1783  * Returns:     Nothing
1784  *
1785  * Lock status: No locks are assumed held.
1786  *
1787  * Notes:       There is no timer nor any other means by which the requests
1788  *              get unblocked other than the low-level driver calling
1789  *              scsi_unblock_requests().
1790  */
1791 void scsi_block_requests(struct Scsi_Host *shost)
1792 {
1793         shost->host_self_blocked = 1;
1794 }
1795 EXPORT_SYMBOL(scsi_block_requests);
1796
1797 /*
1798  * Function:    scsi_unblock_requests()
1799  *
1800  * Purpose:     Utility function used by low-level drivers to allow further
1801  *              commands to be queued to the device.
1802  *
1803  * Arguments:   shost       - Host in question
1804  *
1805  * Returns:     Nothing
1806  *
1807  * Lock status: No locks are assumed held.
1808  *
1809  * Notes:       There is no timer nor any other means by which the requests
1810  *              get unblocked other than the low-level driver calling
1811  *              scsi_unblock_requests().
1812  *
1813  *              This is done as an API function so that changes to the
1814  *              internals of the scsi mid-layer won't require wholesale
1815  *              changes to drivers that use this feature.
1816  */
1817 void scsi_unblock_requests(struct Scsi_Host *shost)
1818 {
1819         shost->host_self_blocked = 0;
1820         scsi_run_host_queues(shost);
1821 }
1822 EXPORT_SYMBOL(scsi_unblock_requests);
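
/*
 * Illustrative sketch, not part of the original file: the usual LLD
 * pairing, holding off the midlayer across an adapter reset.  The
 * example_* names are hypothetical.
 */
#if 0
static void example_reset_adapter(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);     /* no new commands are queued */
        example_hw_reset(shost);        /* hypothetical firmware reset */
        scsi_unblock_requests(shost);   /* restart all device queues */
}
#endif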
1823
1824 int __init scsi_init_queue(void)
1825 {
1826         int i;
1827
1828         scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1829                                            sizeof(struct scsi_data_buffer),
1830                                            0, 0, NULL);
1831         if (!scsi_sdb_cache) {
1832                 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1833                 return -ENOMEM;
1834         }
1835
1836         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1837                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1838                 int size = sgp->size * sizeof(struct scatterlist);
1839
1840                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1841                                 SLAB_HWCACHE_ALIGN, NULL);
1842                 if (!sgp->slab) {
1843                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1844                                         sgp->name);
1845                         goto cleanup_sdb;
1846                 }
1847
1848                 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1849                                                      sgp->slab);
1850                 if (!sgp->pool) {
1851                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1852                                         sgp->name);
1853                         goto cleanup_sdb;
1854                 }
1855         }
1856
1857         return 0;
1858
1859 cleanup_sdb:
1860         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1861                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1862                 if (sgp->pool)
1863                         mempool_destroy(sgp->pool);
1864                 if (sgp->slab)
1865                         kmem_cache_destroy(sgp->slab);
1866         }
1867         kmem_cache_destroy(scsi_sdb_cache);
1868
1869         return -ENOMEM;
1870 }
1871
1872 void scsi_exit_queue(void)
1873 {
1874         int i;
1875
1876         kmem_cache_destroy(scsi_sdb_cache);
1877
1878         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1879                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1880                 mempool_destroy(sgp->pool);
1881                 kmem_cache_destroy(sgp->slab);
1882         }
1883 }
1884
1885 /**
1886  *      scsi_mode_select - issue a mode select
1887  *      @sdev:  SCSI device to be queried
1888  *      @pf:    Page format bit (1 == standard, 0 == vendor specific)
1889  *      @sp:    Save page bit (0 == don't save, 1 == save)
1890  *      @modepage: mode page being requested
1891  *      @buffer: request buffer (may not be smaller than eight bytes)
1892  *      @len:   length of request buffer.
1893  *      @timeout: command timeout
1894  *      @retries: number of retries before failing
1895  *      @data: returns a structure abstracting the mode header data
1896  *      @sshdr: place to put sense data (or NULL if no sense to be collected).
1897  *              must be SCSI_SENSE_BUFFERSIZE big.
1898  *
1899  *      Returns zero if successful; negative error number or scsi
1900  *      status on error
1901  *
1902  */
1903 int
1904 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1905                  unsigned char *buffer, int len, int timeout, int retries,
1906                  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1907 {
1908         unsigned char cmd[10];
1909         unsigned char *real_buffer;
1910         int ret;
1911
1912         memset(cmd, 0, sizeof(cmd));
1913         cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1914
1915         if (sdev->use_10_for_ms) {
1916                 if (len > 65535)
1917                         return -EINVAL;
1918                 real_buffer = kmalloc(8 + len, GFP_KERNEL);
1919                 if (!real_buffer)
1920                         return -ENOMEM;
1921                 memcpy(real_buffer + 8, buffer, len);
1922                 len += 8;
1923                 real_buffer[0] = 0;
1924                 real_buffer[1] = 0;
1925                 real_buffer[2] = data->medium_type;
1926                 real_buffer[3] = data->device_specific;
1927                 real_buffer[4] = data->longlba ? 0x01 : 0;
1928                 real_buffer[5] = 0;
1929                 real_buffer[6] = data->block_descriptor_length >> 8;
1930                 real_buffer[7] = data->block_descriptor_length;
1931
1932                 cmd[0] = MODE_SELECT_10;
1933                 cmd[7] = len >> 8;
1934                 cmd[8] = len;
1935         } else {
1936                 if (len > 255 || data->block_descriptor_length > 255 ||
1937                     data->longlba)
1938                         return -EINVAL;
1939
1940                 real_buffer = kmalloc(4 + len, GFP_KERNEL);
1941                 if (!real_buffer)
1942                         return -ENOMEM;
1943                 memcpy(real_buffer + 4, buffer, len);
1944                 len += 4;
1945                 real_buffer[0] = 0;
1946                 real_buffer[1] = data->medium_type;
1947                 real_buffer[2] = data->device_specific;
1948                 real_buffer[3] = data->block_descriptor_length;
1949
1950
1951                 cmd[0] = MODE_SELECT;
1952                 cmd[4] = len;
1953         }
1954
1955         ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1956                                sshdr, timeout, retries, NULL);
1957         kfree(real_buffer);
1958         return ret;
1959 }
1960 EXPORT_SYMBOL_GPL(scsi_mode_select);
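
/*
 * Illustrative sketch, not part of the original file: issuing a MODE
 * SELECT with a canned caching page.  A real caller would normally
 * read-modify-write the current page via scsi_mode_sense() below;
 * timeout and retry values here are arbitrary.
 */
#if 0
static int example_write_caching_page(struct scsi_device *sdev)
{
        unsigned char pg[20] = { 0x08, 0x12, }; /* page 8, length 0x12 */
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;

        memset(&data, 0, sizeof(data));         /* no block descriptors */
        return scsi_mode_select(sdev, 1 /* PF */, 0 /* !SP */, 0x08,
                                pg, sizeof(pg), 30 * HZ, 3, &data, &sshdr);
}
#endif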
1961
1962 /**
1963  *      scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
1964  *      @sdev:  SCSI device to be queried
1965  *      @dbd:   set if mode sense will allow block descriptors to be returned
1966  *      @modepage: mode page being requested
1967  *      @buffer: request buffer (may not be smaller than eight bytes)
1968  *      @len:   length of request buffer.
1969  *      @timeout: command timeout
1970  *      @retries: number of retries before failing
1971  *      @data: returns a structure abstracting the mode header data
1972  *      @sshdr: place to put sense data (or NULL if no sense to be collected).
1973  *              must be SCSI_SENSE_BUFFERSIZE big.
1974  *
1975  *      Returns zero if successful, or a non-zero result on error.  On
1976  *      success, data->header_length holds the header length (either 4
1977  *      or 8 depending on whether a six or ten byte command was issued).
1978  */
1979 int
1980 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1981                   unsigned char *buffer, int len, int timeout, int retries,
1982                   struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1983 {
1984         unsigned char cmd[12];
1985         int use_10_for_ms;
1986         int header_length;
1987         int result;
1988         struct scsi_sense_hdr my_sshdr;
1989
1990         memset(data, 0, sizeof(*data));
1991         memset(&cmd[0], 0, 12);
1992         cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
1993         cmd[2] = modepage;
1994
1995         /* caller might not be interested in sense, but we need it */
1996         if (!sshdr)
1997                 sshdr = &my_sshdr;
1998
1999  retry:
2000         use_10_for_ms = sdev->use_10_for_ms;
2001
2002         if (use_10_for_ms) {
2003                 if (len < 8)
2004                         len = 8;
2005
2006                 cmd[0] = MODE_SENSE_10;
2007                 cmd[8] = len;
2008                 header_length = 8;
2009         } else {
2010                 if (len < 4)
2011                         len = 4;
2012
2013                 cmd[0] = MODE_SENSE;
2014                 cmd[4] = len;
2015                 header_length = 4;
2016         }
2017
2018         memset(buffer, 0, len);
2019
2020         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2021                                   sshdr, timeout, retries, NULL);
2022
2023         /* This code looks awful: what it's doing is making sure an
2024          * ILLEGAL REQUEST sense return identifies the actual command
2025          * byte as the problem.  MODE_SENSE commands can return
2026          * ILLEGAL REQUEST if the code page isn't supported. */
2027
2028         if (use_10_for_ms && !scsi_status_is_good(result) &&
2029             (driver_byte(result) & DRIVER_SENSE)) {
2030                 if (scsi_sense_valid(sshdr)) {
2031                         if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2032                             (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2033                                 /*
2034                                  * Invalid command operation code
2035                                  */
2036                                 sdev->use_10_for_ms = 0;
2037                                 goto retry;
2038                         }
2039                 }
2040         }
2041
2042         if (scsi_status_is_good(result)) {
2043                 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2044                              (modepage == 6 || modepage == 8))) {
2045                         /* Initio breakage? */
2046                         header_length = 0;
2047                         data->length = 13;
2048                         data->medium_type = 0;
2049                         data->device_specific = 0;
2050                         data->longlba = 0;
2051                         data->block_descriptor_length = 0;
2052                 } else if (use_10_for_ms) {
2053                         data->length = buffer[0]*256 + buffer[1] + 2;
2054                         data->medium_type = buffer[2];
2055                         data->device_specific = buffer[3];
2056                         data->longlba = buffer[4] & 0x01;
2057                         data->block_descriptor_length = buffer[6]*256
2058                                 + buffer[7];
2059                 } else {
2060                         data->length = buffer[0] + 1;
2061                         data->medium_type = buffer[1];
2062                         data->device_specific = buffer[2];
2063                         data->block_descriptor_length = buffer[3];
2064                 }
2065                 data->header_length = header_length;
2066         }
2067
2068         return result;
2069 }
2070 EXPORT_SYMBOL(scsi_mode_sense);
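
/*
 * Illustrative sketch, not part of the original file: a read-modify-write
 * of the caching page, similar in spirit to sd's cache_type_store().
 * Timeout/retry values are arbitrary; 0x04 is the WCE bit in byte 2 of
 * mode page 8.
 */
#if 0
static int example_enable_wce(struct scsi_device *sdev)
{
        unsigned char buf[64];
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int off, len, ret;

        ret = scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
                              30 * HZ, 3, &data, NULL);
        if (!scsi_status_is_good(ret))
                return -EIO;

        off = data.header_length + data.block_descriptor_length;
        len = min_t(int, data.length - off, (int)sizeof(buf) - off);
        buf[off + 2] |= 0x04;           /* set WCE */

        data.device_specific = 0;       /* reserved on MODE SELECT */
        return scsi_mode_select(sdev, 1, 0, 0x08, buf + off, len,
                                30 * HZ, 3, &data, &sshdr);
}
#endif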
2071
2072 /**
2073  *      scsi_test_unit_ready - test if unit is ready
2074  *      @sdev:  scsi device to change the state of.
2075  *      @timeout: command timeout
2076  *      @retries: number of retries before failing
2077  *      @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2078  *              returning sense. Make sure that this is cleared before passing
2079  *              in.
2080  *
2081  *      Returns zero if successful, or an error if the TUR failed.  For
2082  *      removable media, UNIT_ATTENTION sets ->changed flag.
2083  **/
2084 int
2085 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2086                      struct scsi_sense_hdr *sshdr_external)
2087 {
2088         char cmd[] = {
2089                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2090         };
2091         struct scsi_sense_hdr *sshdr;
2092         int result;
2093
2094         if (!sshdr_external)
2095                 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2096         else
2097                 sshdr = sshdr_external;
2098
2099         /* try to eat the UNIT_ATTENTION if there are enough retries */
2100         do {
2101                 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2102                                           timeout, retries, NULL);
2103                 if (sdev->removable && scsi_sense_valid(sshdr) &&
2104                     sshdr->sense_key == UNIT_ATTENTION)
2105                         sdev->changed = 1;
2106         } while (scsi_sense_valid(sshdr) &&
2107                  sshdr->sense_key == UNIT_ATTENTION && --retries);
2108
2109         if (!sshdr_external)
2110                 kfree(sshdr);
2111         return result;
2112 }
2113 EXPORT_SYMBOL(scsi_test_unit_ready);
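
/*
 * Illustrative sketch, not part of the original file: polling whether
 * media is ready, as a media-change poller might.  Timeout and retry
 * values are arbitrary.
 */
#if 0
static bool example_media_ready(struct scsi_device *sdev)
{
        struct scsi_sense_hdr sshdr;

        memset(&sshdr, 0, sizeof(sshdr));       /* must be cleared */
        return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
}
#endif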
2114
2115 /**
2116  *      scsi_device_set_state - Take the given device through the device state model.
2117  *      @sdev:  scsi device to change the state of.
2118  *      @state: state to change to.
2119  *
2120  *      Returns zero if successful, or an error if the requested
2121  *      transition is illegal.
2122  */
2123 int
2124 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2125 {
2126         enum scsi_device_state oldstate = sdev->sdev_state;
2127
2128         if (state == oldstate)
2129                 return 0;
2130
2131         switch (state) {
2132         case SDEV_CREATED:
2133                 switch (oldstate) {
2134                 case SDEV_CREATED_BLOCK:
2135                         break;
2136                 default:
2137                         goto illegal;
2138                 }
2139                 break;
2140
2141         case SDEV_RUNNING:
2142                 switch (oldstate) {
2143                 case SDEV_CREATED:
2144                 case SDEV_OFFLINE:
2145                 case SDEV_TRANSPORT_OFFLINE:
2146                 case SDEV_QUIESCE:
2147                 case SDEV_BLOCK:
2148                         break;
2149                 default:
2150                         goto illegal;
2151                 }
2152                 break;
2153
2154         case SDEV_QUIESCE:
2155                 switch (oldstate) {
2156                 case SDEV_RUNNING:
2157                 case SDEV_OFFLINE:
2158                 case SDEV_TRANSPORT_OFFLINE:
2159                         break;
2160                 default:
2161                         goto illegal;
2162                 }
2163                 break;
2164
2165         case SDEV_OFFLINE:
2166         case SDEV_TRANSPORT_OFFLINE:
2167                 switch (oldstate) {
2168                 case SDEV_CREATED:
2169                 case SDEV_RUNNING:
2170                 case SDEV_QUIESCE:
2171                 case SDEV_BLOCK:
2172                         break;
2173                 default:
2174                         goto illegal;
2175                 }
2176                 break;
2177
2178         case SDEV_BLOCK:
2179                 switch (oldstate) {
2180                 case SDEV_RUNNING:
2181                 case SDEV_CREATED_BLOCK:
2182                         break;
2183                 default:
2184                         goto illegal;
2185                 }
2186                 break;
2187
2188         case SDEV_CREATED_BLOCK:
2189                 switch (oldstate) {
2190                 case SDEV_CREATED:
2191                         break;
2192                 default:
2193                         goto illegal;
2194                 }
2195                 break;
2196
2197         case SDEV_CANCEL:
2198                 switch (oldstate) {
2199                 case SDEV_CREATED:
2200                 case SDEV_RUNNING:
2201                 case SDEV_QUIESCE:
2202                 case SDEV_OFFLINE:
2203                 case SDEV_TRANSPORT_OFFLINE:
2204                 case SDEV_BLOCK:
2205                         break;
2206                 default:
2207                         goto illegal;
2208                 }
2209                 break;
2210
2211         case SDEV_DEL:
2212                 switch (oldstate) {
2213                 case SDEV_CREATED:
2214                 case SDEV_RUNNING:
2215                 case SDEV_OFFLINE:
2216                 case SDEV_TRANSPORT_OFFLINE:
2217                 case SDEV_CANCEL:
2218                 case SDEV_CREATED_BLOCK:
2219                         break;
2220                 default:
2221                         goto illegal;
2222                 }
2223                 break;
2224
2225         }
2226         sdev->sdev_state = state;
2227         return 0;
2228
2229  illegal:
2230         SCSI_LOG_ERROR_RECOVERY(1,
2231                                 sdev_printk(KERN_ERR, sdev,
2232                                             "Illegal state transition %s->%s\n",
2233                                             scsi_device_state_name(oldstate),
2234                                             scsi_device_state_name(state))
2235                                 );
2236         return -EINVAL;
2237 }
2238 EXPORT_SYMBOL(scsi_device_set_state);
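
/*
 * Illustrative sketch, not part of the original file: taking a device
 * offline and back, as the sysfs "state" attribute effectively does;
 * an illegal transition is rejected with -EINVAL.
 */
#if 0
static int example_bounce_offline(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_OFFLINE);

        if (err)
                return err;     /* transition was not legal */
        return scsi_device_set_state(sdev, SDEV_RUNNING);
}
#endif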
2239
2240 /**
2241  *      scsi_evt_emit - emit a single SCSI device uevent
2242  *      @sdev: associated SCSI device
2243  *      @evt: event to emit
2244  *
2245  *      Send a single uevent (scsi_event) to the associated scsi_device.
2246  */
2247 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2248 {
2249         int idx = 0;
2250         char *envp[3];
2251
2252         switch (evt->evt_type) {
2253         case SDEV_EVT_MEDIA_CHANGE:
2254                 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2255                 break;
2256         case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2257                 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2258                 break;
2259         case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2260                 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2261                 break;
2262         case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2263                envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2264                 break;
2265         case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2266                 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2267                 break;
2268         case SDEV_EVT_LUN_CHANGE_REPORTED:
2269                 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2270                 break;
2271         default:
2272                 /* do nothing */
2273                 break;
2274         }
2275
2276         envp[idx++] = NULL;
2277
2278         kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2279 }
2280
2281 /**
2282  *      scsi_evt_thread - send a uevent for each scsi event
2283  *      @work: work struct for scsi_device
2284  *
2285  *      Dispatch queued events to their associated scsi_device kobjects
2286  *      as uevents.
2287  */
2288 void scsi_evt_thread(struct work_struct *work)
2289 {
2290         struct scsi_device *sdev;
2291         enum scsi_device_event evt_type;
2292         LIST_HEAD(event_list);
2293
2294         sdev = container_of(work, struct scsi_device, event_work);
2295
2296         for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2297                 if (test_and_clear_bit(evt_type, sdev->pending_events))
2298                         sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2299
2300         while (1) {
2301                 struct scsi_event *evt;
2302                 struct list_head *this, *tmp;
2303                 unsigned long flags;
2304
2305                 spin_lock_irqsave(&sdev->list_lock, flags);
2306                 list_splice_init(&sdev->event_list, &event_list);
2307                 spin_unlock_irqrestore(&sdev->list_lock, flags);
2308
2309                 if (list_empty(&event_list))
2310                         break;
2311
2312                 list_for_each_safe(this, tmp, &event_list) {
2313                         evt = list_entry(this, struct scsi_event, node);
2314                         list_del(&evt->node);
2315                         scsi_evt_emit(sdev, evt);
2316                         kfree(evt);
2317                 }
2318         }
2319 }
2320
2321 /**
2322  *      sdev_evt_send - send asserted event to uevent thread
2323  *      @sdev: scsi_device event occurred on
2324  *      @evt: event to send
2325  *
2326  *      Assert scsi device event asynchronously.
2327  */
2328 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2329 {
2330         unsigned long flags;
2331
2332 #if 0
2333         /* FIXME: currently this check eliminates all media change events
2334          * for polled devices.  Need to update to discriminate between AN
2335          * and polled events */
2336         if (!test_bit(evt->evt_type, sdev->supported_events)) {
2337                 kfree(evt);
2338                 return;
2339         }
2340 #endif
2341
2342         spin_lock_irqsave(&sdev->list_lock, flags);
2343         list_add_tail(&evt->node, &sdev->event_list);
2344         schedule_work(&sdev->event_work);
2345         spin_unlock_irqrestore(&sdev->list_lock, flags);
2346 }
2347 EXPORT_SYMBOL_GPL(sdev_evt_send);
2348
2349 /**
2350  *      sdev_evt_alloc - allocate a new scsi event
2351  *      @evt_type: type of event to allocate
2352  *      @gfpflags: GFP flags for allocation
2353  *
2354  *      Allocates and returns a new scsi_event.
2355  */
2356 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2357                                   gfp_t gfpflags)
2358 {
2359         struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2360         if (!evt)
2361                 return NULL;
2362
2363         evt->evt_type = evt_type;
2364         INIT_LIST_HEAD(&evt->node);
2365
2366         /* evt_type-specific initialization, if any */
2367         switch (evt_type) {
2368         case SDEV_EVT_MEDIA_CHANGE:
2369         case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2370         case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2371         case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2372         case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2373         case SDEV_EVT_LUN_CHANGE_REPORTED:
2374         default:
2375                 /* do nothing */
2376                 break;
2377         }
2378
2379         return evt;
2380 }
2381 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2382
2383 /**
2384  *      sdev_evt_send_simple - send asserted event to uevent thread
2385  *      @sdev: scsi_device event occurred on
2386  *      @evt_type: type of event to send
2387  *      @gfpflags: GFP flags for allocation
2388  *
2389  *      Assert scsi device event asynchronously, given an event type.
2390  */
2391 void sdev_evt_send_simple(struct scsi_device *sdev,
2392                           enum scsi_device_event evt_type, gfp_t gfpflags)
2393 {
2394         struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2395         if (!evt) {
2396                 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2397                             evt_type);
2398                 return;
2399         }
2400
2401         sdev_evt_send(sdev, evt);
2402 }
2403 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
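
/*
 * Illustrative sketch, not part of the original file: an LLD that has
 * decoded a CAPACITY DATA HAS CHANGED unit attention can push the event
 * to user space like this; GFP_ATOMIC makes it safe from interrupt
 * context.
 */
#if 0
static void example_report_capacity_change(struct scsi_device *sdev)
{
        sdev_evt_send_simple(sdev, SDEV_EVT_CAPACITY_CHANGE_REPORTED,
                             GFP_ATOMIC);
}
#endif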
2404
2405 /**
2406  *      scsi_device_quiesce - Block user issued commands.
2407  *      @sdev:  scsi device to quiesce.
2408  *
2409  *      This works by trying to transition to the SDEV_QUIESCE state
2410  *      (which must be a legal transition).  When the device is in this
2411  *      state, only special requests will be accepted, all others will
2412  *      be deferred.  Since special requests may also be requeued requests,
2413  *      a successful return doesn't guarantee the device will be
2414  *      totally quiescent.
2415  *
2416  *      Must be called with user context, may sleep.
2417  *
2418  *      Returns zero if successful, an error otherwise.
2419  */
2420 int
2421 scsi_device_quiesce(struct scsi_device *sdev)
2422 {
2423         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2424         if (err)
2425                 return err;
2426
2427         scsi_run_queue(sdev->request_queue);
2428         while (sdev->device_busy) {
2429                 msleep_interruptible(200);
2430                 scsi_run_queue(sdev->request_queue);
2431         }
2432         return 0;
2433 }
2434 EXPORT_SYMBOL(scsi_device_quiesce);
2435
2436 /**
2437  *      scsi_device_resume - Restart user issued commands to a quiesced device.
2438  *      @sdev:  scsi device to resume.
2439  *
2440  *      Moves the device from quiesced back to running and restarts the
2441  *      queues.
2442  *
2443  *      Must be called with user context, may sleep.
2444  */
2445 void scsi_device_resume(struct scsi_device *sdev)
2446 {
2447         /* check if the device state was mutated prior to resume, and if
2448          * so assume the state is being managed elsewhere (for example
2449          * device deleted during suspend)
2450          */
2451         if (sdev->sdev_state != SDEV_QUIESCE ||
2452             scsi_device_set_state(sdev, SDEV_RUNNING))
2453                 return;
2454         scsi_run_queue(sdev->request_queue);
2455 }
2456 EXPORT_SYMBOL(scsi_device_resume);
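
/*
 * Illustrative sketch, not part of the original file: the usual
 * quiesce/resume bracket around internal housekeeping commands (SPI
 * domain validation quiesces a whole target this way through
 * scsi_target_quiesce() below).  example_issue_internal_commands() is
 * hypothetical.
 */
#if 0
static int example_with_quiesced_device(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);

        if (err)
                return err;
        /* only special (e.g. BLOCK_PC) requests get through now */
        example_issue_internal_commands(sdev);
        scsi_device_resume(sdev);
        return 0;
}
#endif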
2457
2458 static void
2459 device_quiesce_fn(struct scsi_device *sdev, void *data)
2460 {
2461         scsi_device_quiesce(sdev);
2462 }
2463
2464 void
2465 scsi_target_quiesce(struct scsi_target *starget)
2466 {
2467         starget_for_each_device(starget, NULL, device_quiesce_fn);
2468 }
2469 EXPORT_SYMBOL(scsi_target_quiesce);
2470
2471 static void
2472 device_resume_fn(struct scsi_device *sdev, void *data)
2473 {
2474         scsi_device_resume(sdev);
2475 }
2476
2477 void
2478 scsi_target_resume(struct scsi_target *starget)
2479 {
2480         starget_for_each_device(starget, NULL, device_resume_fn);
2481 }
2482 EXPORT_SYMBOL(scsi_target_resume);
2483
2484 /**
2485  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2486  * @sdev:       device to block
2487  *
2488  * Block request made by scsi LLDs to temporarily stop all
2489  * scsi commands on the specified device.  Called from interrupt
2490  * or normal process context.
2491  *
2492  * Returns zero if successful or error if not
2493  *
2494  * Notes:
2495  *      This routine transitions the device to the SDEV_BLOCK state
2496  *      (which must be a legal transition).  When the device is in this
2497  *      state, all commands are deferred until the scsi lld reenables
2498  *      the device with scsi_device_unblock or device_block_tmo fires.
2499  */
2500 int
2501 scsi_internal_device_block(struct scsi_device *sdev)
2502 {
2503         struct request_queue *q = sdev->request_queue;
2504         unsigned long flags;
2505         int err = 0;
2506
2507         err = scsi_device_set_state(sdev, SDEV_BLOCK);
2508         if (err) {
2509                 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2510
2511                 if (err)
2512                         return err;
2513         }
2514
2515         /*
2516          * The device has transitioned to SDEV_BLOCK.  Stop the
2517          * block layer from calling the midlayer with this device's
2518          * request queue.
2519          */
2520         spin_lock_irqsave(q->queue_lock, flags);
2521         blk_stop_queue(q);
2522         spin_unlock_irqrestore(q->queue_lock, flags);
2523
2524         return 0;
2525 }
2526 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2527
2528 /**
2529  * scsi_internal_device_unblock - resume a device after a block request
2530  * @sdev:       device to resume
2531  * @new_state:  state to set devices to after unblocking
2532  *
2533  * Called by scsi LLDs or the midlayer to restart the device queue
2534  * for the previously suspended scsi device.  Called from interrupt or
2535  * normal process context.
2536  *
2537  * Returns zero if successful or error if not.
2538  *
2539  * Notes:
2540  *      This routine transitions the device to the SDEV_RUNNING state
2541  *      or to one of the offline states (which must be a legal transition)
2542  *      allowing the midlayer to goose the queue for this device.
2543  */
2544 int
2545 scsi_internal_device_unblock(struct scsi_device *sdev,
2546                              enum scsi_device_state new_state)
2547 {
2548         struct request_queue *q = sdev->request_queue;
2549         unsigned long flags;
2550
2551         /*
2552          * Try to transition the scsi device to SDEV_RUNNING or one of the
2553          * offlined states and goose the device queue if successful.
2554          */
2555         if ((sdev->sdev_state == SDEV_BLOCK) ||
2556             (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2557                 sdev->sdev_state = new_state;
2558         else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2559                 if (new_state == SDEV_TRANSPORT_OFFLINE ||
2560                     new_state == SDEV_OFFLINE)
2561                         sdev->sdev_state = new_state;
2562                 else
2563                         sdev->sdev_state = SDEV_CREATED;
2564         } else if (sdev->sdev_state != SDEV_CANCEL &&
2565                  sdev->sdev_state != SDEV_OFFLINE)
2566                 return -EINVAL;
2567
2568         spin_lock_irqsave(q->queue_lock, flags);
2569         blk_start_queue(q);
2570         spin_unlock_irqrestore(q->queue_lock, flags);
2571
2572         return 0;
2573 }
2574 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
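
/*
 * Illustrative sketch, not part of the original file: a transport
 * holding off I/O while a remote port is temporarily gone, then either
 * releasing it or failing it over (the FC transport does this around
 * rport loss, with dev_loss_tmo picking the final state).
 */
#if 0
static void example_port_bounce(struct scsi_device *sdev, bool came_back)
{
        scsi_internal_device_block(sdev);
        /* ... wait for the port to return or for a timeout ... */
        scsi_internal_device_unblock(sdev, came_back ?
                                     SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
}
#endif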
2575
2576 static void
2577 device_block(struct scsi_device *sdev, void *data)
2578 {
2579         scsi_internal_device_block(sdev);
2580 }
2581
2582 static int
2583 target_block(struct device *dev, void *data)
2584 {
2585         if (scsi_is_target_device(dev))
2586                 starget_for_each_device(to_scsi_target(dev), NULL,
2587                                         device_block);
2588         return 0;
2589 }
2590
2591 void
2592 scsi_target_block(struct device *dev)
2593 {
2594         if (scsi_is_target_device(dev))
2595                 starget_for_each_device(to_scsi_target(dev), NULL,
2596                                         device_block);
2597         else
2598                 device_for_each_child(dev, NULL, target_block);
2599 }
2600 EXPORT_SYMBOL_GPL(scsi_target_block);
2601
2602 static void
2603 device_unblock(struct scsi_device *sdev, void *data)
2604 {
2605         scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2606 }
2607
2608 static int
2609 target_unblock(struct device *dev, void *data)
2610 {
2611         if (scsi_is_target_device(dev))
2612                 starget_for_each_device(to_scsi_target(dev), data,
2613                                         device_unblock);
2614         return 0;
2615 }
2616
2617 void
2618 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2619 {
2620         if (scsi_is_target_device(dev))
2621                 starget_for_each_device(to_scsi_target(dev), &new_state,
2622                                         device_unblock);
2623         else
2624                 device_for_each_child(dev, &new_state, target_unblock);
2625 }
2626 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2627
2628 /**
2629  * scsi_kmap_atomic_sg - find and atomically map an sg-element
2630  * @sgl:        scatter-gather list
2631  * @sg_count:   number of segments in sg
2632  * @offset:     offset in bytes into sg, on return offset into the mapped area
2633  * @len:        bytes to map, on return number of bytes mapped
2634  *
2635  * Returns virtual address of the start of the mapped page
2636  */
2637 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2638                           size_t *offset, size_t *len)
2639 {
2640         int i;
2641         size_t sg_len = 0, len_complete = 0;
2642         struct scatterlist *sg;
2643         struct page *page;
2644
2645         WARN_ON(!irqs_disabled());
2646
2647         for_each_sg(sgl, sg, sg_count, i) {
2648                 len_complete = sg_len; /* Complete sg-entries */
2649                 sg_len += sg->length;
2650                 if (sg_len > *offset)
2651                         break;
2652         }
2653
2654         if (unlikely(i == sg_count)) {
2655                 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2656                         "elements %d\n",
2657                        __func__, sg_len, *offset, sg_count);
2658                 WARN_ON(1);
2659                 return NULL;
2660         }
2661
2662         /* Offset starting from the beginning of first page in this sg-entry */
2663         *offset = *offset - len_complete + sg->offset;
2664
2665         /* Assumption: contiguous pages can be accessed as "page + i" */
2666         page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2667         *offset &= ~PAGE_MASK;
2668
2669         /* Bytes in this sg-entry from *offset to the end of the page */
2670         sg_len = PAGE_SIZE - *offset;
2671         if (*len > sg_len)
2672                 *len = sg_len;
2673
2674         return kmap_atomic(page);
2675 }
2676 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2677
2678 /**
2679  * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2680  * @virt:       virtual address to be unmapped
2681  */
2682 void scsi_kunmap_atomic_sg(void *virt)
2683 {
2684         kunmap_atomic(virt);
2685 }
2686 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
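
/*
 * Illustrative sketch, not part of the original file: copying a small
 * response into a command's scatterlist.  Interrupts must be disabled
 * across the mapping (the WARN_ON above enforces this); len may be
 * clipped to the end of the mapped page, and the returned address is
 * the start of that page, so the updated offset must be added back.
 */
#if 0
static void example_copy_to_cmd(struct scsi_cmnd *cmd,
                                const void *src, size_t count)
{
        size_t offset = 0, len = count;
        unsigned long flags;
        void *vaddr;

        local_irq_save(flags);
        vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
                                    &offset, &len);
        if (vaddr) {
                memcpy(vaddr + offset, src, len);
                scsi_kunmap_atomic_sg(vaddr);
        }
        local_irq_restore(flags);
}
#endif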
2687
2688 void sdev_disable_disk_events(struct scsi_device *sdev)
2689 {
2690         atomic_inc(&sdev->disk_events_disable_depth);
2691 }
2692 EXPORT_SYMBOL(sdev_disable_disk_events);
2693
2694 void sdev_enable_disk_events(struct scsi_device *sdev)
2695 {
2696         if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
2697                 return;
2698         atomic_dec(&sdev->disk_events_disable_depth);
2699 }
2700 EXPORT_SYMBOL(sdev_enable_disk_events);
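
/*
 * Illustrative sketch, not part of the original file: suppressing media
 * polling around an operation that would otherwise confuse the disk
 * events code; the calls nest by way of the disable depth counter.
 * example_do_quiet_work() is hypothetical.
 */
#if 0
static void example_without_disk_events(struct scsi_device *sdev)
{
        sdev_disable_disk_events(sdev);
        example_do_quiet_work(sdev);
        sdev_enable_disk_events(sdev);
}
#endif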