1 /* Copyright 2012 STEC, Inc.
2  *
3  * This file is licensed under the terms of the 3-clause
4  * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5  * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6  * at your option. Both licenses are also available in the LICENSE file
7  * distributed with this project. This file may not be copied, modified,
8  * or distributed except in accordance with those terms.
9  * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10  * Initial Driver Design!
11  * Thomas Swann <tswann@stec-inc.com>
12  * Interrupt handling.
13  * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14  * biomode implementation.
15  * Akhil Bhansali <abhansali@stec-inc.com>
16  * Added support for DISCARD / FLUSH and FUA.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/blkdev.h>
26 #include <linux/sched.h>
27 #include <linux/interrupt.h>
28 #include <linux/compiler.h>
29 #include <linux/workqueue.h>
30 #include <linux/bitops.h>
31 #include <linux/delay.h>
32 #include <linux/time.h>
33 #include <linux/hdreg.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/completion.h>
36 #include <linux/scatterlist.h>
37 #include <linux/version.h>
38 #include <linux/err.h>
40 #include <linux/aer.h>
41 #include <linux/ctype.h>
42 #include <linux/wait.h>
43 #include <linux/uio.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <scsi/sg.h>
49 #include <linux/io.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include "skd_s1120.h"
54
55 static int skd_dbg_level;
56 static int skd_isr_comp_limit = 4;
57
58 enum {
59         STEC_LINK_2_5GTS = 0,
60         STEC_LINK_5GTS = 1,
61         STEC_LINK_8GTS = 2,
62         STEC_LINK_UNKNOWN = 0xFF
63 };
64
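/*
 * Stages used when a flush (REQ_FLUSH) request is handled as a zero-length
 * cache-flush command followed by the data transfer.
 */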
65 enum {
66         SKD_FLUSH_INITIALIZER,
67         SKD_FLUSH_ZERO_SIZE_FIRST,
68         SKD_FLUSH_DATA_SECOND,
69 };
70
71 #define SKD_ASSERT(expr) \
72         do { \
73                 if (unlikely(!(expr))) { \
74                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
75                                # expr, __FILE__, __func__, __LINE__); \
76                 } \
77         } while (0)
78
79 #define DRV_NAME "skd"
80 #define DRV_VERSION "2.2.1"
81 #define DRV_BUILD_ID "0260"
82 #define PFX DRV_NAME ": "
83 #define DRV_BIN_VERSION 0x100
84 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
85
86 MODULE_AUTHOR("bug-reports: support@stec-inc.com");
87 MODULE_LICENSE("Dual BSD/GPL");
88
89 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
90 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
91
92 #define PCI_VENDOR_ID_STEC      0x1B39
93 #define PCI_DEVICE_ID_S1120     0x0001
94
95 #define SKD_FUA_NV              (1 << 1)
96 #define SKD_MINORS_PER_DEVICE   16
97
98 #define SKD_MAX_QUEUE_DEPTH     200u
99
100 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
101
102 #define SKD_N_FITMSG_BYTES      (512u)
103
104 #define SKD_N_SPECIAL_CONTEXT   32u
105 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
106
107 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
108  * 128KB limit.  That allows 4096*4K = 16M xfer size
109  */
110 #define SKD_N_SG_PER_REQ_DEFAULT 256u
111 #define SKD_N_SG_PER_SPECIAL    256u
112
113 #define SKD_N_COMPLETION_ENTRY  256u
114 #define SKD_N_READ_CAP_BYTES    (8u)
115
116 #define SKD_N_INTERNAL_BYTES    (512u)
117
118 /* 6 bits of uniquifier, 0xFC00 */
119 #define SKD_ID_INCR             (0x400)
120 #define SKD_ID_TABLE_MASK       (3u << 8u)
121 #define  SKD_ID_RW_REQUEST      (0u << 8u)
122 #define  SKD_ID_INTERNAL        (1u << 8u)
123 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
124 #define  SKD_ID_FIT_MSG         (3u << 8u)
125 #define SKD_ID_SLOT_MASK        0x00FFu
126 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
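/*
 * A context id therefore decodes as slot = id & SKD_ID_SLOT_MASK and
 * table = id & SKD_ID_TABLE_MASK; the bits above SKD_ID_SLOT_AND_TABLE_MASK
 * act as a generation count, bumped by SKD_ID_INCR each time the context is
 * reused, so a completion carrying a stale id can be recognized.
 * Illustrative example: id 0x0A07 -> slot 0x07, table SKD_ID_SPECIAL_REQUEST,
 * generation 2.
 */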
127
128 #define SKD_N_TIMEOUT_SLOT      4u
129 #define SKD_TIMEOUT_SLOT_MASK   3u
130
131 #define SKD_N_MAX_SECTORS 2048u
132
133 #define SKD_MAX_RETRIES 2u
134
135 #define SKD_TIMER_SECONDS(seconds) (seconds)
136 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
137
138 #define INQ_STD_NBYTES 36
139 #define SKD_DISCARD_CDB_LENGTH  24
140
141 enum skd_drvr_state {
142         SKD_DRVR_STATE_LOAD,
143         SKD_DRVR_STATE_IDLE,
144         SKD_DRVR_STATE_BUSY,
145         SKD_DRVR_STATE_STARTING,
146         SKD_DRVR_STATE_ONLINE,
147         SKD_DRVR_STATE_PAUSING,
148         SKD_DRVR_STATE_PAUSED,
149         SKD_DRVR_STATE_DRAINING_TIMEOUT,
150         SKD_DRVR_STATE_RESTARTING,
151         SKD_DRVR_STATE_RESUMING,
152         SKD_DRVR_STATE_STOPPING,
153         SKD_DRVR_STATE_FAULT,
154         SKD_DRVR_STATE_DISAPPEARED,
155         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
156         SKD_DRVR_STATE_BUSY_ERASE,
157         SKD_DRVR_STATE_BUSY_SANITIZE,
158         SKD_DRVR_STATE_BUSY_IMMINENT,
159         SKD_DRVR_STATE_WAIT_BOOT,
160         SKD_DRVR_STATE_SYNCING,
161 };
162
163 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
164 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
165 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
166 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
167 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
168 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
169 #define SKD_START_WAIT_SECONDS  90u
170
171 enum skd_req_state {
172         SKD_REQ_STATE_IDLE,
173         SKD_REQ_STATE_SETUP,
174         SKD_REQ_STATE_BUSY,
175         SKD_REQ_STATE_COMPLETED,
176         SKD_REQ_STATE_TIMEOUT,
177         SKD_REQ_STATE_ABORTED,
178 };
179
180 enum skd_fit_msg_state {
181         SKD_MSG_STATE_IDLE,
182         SKD_MSG_STATE_BUSY,
183 };
184
185 enum skd_check_status_action {
186         SKD_CHECK_STATUS_REPORT_GOOD,
187         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
188         SKD_CHECK_STATUS_REQUEUE_REQUEST,
189         SKD_CHECK_STATUS_REPORT_ERROR,
190         SKD_CHECK_STATUS_BUSY_IMMINENT,
191 };
192
193 struct skd_fitmsg_context {
194         enum skd_fit_msg_state state;
195
196         struct skd_fitmsg_context *next;
197
198         u32 id;
199         u16 outstanding;
200
201         u32 length;
202         u32 offset;
203
204         u8 *msg_buf;
205         dma_addr_t mb_dma_address;
206 };
207
208 struct skd_request_context {
209         enum skd_req_state state;
210
211         struct skd_request_context *next;
212
213         u16 id;
214         u32 fitmsg_id;
215
216         struct request *req;
217         u8 flush_cmd;
218         u8 discard_page;
219
220         u32 timeout_stamp;
221         u8 sg_data_dir;
222         struct scatterlist *sg;
223         u32 n_sg;
224         u32 sg_byte_count;
225
226         struct fit_sg_descriptor *sksg_list;
227         dma_addr_t sksg_dma_address;
228
229         struct fit_completion_entry_v1 completion;
230
231         struct fit_comp_error_info err_info;
232
233 };
234 #define SKD_DATA_DIR_HOST_TO_CARD       1
235 #define SKD_DATA_DIR_CARD_TO_HOST       2
236 #define SKD_DATA_DIR_NONE               3       /* especially for DISCARD requests. */
237
238 struct skd_special_context {
239         struct skd_request_context req;
240
241         u8 orphaned;
242
243         void *data_buf;
244         dma_addr_t db_dma_address;
245
246         u8 *msg_buf;
247         dma_addr_t mb_dma_address;
248 };
249
250 struct skd_sg_io {
251         fmode_t mode;
252         void __user *argp;
253
254         struct sg_io_hdr sg;
255
256         u8 cdb[16];
257
258         u32 dxfer_len;
259         u32 iovcnt;
260         struct sg_iovec *iov;
261         struct sg_iovec no_iov_iov;
262
263         struct skd_special_context *skspcl;
264 };
265
266 typedef enum skd_irq_type {
267         SKD_IRQ_LEGACY,
268         SKD_IRQ_MSI,
269         SKD_IRQ_MSIX
270 } skd_irq_type_t;
271
272 #define SKD_MAX_BARS                    2
273
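/* Per-device driver state for one s1120 device. */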
274 struct skd_device {
275         volatile void __iomem *mem_map[SKD_MAX_BARS];
276         resource_size_t mem_phys[SKD_MAX_BARS];
277         u32 mem_size[SKD_MAX_BARS];
278
279         skd_irq_type_t irq_type;
280         u32 msix_count;
281         struct skd_msix_entry *msix_entries;
282
283         struct pci_dev *pdev;
284         int pcie_error_reporting_is_enabled;
285
286         spinlock_t lock;
287         struct gendisk *disk;
288         struct request_queue *queue;
289         struct device *class_dev;
290         int gendisk_on;
291         int sync_done;
292
293         atomic_t device_count;
294         u32 devno;
295         u32 major;
296         char name[32];
297         char isr_name[30];
298
299         enum skd_drvr_state state;
300         u32 drive_state;
301
302         u32 in_flight;
303         u32 cur_max_queue_depth;
304         u32 queue_low_water_mark;
305         u32 dev_max_queue_depth;
306
307         u32 num_fitmsg_context;
308         u32 num_req_context;
309
310         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
311         u32 timeout_stamp;
312         struct skd_fitmsg_context *skmsg_free_list;
313         struct skd_fitmsg_context *skmsg_table;
314
315         struct skd_request_context *skreq_free_list;
316         struct skd_request_context *skreq_table;
317
318         struct skd_special_context *skspcl_free_list;
319         struct skd_special_context *skspcl_table;
320
321         struct skd_special_context internal_skspcl;
322         u32 read_cap_blocksize;
323         u32 read_cap_last_lba;
324         int read_cap_is_valid;
325         int inquiry_is_valid;
326         u8 inq_serial_num[13];  /*12 chars plus null term */
327         u8 id_str[80];          /* holds a composite name (pci + sernum) */
328
329         u8 skcomp_cycle;
330         u32 skcomp_ix;
331         struct fit_completion_entry_v1 *skcomp_table;
332         struct fit_comp_error_info *skerr_table;
333         dma_addr_t cq_dma_address;
334
335         wait_queue_head_t waitq;
336
337         struct timer_list timer;
338         u32 timer_countdown;
339         u32 timer_substate;
340
341         int n_special;
342         int sgs_per_request;
343         u32 last_mtd;
344
345         u32 proto_ver;
346
347         int dbg_level;
348         u32 connect_time_stamp;
349         int connect_retries;
350 #define SKD_MAX_CONNECT_RETRIES 16
351         u32 drive_jiffies;
352
353         u32 timo_slot;
354
355
356         struct work_struct completion_worker;
357 };
358
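/*
 * Register accessors: all device registers are reached through BAR 1
 * (mem_map[1]); at dbg_level >= 2 each access is also traced via pr_debug().
 */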
359 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
360 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
361 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
362
363 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
364 {
365         u32 val;
366
367         if (likely(skdev->dbg_level < 2))
368                 return readl(skdev->mem_map[1] + offset);
369         else {
370                 barrier();
371                 val = readl(skdev->mem_map[1] + offset);
372                 barrier();
373                 pr_debug("%s:%s:%d offset %x = %x\n",
374                          skdev->name, __func__, __LINE__, offset, val);
375                 return val;
376         }
377
378 }
379
380 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
381                                    u32 offset)
382 {
383         if (likely(skdev->dbg_level < 2)) {
384                 writel(val, skdev->mem_map[1] + offset);
385                 barrier();
386         } else {
387                 barrier();
388                 writel(val, skdev->mem_map[1] + offset);
389                 barrier();
390                 pr_debug("%s:%s:%d offset %x = %x\n",
391                          skdev->name, __func__, __LINE__, offset, val);
392         }
393 }
394
395 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
396                                    u32 offset)
397 {
398         if (likely(skdev->dbg_level < 2)) {
399                 writeq(val, skdev->mem_map[1] + offset);
400                 barrier();
401         } else {
402                 barrier();
403                 writeq(val, skdev->mem_map[1] + offset);
404                 barrier();
405                 pr_debug("%s:%s:%d offset %x = %016llx\n",
406                          skdev->name, __func__, __LINE__, offset, val);
407         }
408 }
409
410
411 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
412 static int skd_isr_type = SKD_IRQ_DEFAULT;
413
414 module_param(skd_isr_type, int, 0444);
415 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
416                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
417
418 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
419 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
420
421 module_param(skd_max_req_per_msg, int, 0444);
422 MODULE_PARM_DESC(skd_max_req_per_msg,
423                  "Maximum SCSI requests packed in a single message."
424                  " (1-14, default==1)");
425
426 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
427 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
428 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
429
430 module_param(skd_max_queue_depth, int, 0444);
431 MODULE_PARM_DESC(skd_max_queue_depth,
432                  "Maximum SCSI requests issued to s1120."
433                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
434
435 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
436 module_param(skd_sgs_per_request, int, 0444);
437 MODULE_PARM_DESC(skd_sgs_per_request,
438                  "Maximum SG elements per block request."
439                  " (1-4096, default==256)");
440
441 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
442 module_param(skd_max_pass_thru, int, 0444);
443 MODULE_PARM_DESC(skd_max_pass_thru,
444                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
445
446 module_param(skd_dbg_level, int, 0444);
447 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
448
449 module_param(skd_isr_comp_limit, int, 0444);
450 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
451
452 /* Major device number dynamically assigned. */
453 static u32 skd_major;
454
455 static struct skd_device *skd_construct(struct pci_dev *pdev);
456 static void skd_destruct(struct skd_device *skdev);
457 static const struct block_device_operations skd_blockdev_ops;
458 static void skd_send_fitmsg(struct skd_device *skdev,
459                             struct skd_fitmsg_context *skmsg);
460 static void skd_send_special_fitmsg(struct skd_device *skdev,
461                                     struct skd_special_context *skspcl);
462 static void skd_request_fn(struct request_queue *rq);
463 static void skd_end_request(struct skd_device *skdev,
464                             struct skd_request_context *skreq, int error);
465 static int skd_preop_sg_list(struct skd_device *skdev,
466                              struct skd_request_context *skreq);
467 static void skd_postop_sg_list(struct skd_device *skdev,
468                                struct skd_request_context *skreq);
469
470 static void skd_restart_device(struct skd_device *skdev);
471 static int skd_quiesce_dev(struct skd_device *skdev);
472 static int skd_unquiesce_dev(struct skd_device *skdev);
473 static void skd_release_special(struct skd_device *skdev,
474                                 struct skd_special_context *skspcl);
475 static void skd_disable_interrupts(struct skd_device *skdev);
476 static void skd_isr_fwstate(struct skd_device *skdev);
477 static void skd_recover_requests(struct skd_device *skdev, int requeue);
478 static void skd_soft_reset(struct skd_device *skdev);
479
480 static const char *skd_name(struct skd_device *skdev);
481 const char *skd_drive_state_to_str(int state);
482 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
483 static void skd_log_skdev(struct skd_device *skdev, const char *event);
484 static void skd_log_skmsg(struct skd_device *skdev,
485                           struct skd_fitmsg_context *skmsg, const char *event);
486 static void skd_log_skreq(struct skd_device *skdev,
487                           struct skd_request_context *skreq, const char *event);
488
489 /*
490  *****************************************************************************
491  * READ/WRITE REQUESTS
492  *****************************************************************************
493  */
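/* Complete every request still waiting on the block queue with -EIO. */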
494 static void skd_fail_all_pending(struct skd_device *skdev)
495 {
496         struct request_queue *q = skdev->queue;
497         struct request *req;
498
499         for (;;) {
500                 req = blk_peek_request(q);
501                 if (req == NULL)
502                         break;
503                 blk_start_request(req);
504                 __blk_end_request_all(req, -EIO);
505         }
506 }
507
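/*
 * Build a READ(10)/WRITE(10) CDB: big-endian LBA in bytes 2-5 and
 * big-endian transfer length (in sectors) in bytes 7-8.
 */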
508 static void
509 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
510                 int data_dir, unsigned lba,
511                 unsigned count)
512 {
513         if (data_dir == READ)
514                 scsi_req->cdb[0] = 0x28;
515         else
516                 scsi_req->cdb[0] = 0x2a;
517
518         scsi_req->cdb[1] = 0;
519         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
520         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
521         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
522         scsi_req->cdb[5] = (lba & 0xff);
523         scsi_req->cdb[6] = 0;
524         scsi_req->cdb[7] = (count & 0xff00) >> 8;
525         scsi_req->cdb[8] = count & 0xff;
526         scsi_req->cdb[9] = 0;
527 }
528
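/*
 * Build a zero-length SYNCHRONIZE CACHE(10) CDB (opcode 0x35); with LBA and
 * block count both zero, the entire cache is flushed.
 */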
529 static void
530 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
531                             struct skd_request_context *skreq)
532 {
533         skreq->flush_cmd = 1;
534
535         scsi_req->cdb[0] = 0x35;
536         scsi_req->cdb[1] = 0;
537         scsi_req->cdb[2] = 0;
538         scsi_req->cdb[3] = 0;
539         scsi_req->cdb[4] = 0;
540         scsi_req->cdb[5] = 0;
541         scsi_req->cdb[6] = 0;
542         scsi_req->cdb[7] = 0;
543         scsi_req->cdb[8] = 0;
544         scsi_req->cdb[9] = 0;
545 }
546
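/*
 * Build an UNMAP command for a DISCARD request.  The parameter list (a
 * single block descriptor: 8-byte LBA plus 4-byte block count) is written
 * into the supplied page, which is then attached to the request as payload
 * so it travels to the device along with the CDB.
 */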
547 static void
548 skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
549                      struct skd_request_context *skreq,
550                      struct page *page,
551                      u32 lba, u32 count)
552 {
553         char *buf;
554         unsigned long len;
555         struct request *req;
556
557         buf = page_address(page);
558         len = SKD_DISCARD_CDB_LENGTH;
559
560         scsi_req->cdb[0] = UNMAP;
561         scsi_req->cdb[8] = len;
562
563         put_unaligned_be16(6 + 16, &buf[0]);
564         put_unaligned_be16(16, &buf[2]);
565         put_unaligned_be64(lba, &buf[8]);
566         put_unaligned_be32(count, &buf[16]);
567
568         req = skreq->req;
569         blk_add_request_payload(req, page, len);
570         req->buffer = buf;
571 }
572
573 static void skd_request_fn_not_online(struct request_queue *q);
574
575 static void skd_request_fn(struct request_queue *q)
576 {
577         struct skd_device *skdev = q->queuedata;
578         struct skd_fitmsg_context *skmsg = NULL;
579         struct fit_msg_hdr *fmh = NULL;
580         struct skd_request_context *skreq;
581         struct request *req = NULL;
582         struct skd_scsi_request *scsi_req;
583         struct page *page;
584         unsigned long io_flags;
585         int error;
586         u32 lba;
587         u32 count;
588         int data_dir;
589         u32 be_lba;
590         u32 be_count;
591         u64 be_dmaa;
592         u64 cmdctxt;
593         u32 timo_slot;
594         void *cmd_ptr;
595         int flush, fua;
596
597         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
598                 skd_request_fn_not_online(q);
599                 return;
600         }
601
602         if (blk_queue_stopped(skdev->queue)) {
603                 if (skdev->skmsg_free_list == NULL ||
604                     skdev->skreq_free_list == NULL ||
605                     skdev->in_flight >= skdev->queue_low_water_mark)
606                         /* There is still some kind of shortage */
607                         return;
608
609                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
610         }
611
612         /*
613          * Stop conditions:
614          *  - There are no more native requests
615          *  - There are already the maximum number of requests in progress
616          *  - There are no more skd_request_context entries
617          *  - There are no more FIT msg buffers
618          */
619         for (;;) {
620
621                 flush = fua = 0;
622
623                 req = blk_peek_request(q);
624
625                 /* Are there any native requests to start? */
626                 if (req == NULL)
627                         break;
628
629                 lba = (u32)blk_rq_pos(req);
630                 count = blk_rq_sectors(req);
631                 data_dir = rq_data_dir(req);
632                 io_flags = req->cmd_flags;
633
634                 if (io_flags & REQ_FLUSH)
635                         flush++;
636
637                 if (io_flags & REQ_FUA)
638                         fua++;
639
640                 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
641                          "count=%u(0x%x) dir=%d\n",
642                          skdev->name, __func__, __LINE__,
643                          req, lba, lba, count, count, data_dir);
644
645                 /* At this point we know there is a request */
646
647                 /* Are too many requests already in progress? */
648                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
649                         pr_debug("%s:%s:%d qdepth %d, limit %d\n",
650                                  skdev->name, __func__, __LINE__,
651                                  skdev->in_flight, skdev->cur_max_queue_depth);
652                         break;
653                 }
654
655                 /* Is a skd_request_context available? */
656                 skreq = skdev->skreq_free_list;
657                 if (skreq == NULL) {
658                         pr_debug("%s:%s:%d Out of req=%p\n",
659                                  skdev->name, __func__, __LINE__, q);
660                         break;
661                 }
662                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
663                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
664
665                 /* Now we check to see if we can get a fit msg */
666                 if (skmsg == NULL) {
667                         if (skdev->skmsg_free_list == NULL) {
668                                 pr_debug("%s:%s:%d Out of msg\n",
669                                          skdev->name, __func__, __LINE__);
670                                 break;
671                         }
672                 }
673
674                 skreq->flush_cmd = 0;
675                 skreq->n_sg = 0;
676                 skreq->sg_byte_count = 0;
677                 skreq->discard_page = 0;
678
679                 /*
680                  * OK to now dequeue request from q.
681                  *
682                  * At this point we are committed to either start or reject
683                  * the native request. Note that skd_request_context is
684                  * available but is still at the head of the free list.
685                  */
686                 blk_start_request(req);
687                 skreq->req = req;
688                 skreq->fitmsg_id = 0;
689
690                 /* Either a FIT msg is in progress or we have to start one. */
691                 if (skmsg == NULL) {
692                         /* Are there any FIT msg buffers available? */
693                         skmsg = skdev->skmsg_free_list;
694                         if (skmsg == NULL) {
695                                 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
696                                          skdev->name, __func__, __LINE__,
697                                          skdev);
698                                 break;
699                         }
700                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
701                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
702
703                         skdev->skmsg_free_list = skmsg->next;
704
705                         skmsg->state = SKD_MSG_STATE_BUSY;
706                         skmsg->id += SKD_ID_INCR;
707
708                         /* Initialize the FIT msg header */
709                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
710                         memset(fmh, 0, sizeof(*fmh));
711                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
712                         skmsg->length = sizeof(*fmh);
713                 }
714
715                 skreq->fitmsg_id = skmsg->id;
716
717                 /*
718                  * Note that a FIT msg may have just been started
719                  * but contains no SoFIT requests yet.
720                  */
721
722                 /*
723                  * Transcode the request, checking as we go. The outcome of
724                  * the transcoding is represented by the error variable.
725                  */
726                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
727                 memset(cmd_ptr, 0, 32);
728
729                 be_lba = cpu_to_be32(lba);
730                 be_count = cpu_to_be32(count);
731                 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
732                 cmdctxt = skreq->id + SKD_ID_INCR;
733
734                 scsi_req = cmd_ptr;
735                 scsi_req->hdr.tag = cmdctxt;
736                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
737
738                 if (data_dir == READ)
739                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
740                 else
741                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
742
743                 if (io_flags & REQ_DISCARD) {
744                         page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
745                         if (!page) {
746                                 pr_err("request_fn:Page allocation failed.\n");
747                                 skd_end_request(skdev, skreq, -ENOMEM);
748                                 break;
749                         }
750                         skreq->discard_page = 1;
751                         skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
752
753                 } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
754                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
755                         SKD_ASSERT(skreq->flush_cmd == 1);
756
757                 } else {
758                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
759                 }
760
761                 if (fua)
762                         scsi_req->cdb[1] |= SKD_FUA_NV;
763
764                 if (!req->bio)
765                         goto skip_sg;
766
767                 error = skd_preop_sg_list(skdev, skreq);
768
769                 if (error != 0) {
770                         /*
771                          * Complete the native request with error.
772                          * Note that the request context is still at the
773                          * head of the free list, and that the SoFIT request
774                          * was encoded into the FIT msg buffer but the FIT
775                          * msg length has not been updated. In short, the
776                          * only resource that has been allocated but might
777                  * not be used is the FIT msg, which could remain empty.
778                          */
779                         pr_debug("%s:%s:%d error Out\n",
780                                  skdev->name, __func__, __LINE__);
781                         skd_end_request(skdev, skreq, error);
782                         continue;
783                 }
784
785 skip_sg:
786                 scsi_req->hdr.sg_list_len_bytes =
787                         cpu_to_be32(skreq->sg_byte_count);
788
789                 /* Complete resource allocations. */
790                 skdev->skreq_free_list = skreq->next;
791                 skreq->state = SKD_REQ_STATE_BUSY;
792                 skreq->id += SKD_ID_INCR;
793
794                 skmsg->length += sizeof(struct skd_scsi_request);
795                 fmh->num_protocol_cmds_coalesced++;
796
797                 /*
798                  * Update the active request counts.
799                  * Capture the timeout timestamp.
800                  */
801                 skreq->timeout_stamp = skdev->timeout_stamp;
802                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
803                 skdev->timeout_slot[timo_slot]++;
804                 skdev->in_flight++;
805                 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
806                          skdev->name, __func__, __LINE__,
807                          skreq->id, skdev->in_flight);
808
809                 /*
810                  * If the FIT msg buffer is full send it.
811                  */
812                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
813                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
814                         skd_send_fitmsg(skdev, skmsg);
815                         skmsg = NULL;
816                         fmh = NULL;
817                 }
818         }
819
820         /*
821          * Is a FIT msg in progress? If it is empty put the buffer back
822          * on the free list. If it is non-empty send what we got.
823          * This minimizes latency when there are fewer requests than
824          * what fits in a FIT msg.
825          */
826         if (skmsg != NULL) {
827                 /* Bigger than just a FIT msg header? */
828                 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
829                         pr_debug("%s:%s:%d sending msg=%p, len %d\n",
830                                  skdev->name, __func__, __LINE__,
831                                  skmsg, skmsg->length);
832                         skd_send_fitmsg(skdev, skmsg);
833                 } else {
834                         /*
835                          * The FIT msg is empty. It means we got started
836                          * on the msg, but the requests were rejected.
837                          */
838                         skmsg->state = SKD_MSG_STATE_IDLE;
839                         skmsg->id += SKD_ID_INCR;
840                         skmsg->next = skdev->skmsg_free_list;
841                         skdev->skmsg_free_list = skmsg;
842                 }
843                 skmsg = NULL;
844                 fmh = NULL;
845         }
846
847         /*
848          * If req is non-NULL it means there is something to do but
849          * we are out of a resource.
850          */
851         if (req)
852                 blk_stop_queue(skdev->queue);
853 }
854
855 static void skd_end_request(struct skd_device *skdev,
856                             struct skd_request_context *skreq, int error)
857 {
858         struct request *req = skreq->req;
859         unsigned int io_flags = req->cmd_flags;
860
861         if ((io_flags & REQ_DISCARD) &&
862                 (skreq->discard_page == 1)) {
863                 pr_debug("%s:%s:%d, free the page!",
864                          skdev->name, __func__, __LINE__);
865                 free_page((unsigned long)req->buffer);
866                 req->buffer = NULL;
867         }
868
869         if (unlikely(error)) {
870                 struct request *req = skreq->req;
871                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
872                 u32 lba = (u32)blk_rq_pos(req);
873                 u32 count = blk_rq_sectors(req);
874
875                 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
876                        skd_name(skdev), cmd, lba, count, skreq->id);
877         } else
878                 pr_debug("%s:%s:%d id=0x%x error=%d\n",
879                          skdev->name, __func__, __LINE__, skreq->id, error);
880
881         __blk_end_request_all(skreq->req, error);
882 }
883
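/*
 * Map the request's bio segments for DMA and mirror them into the
 * preallocated FIT SG descriptor list used by the hardware; returns 0 on
 * success or -EINVAL if no entries could be mapped.
 */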
884 static int skd_preop_sg_list(struct skd_device *skdev,
885                              struct skd_request_context *skreq)
886 {
887         struct request *req = skreq->req;
888         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
889         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
890         struct scatterlist *sg = &skreq->sg[0];
891         int n_sg;
892         int i;
893
894         skreq->sg_byte_count = 0;
895
896         /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
897                    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
898
899         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
900         if (n_sg <= 0)
901                 return -EINVAL;
902
903         /*
904          * Map scatterlist to PCI bus addresses.
905          * Note PCI might change the number of entries.
906          */
907         n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
908         if (n_sg <= 0)
909                 return -EINVAL;
910
911         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
912
913         skreq->n_sg = n_sg;
914
915         for (i = 0; i < n_sg; i++) {
916                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
917                 u32 cnt = sg_dma_len(&sg[i]);
918                 uint64_t dma_addr = sg_dma_address(&sg[i]);
919
920                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
921                 sgd->byte_count = cnt;
922                 skreq->sg_byte_count += cnt;
923                 sgd->host_side_addr = dma_addr;
924                 sgd->dev_side_addr = 0;
925         }
926
927         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
928         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
929
930         if (unlikely(skdev->dbg_level > 1)) {
931                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
932                          skdev->name, __func__, __LINE__,
933                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
934                 for (i = 0; i < n_sg; i++) {
935                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
936                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
937                                  "addr=0x%llx next=0x%llx\n",
938                                  skdev->name, __func__, __LINE__,
939                                  i, sgd->byte_count, sgd->control,
940                                  sgd->host_side_addr, sgd->next_desc_ptr);
941                 }
942         }
943
944         return 0;
945 }
946
947 static void skd_postop_sg_list(struct skd_device *skdev,
948                                struct skd_request_context *skreq)
949 {
950         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
951         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
952
953         /*
954          * restore the next ptr for next IO request so we
955          * don't have to set it every time.
956          */
957         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
958                 skreq->sksg_dma_address +
959                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
960         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
961 }
962
963 static void skd_request_fn_not_online(struct request_queue *q)
964 {
965         struct skd_device *skdev = q->queuedata;
966         int error;
967
968         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
969
970         skd_log_skdev(skdev, "req_not_online");
971         switch (skdev->state) {
972         case SKD_DRVR_STATE_PAUSING:
973         case SKD_DRVR_STATE_PAUSED:
974         case SKD_DRVR_STATE_STARTING:
975         case SKD_DRVR_STATE_RESTARTING:
976         case SKD_DRVR_STATE_WAIT_BOOT:
977         /* In case of starting, we haven't started the queue,
978          * so we can't get here... but requests are
979          * possibly hanging out waiting for us because we
980          * reported /dev/skd0 already.  They'll wait
981          * forever if connect doesn't complete.
982          * What to do? Perhaps delay creating /dev/skd0?
983          */
984         case SKD_DRVR_STATE_BUSY:
985         case SKD_DRVR_STATE_BUSY_IMMINENT:
986         case SKD_DRVR_STATE_BUSY_ERASE:
987         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
988                 return;
989
990         case SKD_DRVR_STATE_BUSY_SANITIZE:
991         case SKD_DRVR_STATE_STOPPING:
992         case SKD_DRVR_STATE_SYNCING:
993         case SKD_DRVR_STATE_FAULT:
994         case SKD_DRVR_STATE_DISAPPEARED:
995         default:
996                 error = -EIO;
997                 break;
998         }
999
1000         /* If we get here, terminate all pending block requests
1001          * with -EIO and any SCSI pass-thru with appropriate sense
1002          */
1003
1004         skd_fail_all_pending(skdev);
1005 }
1006
1007 /*
1008  *****************************************************************************
1009  * TIMER
1010  *****************************************************************************
1011  */
1012
1013 static void skd_timer_tick_not_online(struct skd_device *skdev);
1014
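/*
 * Once-per-second housekeeping timer: re-arms itself with
 * mod_timer(jiffies + HZ), picks up firmware state changes, and walks the
 * timeout-slot wheel to detect overdue requests.
 */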
1015 static void skd_timer_tick(ulong arg)
1016 {
1017         struct skd_device *skdev = (struct skd_device *)arg;
1018
1019         u32 timo_slot;
1020         u32 overdue_timestamp;
1021         unsigned long reqflags;
1022         u32 state;
1023
1024         if (skdev->state == SKD_DRVR_STATE_FAULT)
1025                 /* The driver has declared fault, and we want it to
1026                  * stay that way until driver is reloaded.
1027                  */
1028                 return;
1029
1030         spin_lock_irqsave(&skdev->lock, reqflags);
1031
1032         state = SKD_READL(skdev, FIT_STATUS);
1033         state &= FIT_SR_DRIVE_STATE_MASK;
1034         if (state != skdev->drive_state)
1035                 skd_isr_fwstate(skdev);
1036
1037         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1038                 skd_timer_tick_not_online(skdev);
1039                 goto timer_func_out;
1040         }
1041         skdev->timeout_stamp++;
1042         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1043
1044         /*
1045          * All requests that happened during the previous use of
1046          * this slot should be done by now. The previous use was
1047          * about SKD_N_TIMEOUT_SLOT (4) seconds ago.
1048          */
1049         if (skdev->timeout_slot[timo_slot] == 0)
1050                 goto timer_func_out;
1051
1052         /* Something is overdue */
1053         overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1054
1055         pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1056                  skdev->name, __func__, __LINE__,
1057                  skdev->timeout_slot[timo_slot], skdev->in_flight);
1058         pr_err("(%s): Overdue IOs (%d), busy %d\n",
1059                skd_name(skdev), skdev->timeout_slot[timo_slot],
1060                skdev->in_flight);
1061
1062         skdev->timer_countdown = SKD_DRAINING_TIMO;
1063         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1064         skdev->timo_slot = timo_slot;
1065         blk_stop_queue(skdev->queue);
1066
1067 timer_func_out:
1068         mod_timer(&skdev->timer, (jiffies + HZ));
1069
1070         spin_unlock_irqrestore(&skdev->lock, reqflags);
1071 }
1072
1073 static void skd_timer_tick_not_online(struct skd_device *skdev)
1074 {
1075         switch (skdev->state) {
1076         case SKD_DRVR_STATE_IDLE:
1077         case SKD_DRVR_STATE_LOAD:
1078                 break;
1079         case SKD_DRVR_STATE_BUSY_SANITIZE:
1080                 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1081                          skdev->name, __func__, __LINE__,
1082                          skdev->drive_state, skdev->state);
1083                 /* If we've been in sanitize for 3 seconds, we figure we're not
1084                  * going to get any more completions, so recover requests now
1085                  */
1086                 if (skdev->timer_countdown > 0) {
1087                         skdev->timer_countdown--;
1088                         return;
1089                 }
1090                 skd_recover_requests(skdev, 0);
1091                 break;
1092
1093         case SKD_DRVR_STATE_BUSY:
1094         case SKD_DRVR_STATE_BUSY_IMMINENT:
1095         case SKD_DRVR_STATE_BUSY_ERASE:
1096                 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1097                          skdev->name, __func__, __LINE__,
1098                          skdev->state, skdev->timer_countdown);
1099                 if (skdev->timer_countdown > 0) {
1100                         skdev->timer_countdown--;
1101                         return;
1102                 }
1103                 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.\n",
1104                          skdev->name, __func__, __LINE__,
1105                          skdev->state, skdev->timer_countdown);
1106                 skd_restart_device(skdev);
1107                 break;
1108
1109         case SKD_DRVR_STATE_WAIT_BOOT:
1110         case SKD_DRVR_STATE_STARTING:
1111                 if (skdev->timer_countdown > 0) {
1112                         skdev->timer_countdown--;
1113                         return;
1114                 }
1115                 /* For now, we fault the drive.  Could attempt resets to
1116                  * recover at some point. */
1117                 skdev->state = SKD_DRVR_STATE_FAULT;
1118
1119                 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1120                        skd_name(skdev), skdev->drive_state);
1121
1122                 /* start the queue so we can respond with error to requests */
1123                 /* wake up anyone waiting for startup complete */
1124                 blk_start_queue(skdev->queue);
1125                 skdev->gendisk_on = -1;
1126                 wake_up_interruptible(&skdev->waitq);
1127                 break;
1128
1129         case SKD_DRVR_STATE_ONLINE:
1130                 /* shouldn't get here. */
1131                 break;
1132
1133         case SKD_DRVR_STATE_PAUSING:
1134         case SKD_DRVR_STATE_PAUSED:
1135                 break;
1136
1137         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1138                 pr_debug("%s:%s:%d "
1139                          "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1140                          skdev->name, __func__, __LINE__,
1141                          skdev->timo_slot,
1142                          skdev->timer_countdown,
1143                          skdev->in_flight,
1144                          skdev->timeout_slot[skdev->timo_slot]);
1145                 /* if the slot has cleared we can let the I/O continue */
1146                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1147                         pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1148                                  skdev->name, __func__, __LINE__);
1149                         skdev->state = SKD_DRVR_STATE_ONLINE;
1150                         blk_start_queue(skdev->queue);
1151                         return;
1152                 }
1153                 if (skdev->timer_countdown > 0) {
1154                         skdev->timer_countdown--;
1155                         return;
1156                 }
1157                 skd_restart_device(skdev);
1158                 break;
1159
1160         case SKD_DRVR_STATE_RESTARTING:
1161                 if (skdev->timer_countdown > 0) {
1162                         skdev->timer_countdown--;
1163                         return;
1164                 }
1165                 /* For now, we fault the drive. Could attempt resets to
1166                  * recover at some point. */
1167                 skdev->state = SKD_DRVR_STATE_FAULT;
1168                 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1169                        skd_name(skdev), skdev->drive_state);
1170
1171                 /*
1172                  * Recovering does two things:
1173                  * 1. completes IO with error
1174                  * 2. reclaims dma resources
1175                  * When is it safe to recover requests?
1176                  * - if the drive state is faulted
1177                  * - if the state is still soft reset after our timeout
1178                  * - if the drive registers are dead (state = FF)
1179                  * If it is "unsafe", we still need to recover, so we will
1180                  * disable pci bus mastering and disable our interrupts.
1181                  */
1182
1183                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1184                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1185                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1186                         /* It never came out of soft reset. Try to
1187                          * recover the requests and then let them
1188                          * fail. This is to mitigate hung processes. */
1189                         skd_recover_requests(skdev, 0);
1190                 else {
1191                         pr_err("(%s): Disable BusMaster (%x)\n",
1192                                skd_name(skdev), skdev->drive_state);
1193                         pci_disable_device(skdev->pdev);
1194                         skd_disable_interrupts(skdev);
1195                         skd_recover_requests(skdev, 0);
1196                 }
1197
1198                 /* start the queue so we can respond with error to requests */
1199                 /* wake up anyone waiting for startup complete */
1200                 blk_start_queue(skdev->queue);
1201                 skdev->gendisk_on = -1;
1202                 wake_up_interruptible(&skdev->waitq);
1203                 break;
1204
1205         case SKD_DRVR_STATE_RESUMING:
1206         case SKD_DRVR_STATE_STOPPING:
1207         case SKD_DRVR_STATE_SYNCING:
1208         case SKD_DRVR_STATE_FAULT:
1209         case SKD_DRVR_STATE_DISAPPEARED:
1210         default:
1211                 break;
1212         }
1213 }
1214
1215 static int skd_start_timer(struct skd_device *skdev)
1216 {
1217         int rc;
1218
1219         init_timer(&skdev->timer);
1220         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1221
1222         rc = mod_timer(&skdev->timer, (jiffies + HZ));
1223         if (rc)
1224                 pr_err("%s: failed to start timer %d\n",
1225                        __func__, rc);
1226         return rc;
1227 }
1228
1229 static void skd_kill_timer(struct skd_device *skdev)
1230 {
1231         del_timer_sync(&skdev->timer);
1232 }
1233
1234 /*
1235  *****************************************************************************
1236  * IOCTL
1237  *****************************************************************************
1238  */
1239 static int skd_ioctl_sg_io(struct skd_device *skdev,
1240                            fmode_t mode, void __user *argp);
1241 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1242                                         struct skd_sg_io *sksgio);
1243 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1244                                    struct skd_sg_io *sksgio);
1245 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1246                                     struct skd_sg_io *sksgio);
1247 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1248                                  struct skd_sg_io *sksgio, int dxfer_dir);
1249 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1250                                  struct skd_sg_io *sksgio);
1251 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1252 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1253                                     struct skd_sg_io *sksgio);
1254 static int skd_sg_io_put_status(struct skd_device *skdev,
1255                                 struct skd_sg_io *sksgio);
1256
1257 static void skd_complete_special(struct skd_device *skdev,
1258                                  volatile struct fit_completion_entry_v1
1259                                  *skcomp,
1260                                  volatile struct fit_comp_error_info *skerr,
1261                                  struct skd_special_context *skspcl);
1262
1263 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1264                           uint cmd_in, ulong arg)
1265 {
1266         int rc = 0;
1267         struct gendisk *disk = bdev->bd_disk;
1268         struct skd_device *skdev = disk->private_data;
1269         void __user *p = (void __user *)arg;
1270
1271         pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1272                  skdev->name, __func__, __LINE__,
1273                  disk->disk_name, current->comm, mode, cmd_in, arg);
1274
1275         if (!capable(CAP_SYS_ADMIN))
1276                 return -EPERM;
1277
1278         switch (cmd_in) {
1279         case SG_SET_TIMEOUT:
1280         case SG_GET_TIMEOUT:
1281         case SG_GET_VERSION_NUM:
1282                 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1283                 break;
1284         case SG_IO:
1285                 rc = skd_ioctl_sg_io(skdev, mode, p);
1286                 break;
1287
1288         default:
1289                 rc = -ENOTTY;
1290                 break;
1291         }
1292
1293         pr_debug("%s:%s:%d %s:  completion rc %d\n",
1294                  skdev->name, __func__, __LINE__, disk->disk_name, rc);
1295         return rc;
1296 }
1297
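/*
 * SG_IO pass-through path: validate the user's sg_io_hdr, obtain a special
 * context, set up bounce buffering, copy data toward the device, send the
 * FIT message, wait for completion, copy data back and report status.
 */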
1298 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1299                            void __user *argp)
1300 {
1301         int rc;
1302         struct skd_sg_io sksgio;
1303
1304         memset(&sksgio, 0, sizeof(sksgio));
1305         sksgio.mode = mode;
1306         sksgio.argp = argp;
1307         sksgio.iov = &sksgio.no_iov_iov;
1308
1309         switch (skdev->state) {
1310         case SKD_DRVR_STATE_ONLINE:
1311         case SKD_DRVR_STATE_BUSY_IMMINENT:
1312                 break;
1313
1314         default:
1315                 pr_debug("%s:%s:%d drive not online\n",
1316                          skdev->name, __func__, __LINE__);
1317                 rc = -ENXIO;
1318                 goto out;
1319         }
1320
1321         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1322         if (rc)
1323                 goto out;
1324
1325         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1326         if (rc)
1327                 goto out;
1328
1329         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1330         if (rc)
1331                 goto out;
1332
1333         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1334         if (rc)
1335                 goto out;
1336
1337         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1338         if (rc)
1339                 goto out;
1340
1341         rc = skd_sg_io_await(skdev, &sksgio);
1342         if (rc)
1343                 goto out;
1344
1345         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1346         if (rc)
1347                 goto out;
1348
1349         rc = skd_sg_io_put_status(skdev, &sksgio);
1350         if (rc)
1351                 goto out;
1352
1353         rc = 0;
1354
1355 out:
1356         skd_sg_io_release_skspcl(skdev, &sksgio);
1357
1358         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1359                 kfree(sksgio.iov);
1360         return rc;
1361 }
1362
1363 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1364                                         struct skd_sg_io *sksgio)
1365 {
1366         struct sg_io_hdr *sgp = &sksgio->sg;
1367         int i, acc;
1368
1369         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1370                 pr_debug("%s:%s:%d access sg failed %p\n",
1371                          skdev->name, __func__, __LINE__, sksgio->argp);
1372                 return -EFAULT;
1373         }
1374
1375         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1376                 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1377                          skdev->name, __func__, __LINE__, sksgio->argp);
1378                 return -EFAULT;
1379         }
1380
1381         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1382                 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1383                          skdev->name, __func__, __LINE__, sgp->interface_id);
1384                 return -EINVAL;
1385         }
1386
1387         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1388                 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1389                          skdev->name, __func__, __LINE__, sgp->cmd_len);
1390                 return -EINVAL;
1391         }
1392
1393         if (sgp->iovec_count > 256) {
1394                 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1395                          skdev->name, __func__, __LINE__, sgp->iovec_count);
1396                 return -EINVAL;
1397         }
1398
1399         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1400                 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1401                          skdev->name, __func__, __LINE__, sgp->dxfer_len);
1402                 return -EINVAL;
1403         }
1404
1405         switch (sgp->dxfer_direction) {
1406         case SG_DXFER_NONE:
1407                 acc = -1;
1408                 break;
1409
1410         case SG_DXFER_TO_DEV:
1411                 acc = VERIFY_READ;
1412                 break;
1413
1414         case SG_DXFER_FROM_DEV:
1415         case SG_DXFER_TO_FROM_DEV:
1416                 acc = VERIFY_WRITE;
1417                 break;
1418
1419         default:
1420                 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1421                          skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1422                 return -EINVAL;
1423         }
1424
1425         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1426                 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1427                          skdev->name, __func__, __LINE__, sgp->cmdp);
1428                 return -EFAULT;
1429         }
1430
1431         if (sgp->mx_sb_len != 0) {
1432                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1433                         pr_debug("%s:%s:%d access sbp failed %p\n",
1434                                  skdev->name, __func__, __LINE__, sgp->sbp);
1435                         return -EFAULT;
1436                 }
1437         }
1438
1439         if (sgp->iovec_count == 0) {
1440                 sksgio->iov[0].iov_base = sgp->dxferp;
1441                 sksgio->iov[0].iov_len = sgp->dxfer_len;
1442                 sksgio->iovcnt = 1;
1443                 sksgio->dxfer_len = sgp->dxfer_len;
1444         } else {
1445                 struct sg_iovec *iov;
1446                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1447                 size_t iov_data_len;
1448
1449                 iov = kmalloc(nbytes, GFP_KERNEL);
1450                 if (iov == NULL) {
1451                         pr_debug("%s:%s:%d alloc iovec failed %d\n",
1452                                  skdev->name, __func__, __LINE__,
1453                                  sgp->iovec_count);
1454                         return -ENOMEM;
1455                 }
1456                 sksgio->iov = iov;
1457                 sksgio->iovcnt = sgp->iovec_count;
1458
1459                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1460                         pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1461                                  skdev->name, __func__, __LINE__, sgp->dxferp);
1462                         return -EFAULT;
1463                 }
1464
1465                 /*
1466                  * Sum up the vecs, making sure they don't overflow
1467                  */
1468                 iov_data_len = 0;
1469                 for (i = 0; i < sgp->iovec_count; i++) {
1470                         if (iov_data_len + iov[i].iov_len < iov_data_len)
1471                                 return -EINVAL;
1472                         iov_data_len += iov[i].iov_len;
1473                 }
1474
1475                 /* SG_IO howto says that the shorter of the two wins */
1476                 if (sgp->dxfer_len < iov_data_len) {
1477                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1478                                                      sgp->iovec_count,
1479                                                      sgp->dxfer_len);
1480                         sksgio->dxfer_len = sgp->dxfer_len;
1481                 } else
1482                         sksgio->dxfer_len = iov_data_len;
1483         }
1484
1485         if (sgp->dxfer_direction != SG_DXFER_NONE) {
1486                 struct sg_iovec *iov = sksgio->iov;
1487                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1488                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1489                                 pr_debug("%s:%s:%d access data failed %p/%d\n",
1490                                          skdev->name, __func__, __LINE__,
1491                                          iov->iov_base, (int)iov->iov_len);
1492                                 return -EFAULT;
1493                         }
1494                 }
1495         }
1496
1497         return 0;
1498 }
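/*
 * The checks above service the SG_IO ioctl path.  For reference, a
 * userspace caller reaches it roughly like the sketch below; the device
 * node name and the chosen CDB are illustrative assumptions only, not
 * something this driver defines:
 *
 *     struct sg_io_hdr hdr = { .interface_id = 'S' };
 *     unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /- INQUIRY -/
 *     unsigned char buf[96], sense[32];
 *
 *     hdr.cmd_len = sizeof(cdb);      hdr.cmdp = cdb;
 *     hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *     hdr.dxfer_len = sizeof(buf);    hdr.dxferp = buf;
 *     hdr.mx_sb_len = sizeof(sense);  hdr.sbp = sense;
 *     hdr.timeout = 5000;             /- milliseconds -/
 *     ioctl(fd, SG_IO, &hdr);         /- fd from open("/dev/skd0", O_RDWR) -/
 */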
1499
1500 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1501                                    struct skd_sg_io *sksgio)
1502 {
1503         struct skd_special_context *skspcl = NULL;
1504         int rc;
1505
1506         for (;;) {
1507                 ulong flags;
1508
1509                 spin_lock_irqsave(&skdev->lock, flags);
1510                 skspcl = skdev->skspcl_free_list;
1511                 if (skspcl != NULL) {
1512                         skdev->skspcl_free_list =
1513                                 (struct skd_special_context *)skspcl->req.next;
1514                         skspcl->req.id += SKD_ID_INCR;
1515                         skspcl->req.state = SKD_REQ_STATE_SETUP;
1516                         skspcl->orphaned = 0;
1517                         skspcl->req.n_sg = 0;
1518                 }
1519                 spin_unlock_irqrestore(&skdev->lock, flags);
1520
1521                 if (skspcl != NULL) {
1522                         rc = 0;
1523                         break;
1524                 }
1525
1526                 pr_debug("%s:%s:%d blocking\n",
1527                          skdev->name, __func__, __LINE__);
1528
1529                 rc = wait_event_interruptible_timeout(
1530                                 skdev->waitq,
1531                                 (skdev->skspcl_free_list != NULL),
1532                                 msecs_to_jiffies(sksgio->sg.timeout));
1533
1534                 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1535                          skdev->name, __func__, __LINE__, rc);
1536
1537                 if (rc <= 0) {
1538                         if (rc == 0)
1539                                 rc = -ETIMEDOUT;
1540                         else
1541                                 rc = -EINTR;
1542                         break;
1543                 }
1544                 /*
1545                  * If we get here, rc > 0, meaning
1546                  * wait_event_interruptible_timeout() returned with time to
1547                  * spare, so the awaited event -- a non-empty free list --
1548                  * occurred.  Retry the allocation.
1549                  */
1550         }
1551         sksgio->skspcl = skspcl;
1552
1553         return rc;
1554 }
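/*
 * Note on the wait above: wait_event_interruptible_timeout() returns 0
 * if the timeout expired, a negative value if a signal arrived, and the
 * remaining jiffies (> 0) if the condition became true in time.  That
 * is why rc <= 0 is mapped to -ETIMEDOUT/-EINTR while rc > 0 simply
 * retries the free-list allocation.
 */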
1555
1556 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1557                                     struct skd_request_context *skreq,
1558                                     u32 dxfer_len)
1559 {
1560         u32 resid = dxfer_len;
1561
1562         /*
1563          * The DMA engine must have aligned addresses and byte counts.
1564          */
1565         resid += (-resid) & 3;
1566         skreq->sg_byte_count = resid;
1567
1568         skreq->n_sg = 0;
1569
1570         while (resid > 0) {
1571                 u32 nbytes = PAGE_SIZE;
1572                 u32 ix = skreq->n_sg;
1573                 struct scatterlist *sg = &skreq->sg[ix];
1574                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1575                 struct page *page;
1576
1577                 if (nbytes > resid)
1578                         nbytes = resid;
1579
1580                 page = alloc_page(GFP_KERNEL);
1581                 if (page == NULL)
1582                         return -ENOMEM;
1583
1584                 sg_set_page(sg, page, nbytes, 0);
1585
1586                 /* TODO: This should be going through a pci_???()
1587                  * routine to do proper mapping. */
1588                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1589                 sksg->byte_count = nbytes;
1590
1591                 sksg->host_side_addr = sg_phys(sg);
1592
1593                 sksg->dev_side_addr = 0;
1594                 sksg->next_desc_ptr = skreq->sksg_dma_address +
1595                                       (ix + 1) * sizeof(*sksg);
1596
1597                 skreq->n_sg++;
1598                 resid -= nbytes;
1599         }
1600
1601         if (skreq->n_sg > 0) {
1602                 u32 ix = skreq->n_sg - 1;
1603                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1604
1605                 sksg->control = FIT_SGD_CONTROL_LAST;
1606                 sksg->next_desc_ptr = 0;
1607         }
1608
1609         if (unlikely(skdev->dbg_level > 1)) {
1610                 u32 i;
1611
1612                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1613                          skdev->name, __func__, __LINE__,
1614                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1615                 for (i = 0; i < skreq->n_sg; i++) {
1616                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1617
1618                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1619                                  "addr=0x%llx next=0x%llx\n",
1620                                  skdev->name, __func__, __LINE__,
1621                                  i, sgd->byte_count, sgd->control,
1622                                  sgd->host_side_addr, sgd->next_desc_ptr);
1623                 }
1624         }
1625
1626         return 0;
1627 }
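/*
 * Buffering scheme used above: dxfer_len is first rounded up to a
 * multiple of 4 (the DMA engine needs 4-byte aligned counts), then one
 * freshly allocated page backs each scatterlist entry, with the
 * matching FIT SG descriptor chained to its successor through
 * next_desc_ptr.  The last descriptor is rewritten afterwards with
 * FIT_SGD_CONTROL_LAST and a zero next pointer to terminate the list.
 */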
1628
1629 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1630                                     struct skd_sg_io *sksgio)
1631 {
1632         struct skd_special_context *skspcl = sksgio->skspcl;
1633         struct skd_request_context *skreq = &skspcl->req;
1634         u32 dxfer_len = sksgio->dxfer_len;
1635         int rc;
1636
1637         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1638         /*
1639          * Eventually, errors or not, skd_release_special() is called
1640          * to recover allocations including partial allocations.
1641          */
1642         return rc;
1643 }
1644
1645 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1646                                  struct skd_sg_io *sksgio, int dxfer_dir)
1647 {
1648         struct skd_special_context *skspcl = sksgio->skspcl;
1649         u32 iov_ix = 0;
1650         struct sg_iovec curiov;
1651         u32 sksg_ix = 0;
1652         u8 *bufp = NULL;
1653         u32 buf_len = 0;
1654         u32 resid = sksgio->dxfer_len;
1655         int rc;
1656
1657         curiov.iov_len = 0;
1658         curiov.iov_base = NULL;
1659
1660         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1661                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1662                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1663                         return 0;
1664         }
1665
1666         while (resid > 0) {
1667                 u32 nbytes = PAGE_SIZE;
1668
1669                 if (curiov.iov_len == 0) {
1670                         curiov = sksgio->iov[iov_ix++];
1671                         continue;
1672                 }
1673
1674                 if (buf_len == 0) {
1675                         struct page *page;
1676                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1677                         bufp = page_address(page);
1678                         buf_len = PAGE_SIZE;
1679                 }
1680
1681                 nbytes = min_t(u32, nbytes, resid);
1682                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1683                 nbytes = min_t(u32, nbytes, buf_len);
1684
1685                 if (dxfer_dir == SG_DXFER_TO_DEV)
1686                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1687                 else
1688                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1689
1690                 if (rc)
1691                         return -EFAULT;
1692
1693                 resid -= nbytes;
1694                 curiov.iov_len -= nbytes;
1695                 curiov.iov_base += nbytes;
1696                 buf_len -= nbytes;
1697         }
1698
1699         return 0;
1700 }
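/*
 * The copy loop above advances through the user iovec list and the
 * driver's per-page bounce buffers in lockstep: each pass moves
 * min(resid, bytes left in curiov, bytes left in the current page),
 * refilling curiov from sksgio->iov[] and bufp from the next
 * scatterlist page as either one runs dry.
 */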
1701
1702 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1703                                  struct skd_sg_io *sksgio)
1704 {
1705         struct skd_special_context *skspcl = sksgio->skspcl;
1706         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1707         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1708
1709         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1710
1711         /* Initialize the FIT msg header */
1712         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1713         fmh->num_protocol_cmds_coalesced = 1;
1714
1715         /* Initialize the SCSI request */
1716         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1717                 scsi_req->hdr.sg_list_dma_address =
1718                         cpu_to_be64(skspcl->req.sksg_dma_address);
1719         scsi_req->hdr.tag = skspcl->req.id;
1720         scsi_req->hdr.sg_list_len_bytes =
1721                 cpu_to_be32(skspcl->req.sg_byte_count);
1722         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1723
1724         skspcl->req.state = SKD_REQ_STATE_BUSY;
1725         skd_send_special_fitmsg(skdev, skspcl);
1726
1727         return 0;
1728 }
1729
1730 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1731 {
1732         unsigned long flags;
1733         int rc;
1734
1735         rc = wait_event_interruptible_timeout(skdev->waitq,
1736                                               (sksgio->skspcl->req.state !=
1737                                                SKD_REQ_STATE_BUSY),
1738                                               msecs_to_jiffies(
1739                                                       sksgio->sg.timeout));
1740
1741         spin_lock_irqsave(&skdev->lock, flags);
1742
1743         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1744                 pr_debug("%s:%s:%d skspcl %p aborted\n",
1745                          skdev->name, __func__, __LINE__, sksgio->skspcl);
1746
1747                 /* Build check cond, sense and let command finish. */
1748                 /* For a timeout, we must fabricate completion and sense
1749                  * data to complete the command */
1750                 sksgio->skspcl->req.completion.status =
1751                         SAM_STAT_CHECK_CONDITION;
1752
1753                 memset(&sksgio->skspcl->req.err_info, 0,
1754                        sizeof(sksgio->skspcl->req.err_info));
1755                 sksgio->skspcl->req.err_info.type = 0x70;
1756                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1757                 sksgio->skspcl->req.err_info.code = 0x44;
1758                 sksgio->skspcl->req.err_info.qual = 0;
1759                 rc = 0;
1760         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1761                 /* No longer on the adapter. We finish. */
1762                 rc = 0;
1763         else {
1764                 /* Something's gone wrong. Still busy. Timeout or
1765                  * user interrupted (control-C). Mark as an orphan
1766                  * so it will be disposed when completed. */
1767                  * so it will be disposed of when it completes. */
1768                 sksgio->skspcl = NULL;
1769                 if (rc == 0) {
1770                         pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1771                                  skdev->name, __func__, __LINE__,
1772                                  sksgio, sksgio->sg.timeout);
1773                         rc = -ETIMEDOUT;
1774                 } else {
1775                         pr_debug("%s:%s:%d cntlc %p\n",
1776                                  skdev->name, __func__, __LINE__, sksgio);
1777                         rc = -EINTR;
1778                 }
1779         }
1780
1781         spin_unlock_irqrestore(&skdev->lock, flags);
1782
1783         return rc;
1784 }
1785
1786 static int skd_sg_io_put_status(struct skd_device *skdev,
1787                                 struct skd_sg_io *sksgio)
1788 {
1789         struct sg_io_hdr *sgp = &sksgio->sg;
1790         struct skd_special_context *skspcl = sksgio->skspcl;
1791         int resid = 0;
1792
1793         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1794
1795         sgp->status = skspcl->req.completion.status;
1796         resid = sksgio->dxfer_len - nb;
1797
1798         sgp->masked_status = sgp->status & STATUS_MASK;
1799         sgp->msg_status = 0;
1800         sgp->host_status = 0;
1801         sgp->driver_status = 0;
1802         sgp->resid = resid;
1803         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1804                 sgp->info |= SG_INFO_CHECK;
1805
1806         pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1807                  skdev->name, __func__, __LINE__,
1808                  sgp->status, sgp->masked_status, sgp->resid);
1809
1810         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1811                 if (sgp->mx_sb_len > 0) {
1812                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1813                         u32 nbytes = sizeof(*ei);
1814
1815                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1816
1817                         sgp->sb_len_wr = nbytes;
1818
1819                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1820                                 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1821                                          skdev->name, __func__, __LINE__,
1822                                          sgp->sbp);
1823                                 return -EFAULT;
1824                         }
1825                 }
1826         }
1827
1828         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1829                 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1830                          skdev->name, __func__, __LINE__, sksgio->argp);
1831                 return -EFAULT;
1832         }
1833
1834         return 0;
1835 }
1836
1837 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1838                                     struct skd_sg_io *sksgio)
1839 {
1840         struct skd_special_context *skspcl = sksgio->skspcl;
1841
1842         if (skspcl != NULL) {
1843                 ulong flags;
1844
1845                 sksgio->skspcl = NULL;
1846
1847                 spin_lock_irqsave(&skdev->lock, flags);
1848                 skd_release_special(skdev, skspcl);
1849                 spin_unlock_irqrestore(&skdev->lock, flags);
1850         }
1851
1852         return 0;
1853 }
1854
1855 /*
1856  *****************************************************************************
1857  * INTERNAL REQUESTS -- generated by driver itself
1858  *****************************************************************************
1859  */
1860
1861 static int skd_format_internal_skspcl(struct skd_device *skdev)
1862 {
1863         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1864         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1865         struct fit_msg_hdr *fmh;
1866         uint64_t dma_address;
1867         struct skd_scsi_request *scsi;
1868
1869         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1870         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1871         fmh->num_protocol_cmds_coalesced = 1;
1872
1873         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1874         memset(scsi, 0, sizeof(*scsi));
1875         dma_address = skspcl->req.sksg_dma_address;
1876         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1877         sgd->control = FIT_SGD_CONTROL_LAST;
1878         sgd->byte_count = 0;
1879         sgd->host_side_addr = skspcl->db_dma_address;
1880         sgd->dev_side_addr = 0;
1881         sgd->next_desc_ptr = 0LL;
1882
1883         return 1;
1884 }
1885
1886 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1887
1888 static void skd_send_internal_skspcl(struct skd_device *skdev,
1889                                      struct skd_special_context *skspcl,
1890                                      u8 opcode)
1891 {
1892         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1893         struct skd_scsi_request *scsi;
1894         unsigned char *buf = skspcl->data_buf;
1895         int i;
1896
1897         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1898                 /*
1899                  * A refresh is already in progress.
1900                  * Just wait for it to finish.
1901                  */
1902                 return;
1903
1904         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1905         skspcl->req.state = SKD_REQ_STATE_BUSY;
1906         skspcl->req.id += SKD_ID_INCR;
1907
1908         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1909         scsi->hdr.tag = skspcl->req.id;
1910
1911         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1912
1913         switch (opcode) {
1914         case TEST_UNIT_READY:
1915                 scsi->cdb[0] = TEST_UNIT_READY;
1916                 sgd->byte_count = 0;
1917                 scsi->hdr.sg_list_len_bytes = 0;
1918                 break;
1919
1920         case READ_CAPACITY:
1921                 scsi->cdb[0] = READ_CAPACITY;
1922                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1923                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1924                 break;
1925
1926         case INQUIRY:
1927                 scsi->cdb[0] = INQUIRY;
1928                 scsi->cdb[1] = 0x01;    /* evpd */
1929                 scsi->cdb[2] = 0x80;    /* serial number page */
1930                 scsi->cdb[4] = 0x10;
1931                 sgd->byte_count = 16;
1932                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1933                 break;
1934
1935         case SYNCHRONIZE_CACHE:
1936                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1937                 sgd->byte_count = 0;
1938                 scsi->hdr.sg_list_len_bytes = 0;
1939                 break;
1940
1941         case WRITE_BUFFER:
1942                 scsi->cdb[0] = WRITE_BUFFER;
1943                 scsi->cdb[1] = 0x02;
1944                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1945                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1946                 sgd->byte_count = WR_BUF_SIZE;
1947                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1948                 /* fill incrementing byte pattern */
1949                 for (i = 0; i < sgd->byte_count; i++)
1950                         buf[i] = i & 0xFF;
1951                 break;
1952
1953         case READ_BUFFER:
1954                 scsi->cdb[0] = READ_BUFFER;
1955                 scsi->cdb[1] = 0x02;
1956                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1957                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1958                 sgd->byte_count = WR_BUF_SIZE;
1959                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1960                 memset(skspcl->data_buf, 0, sgd->byte_count);
1961                 break;
1962
1963         default:
1964                 SKD_ASSERT("Don't know what to send");
1965                 return;
1966
1967         }
1968         skd_send_special_fitmsg(skdev, skspcl);
1969 }
1970
1971 static void skd_refresh_device_data(struct skd_device *skdev)
1972 {
1973         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1974
1975         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1976 }
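/*
 * Device refresh runs as a chain of internal commands: TEST_UNIT_READY
 * is issued here, and skd_complete_internal() then sends WRITE_BUFFER,
 * READ_BUFFER, READ_CAPACITY and finally INQUIRY as each step
 * completes, unquiescing the device once the serial number has been
 * captured.
 */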
1977
1978 static int skd_chk_read_buf(struct skd_device *skdev,
1979                             struct skd_special_context *skspcl)
1980 {
1981         unsigned char *buf = skspcl->data_buf;
1982         int i;
1983
1984         /* check for incrementing byte pattern */
1985         for (i = 0; i < WR_BUF_SIZE; i++)
1986                 if (buf[i] != (i & 0xFF))
1987                         return 1;
1988
1989         return 0;
1990 }
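/*
 * The WRITE_BUFFER/READ_BUFFER pair (mode 0x02, plain data mode) acts
 * as an end-to-end data path check: the write fills the device buffer
 * with an incrementing byte pattern and the read-back is verified here
 * byte for byte.  A mismatch makes skd_complete_internal() soft-reset
 * the card, up to SKD_MAX_CONNECT_RETRIES times.
 */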
1991
1992 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1993                                  u8 code, u8 qual, u8 fruc)
1994 {
1995         /* If the check condition is of special interest, log a message */
1996         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1997             && (code == 0x04) && (qual == 0x06)) {
1998                 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1999                        "ascq/fruc %02x/%02x/%02x/%02x\n",
2000                        skd_name(skdev), key, code, qual, fruc);
2001         }
2002 }
2003
2004 static void skd_complete_internal(struct skd_device *skdev,
2005                                   volatile struct fit_completion_entry_v1
2006                                   *skcomp,
2007                                   volatile struct fit_comp_error_info *skerr,
2008                                   struct skd_special_context *skspcl)
2009 {
2010         u8 *buf = skspcl->data_buf;
2011         u8 status;
2012         int i;
2013         struct skd_scsi_request *scsi =
2014                 (struct skd_scsi_request *)&skspcl->msg_buf[64];
2015
2016         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2017
2018         pr_debug("%s:%s:%d complete internal %x\n",
2019                  skdev->name, __func__, __LINE__, scsi->cdb[0]);
2020
2021         skspcl->req.completion = *skcomp;
2022         skspcl->req.state = SKD_REQ_STATE_IDLE;
2023         skspcl->req.id += SKD_ID_INCR;
2024
2025         status = skspcl->req.completion.status;
2026
2027         skd_log_check_status(skdev, status, skerr->key, skerr->code,
2028                              skerr->qual, skerr->fruc);
2029
2030         switch (scsi->cdb[0]) {
2031         case TEST_UNIT_READY:
2032                 if (status == SAM_STAT_GOOD)
2033                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2034                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
2035                          (skerr->key == MEDIUM_ERROR))
2036                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2037                 else {
2038                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2039                                 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
2040                                          skdev->name, __func__, __LINE__,
2041                                          skdev->state);
2042                                 return;
2043                         }
2044                         pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
2045                                  skdev->name, __func__, __LINE__);
2046                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2047                 }
2048                 break;
2049
2050         case WRITE_BUFFER:
2051                 if (status == SAM_STAT_GOOD)
2052                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2053                 else {
2054                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2055                                 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
2056                                          skdev->name, __func__, __LINE__,
2057                                          skdev->state);
2058                                 return;
2059                         }
2060                         pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2061                                  skdev->name, __func__, __LINE__);
2062                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2063                 }
2064                 break;
2065
2066         case READ_BUFFER:
2067                 if (status == SAM_STAT_GOOD) {
2068                         if (skd_chk_read_buf(skdev, skspcl) == 0)
2069                                 skd_send_internal_skspcl(skdev, skspcl,
2070                                                          READ_CAPACITY);
2071                         else {
2072                                 pr_err(
2073                                        "(%s):*** W/R Buffer mismatch %d ***\n",
2074                                        skd_name(skdev), skdev->connect_retries);
2075                                 if (skdev->connect_retries <
2076                                     SKD_MAX_CONNECT_RETRIES) {
2077                                         skdev->connect_retries++;
2078                                         skd_soft_reset(skdev);
2079                                 } else {
2080                                         pr_err(
2081                                                "(%s): W/R Buffer Connect Error\n",
2082                                                skd_name(skdev));
2083                                         return;
2084                                 }
2085                         }
2086
2087                 } else {
2088                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2089                                 pr_debug("%s:%s:%d "
2090                                          "read buffer failed, don't send anymore state 0x%x\n",
2091                                          skdev->name, __func__, __LINE__,
2092                                          skdev->state);
2093                                 return;
2094                         }
2095                         pr_debug("%s:%s:%d "
2096                                  "**** read buffer failed, retry skerr\n",
2097                                  skdev->name, __func__, __LINE__);
2098                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2099                 }
2100                 break;
2101
2102         case READ_CAPACITY:
2103                 skdev->read_cap_is_valid = 0;
2104                 if (status == SAM_STAT_GOOD) {
2105                         skdev->read_cap_last_lba =
2106                                 (buf[0] << 24) | (buf[1] << 16) |
2107                                 (buf[2] << 8) | buf[3];
2108                         skdev->read_cap_blocksize =
2109                                 (buf[4] << 24) | (buf[5] << 16) |
2110                                 (buf[6] << 8) | buf[7];
2111
2112                         pr_debug("%s:%s:%d last lba %d, bs %d\n",
2113                                  skdev->name, __func__, __LINE__,
2114                                  skdev->read_cap_last_lba,
2115                                  skdev->read_cap_blocksize);
2116
2117                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2118
2119                         skdev->read_cap_is_valid = 1;
2120
2121                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2122                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2123                            (skerr->key == MEDIUM_ERROR)) {
2124                         skdev->read_cap_last_lba = ~0;
2125                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2126                         pr_debug("%s:%s:%d "
2127                                  "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2128                                  skdev->name, __func__, __LINE__);
2129                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2130                 } else {
2131                         pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2132                                  skdev->name, __func__, __LINE__);
2133                         skd_send_internal_skspcl(skdev, skspcl,
2134                                                  TEST_UNIT_READY);
2135                 }
2136                 break;
2137
2138         case INQUIRY:
2139                 skdev->inquiry_is_valid = 0;
2140                 if (status == SAM_STAT_GOOD) {
2141                         skdev->inquiry_is_valid = 1;
2142
2143                         for (i = 0; i < 12; i++)
2144                                 skdev->inq_serial_num[i] = buf[i + 4];
2145                         skdev->inq_serial_num[12] = 0;
2146                 }
2147
2148                 if (skd_unquiesce_dev(skdev) < 0)
2149                         pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2150                                  skdev->name, __func__, __LINE__);
2151                  /* connection is complete */
2152                 skdev->connect_retries = 0;
2153                 break;
2154
2155         case SYNCHRONIZE_CACHE:
2156                 if (status == SAM_STAT_GOOD)
2157                         skdev->sync_done = 1;
2158                 else
2159                         skdev->sync_done = -1;
2160                 wake_up_interruptible(&skdev->waitq);
2161                 break;
2162
2163         default:
2164                 SKD_ASSERT("we didn't send this");
2165         }
2166 }
2167
2168 /*
2169  *****************************************************************************
2170  * FIT MESSAGES
2171  *****************************************************************************
2172  */
2173
2174 static void skd_send_fitmsg(struct skd_device *skdev,
2175                             struct skd_fitmsg_context *skmsg)
2176 {
2177         u64 qcmd;
2178         struct fit_msg_hdr *fmh;
2179
2180         pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2181                  skdev->name, __func__, __LINE__,
2182                  skmsg->mb_dma_address, skdev->in_flight);
2183         pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2184                  skdev->name, __func__, __LINE__,
2185                  skmsg->msg_buf, skmsg->offset);
2186
2187         qcmd = skmsg->mb_dma_address;
2188         qcmd |= FIT_QCMD_QID_NORMAL;
2189
2190         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2191         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2192
2193         if (unlikely(skdev->dbg_level > 1)) {
2194                 u8 *bp = (u8 *)skmsg->msg_buf;
2195                 int i;
2196                 for (i = 0; i < skmsg->length; i += 8) {
2197                         pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2198                                  "%02x %02x %02x %02x\n",
2199                                  skdev->name, __func__, __LINE__,
2200                                  i, bp[i + 0], bp[i + 1], bp[i + 2],
2201                                  bp[i + 3], bp[i + 4], bp[i + 5],
2202                                  bp[i + 6], bp[i + 7]);
2203                         if (i == 0)     /* skip rest of 64-byte FIT hdr */
2204                                 i = 64 - 8;
2205                 }
2206         }
2207
2208         if (skmsg->length > 256)
2209                 qcmd |= FIT_QCMD_MSGSIZE_512;
2210         else if (skmsg->length > 128)
2211                 qcmd |= FIT_QCMD_MSGSIZE_256;
2212         else if (skmsg->length > 64)
2213                 qcmd |= FIT_QCMD_MSGSIZE_128;
2214         else
2215                 /*
2216                  * This makes no sense because the FIT msg header is
2217                  * 64 bytes. If the msg is only 64 bytes long it has
2218                  * no payload.
2219                  */
2220                 qcmd |= FIT_QCMD_MSGSIZE_64;
2221
2222         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2223
2224 }
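/*
 * Submission is a single 64-bit write to FIT_Q_COMMAND: the message's
 * DMA address is OR'd with the queue selector and a message-size code,
 * which relies on the message buffer being aligned well enough that
 * those low-order flag bits never collide with address bits.
 */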
2225
2226 static void skd_send_special_fitmsg(struct skd_device *skdev,
2227                                     struct skd_special_context *skspcl)
2228 {
2229         u64 qcmd;
2230
2231         if (unlikely(skdev->dbg_level > 1)) {
2232                 u8 *bp = (u8 *)skspcl->msg_buf;
2233                 int i;
2234
2235                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2236                         pr_debug("%s:%s:%d  spcl[%2d] %02x %02x %02x %02x  "
2237                                  "%02x %02x %02x %02x\n",
2238                                  skdev->name, __func__, __LINE__, i,
2239                                  bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2240                                  bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2241                         if (i == 0)     /* skip rest of 64-byte FIT hdr */
2242                                 i = 64 - 8;
2243                 }
2244
2245                 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2246                          skdev->name, __func__, __LINE__,
2247                          skspcl, skspcl->req.id, skspcl->req.sksg_list,
2248                          skspcl->req.sksg_dma_address);
2249                 for (i = 0; i < skspcl->req.n_sg; i++) {
2250                         struct fit_sg_descriptor *sgd =
2251                                 &skspcl->req.sksg_list[i];
2252
2253                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2254                                  "addr=0x%llx next=0x%llx\n",
2255                                  skdev->name, __func__, __LINE__,
2256                                  i, sgd->byte_count, sgd->control,
2257                                  sgd->host_side_addr, sgd->next_desc_ptr);
2258                 }
2259         }
2260
2261         /*
2262          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2263          * and one 64-byte SSDI command.
2264          */
2265         qcmd = skspcl->mb_dma_address;
2266         qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
2267
2268         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2269 }
2270
2271 /*
2272  *****************************************************************************
2273  * COMPLETION QUEUE
2274  *****************************************************************************
2275  */
2276
2277 static void skd_complete_other(struct skd_device *skdev,
2278                                volatile struct fit_completion_entry_v1 *skcomp,
2279                                volatile struct fit_comp_error_info *skerr);
2280
2281 struct sns_info {
2282         u8 type;
2283         u8 stat;
2284         u8 key;
2285         u8 asc;
2286         u8 ascq;
2287         u8 mask;
2288         enum skd_check_status_action action;
2289 };
2290
2291 static struct sns_info skd_chkstat_table[] = {
2292         /* Good */
2293         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2294           SKD_CHECK_STATUS_REPORT_GOOD },
2295
2296         /* Smart alerts */
2297         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2298           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2299         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2300           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2301         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2302           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2303
2304         /* Retry (with limits) */
2305         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2306           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2307         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2308           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2309         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2310           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2311         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2312           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2313
2314         /* Busy (or about to be) */
2315         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2316           SKD_CHECK_STATUS_BUSY_IMMINENT },
2317 };
2318
2319 /*
2320  * Look up status and sense data to decide how to handle the error
2321  * from the device.
2322  * mask says which fields must match (0x10 type, 0x08 stat, 0x04 key,
2323  * 0x02 asc, 0x01 ascq); e.g., mask=0x18 checks type and stat only.
2324  */
2325
2326 static enum skd_check_status_action
2327 skd_check_status(struct skd_device *skdev,
2328                  u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2329 {
2330         int i, n;
2331
2332         pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2333                skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2334                skerr->fruc);
2335
2336         pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2337                  skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2338                  skerr->key, skerr->code, skerr->qual, skerr->fruc);
2339
2340         /* Does the info match an entry in the good category? */
2341         n = ARRAY_SIZE(skd_chkstat_table);
2342         for (i = 0; i < n; i++) {
2343                 struct sns_info *sns = &skd_chkstat_table[i];
2344
2345                 if (sns->mask & 0x10)
2346                         if (skerr->type != sns->type)
2347                                 continue;
2348
2349                 if (sns->mask & 0x08)
2350                         if (cmp_status != sns->stat)
2351                                 continue;
2352
2353                 if (sns->mask & 0x04)
2354                         if (skerr->key != sns->key)
2355                                 continue;
2356
2357                 if (sns->mask & 0x02)
2358                         if (skerr->code != sns->asc)
2359                                 continue;
2360
2361                 if (sns->mask & 0x01)
2362                         if (skerr->qual != sns->ascq)
2363                                 continue;
2364
2365                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2366                         pr_err("(%s): SMART Alert: sense key/asc/ascq "
2367                                "%02x/%02x/%02x\n",
2368                                skd_name(skdev), skerr->key,
2369                                skerr->code, skerr->qual);
2370                 }
2371                 return sns->action;
2372         }
2373
2374         /* No other match, so nonzero status means error,
2375          * zero status means good
2376          */
2377         if (cmp_status) {
2378                 pr_debug("%s:%s:%d status check: error\n",
2379                          skdev->name, __func__, __LINE__);
2380                 return SKD_CHECK_STATUS_REPORT_ERROR;
2381         }
2382
2383         pr_debug("%s:%s:%d status check good default\n",
2384                  skdev->name, __func__, __LINE__);
2385         return SKD_CHECK_STATUS_REPORT_GOOD;
2386 }
2387
2388 static void skd_resolve_req_exception(struct skd_device *skdev,
2389                                       struct skd_request_context *skreq)
2390 {
2391         u8 cmp_status = skreq->completion.status;
2392
2393         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2394         case SKD_CHECK_STATUS_REPORT_GOOD:
2395         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2396                 skd_end_request(skdev, skreq, 0);
2397                 break;
2398
2399         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2400                 skd_log_skreq(skdev, skreq, "retry(busy)");
2401                 blk_requeue_request(skdev->queue, skreq->req);
2402                 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2403                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2404                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2405                 skd_quiesce_dev(skdev);
2406                 break;
2407
2408         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2409                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2410                         skd_log_skreq(skdev, skreq, "retry");
2411                         blk_requeue_request(skdev->queue, skreq->req);
2412                         break;
2413                 }
2414         /* fall through to report error */
2415
2416         case SKD_CHECK_STATUS_REPORT_ERROR:
2417         default:
2418                 skd_end_request(skdev, skreq, -EIO);
2419                 break;
2420         }
2421 }
2422
2423 /* assume spinlock is already held */
2424 static void skd_release_skreq(struct skd_device *skdev,
2425                               struct skd_request_context *skreq)
2426 {
2427         u32 msg_slot;
2428         struct skd_fitmsg_context *skmsg;
2429
2430         u32 timo_slot;
2431
2432         /*
2433          * Reclaim the FIT msg buffer if this is
2434          * the first of the requests it carried to
2435          * be completed. The FIT msg buffer used to
2436          * send this request cannot be reused until
2437          * we are sure the s1120 card has copied
2438          * it to its memory. The FIT msg might have
2439          * contained several requests. As soon as
2440          * any of them are completed we know that
2441          * the entire FIT msg was transferred.
2442          * Only the first completed request will
2443          * match the FIT msg buffer id. The FIT
2444          * msg buffer id is immediately updated.
2445          * When subsequent requests complete the FIT
2446          * msg buffer id won't match, so we know
2447          * quite cheaply that it is already done.
2448          */
2449         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2450         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2451
2452         skmsg = &skdev->skmsg_table[msg_slot];
2453         if (skmsg->id == skreq->fitmsg_id) {
2454                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2455                 SKD_ASSERT(skmsg->outstanding > 0);
2456                 skmsg->outstanding--;
2457                 if (skmsg->outstanding == 0) {
2458                         skmsg->state = SKD_MSG_STATE_IDLE;
2459                         skmsg->id += SKD_ID_INCR;
2460                         skmsg->next = skdev->skmsg_free_list;
2461                         skdev->skmsg_free_list = skmsg;
2462                 }
2463         }
2464
2465         /*
2466          * Decrease the number of active requests.
2467          * Also decrements the count in the timeout slot.
2468          */
2469         SKD_ASSERT(skdev->in_flight > 0);
2470         skdev->in_flight -= 1;
2471
2472         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2473         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2474         skdev->timeout_slot[timo_slot] -= 1;
2475
2476         /*
2477          * Reset backpointer
2478          */
2479         skreq->req = NULL;
2480
2481         /*
2482          * Reclaim the skd_request_context
2483          */
2484         skreq->state = SKD_REQ_STATE_IDLE;
2485         skreq->id += SKD_ID_INCR;
2486         skreq->next = skdev->skreq_free_list;
2487         skdev->skreq_free_list = skreq;
2488 }
2489
2490 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2491
2492 static void skd_do_inq_page_00(struct skd_device *skdev,
2493                                volatile struct fit_completion_entry_v1 *skcomp,
2494                                volatile struct fit_comp_error_info *skerr,
2495                                uint8_t *cdb, uint8_t *buf)
2496 {
2497         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2498
2499         /* Caller requested "supported pages".  The driver needs to insert
2500          * its page.
2501          */
2502         pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2503                  skdev->name, __func__, __LINE__);
2504
2505         /* If the device rejected the request because the CDB was
2506          * improperly formed, then just leave.
2507          */
2508         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2509             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2510                 return;
2511
2512         /* Get the amount of space the caller allocated */
2513         max_bytes = (cdb[3] << 8) | cdb[4];
2514
2515         /* Get the number of pages actually returned by the device */
2516         drive_pages = (buf[2] << 8) | buf[3];
2517         drive_bytes = drive_pages + 4;
2518         new_size = drive_pages + 1;
2519
2520         /* Supported pages must be in numerical order, so find where
2521          * the driver page needs to be inserted into the list of
2522          * pages returned by the device.
2523          */
2524         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2525                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2526                         return; /* Device already uses this page code; abort */
2527                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2528                         break;
2529         }
2530
2531         if (insert_pt < max_bytes) {
2532                 uint16_t u;
2533
2534                 /* Shift everything up one byte to make room. */
2535                 for (u = new_size + 3; u > insert_pt; u--)
2536                         buf[u] = buf[u - 1];
2537                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2538
2539                 /* num_returned_bytes is big-endian on the wire; bump it
2540                  * by one to account for the inserted page code. */
2541                 skcomp->num_returned_bytes =
2542                         cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes)
2543                                     + 1);
2544         }
2545
2546         /* update page length field to reflect the driver's page too */
2547         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2548         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2549 }
2550
2551 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2552 {
2553         int pcie_reg;
2554         u16 pci_bus_speed;
2555         u8 pci_lanes;
2556
2557         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2558         if (pcie_reg) {
2559                 u16 linksta;
2560                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2561
2562                 pci_bus_speed = linksta & 0xF;
2563                 pci_lanes = (linksta & 0x3F0) >> 4;
2564         } else {
2565                 *speed = STEC_LINK_UNKNOWN;
2566                 *width = 0xFF;
2567                 return;
2568         }
2569
2570         switch (pci_bus_speed) {
2571         case 1:
2572                 *speed = STEC_LINK_2_5GTS;
2573                 break;
2574         case 2:
2575                 *speed = STEC_LINK_5GTS;
2576                 break;
2577         case 3:
2578                 *speed = STEC_LINK_8GTS;
2579                 break;
2580         default:
2581                 *speed = STEC_LINK_UNKNOWN;
2582                 break;
2583         }
2584
2585         if (pci_lanes <= 0x20)
2586                 *width = pci_lanes;
2587         else
2588                 *width = 0xFF;
2589 }
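/*
 * The decode above follows the PCIe Link Status register layout:
 * bits 3:0 of PCI_EXP_LNKSTA hold the current link speed (1 = 2.5 GT/s,
 * 2 = 5 GT/s, 3 = 8 GT/s) and bits 9:4 the negotiated link width,
 * hence the 0xF and 0x3F0 masks.
 */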
2590
2591 static void skd_do_inq_page_da(struct skd_device *skdev,
2592                                volatile struct fit_completion_entry_v1 *skcomp,
2593                                volatile struct fit_comp_error_info *skerr,
2594                                uint8_t *cdb, uint8_t *buf)
2595 {
2596         unsigned max_bytes;
2597         struct driver_inquiry_data inq;
2598         u16 val;
2599
2600         pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2601                  skdev->name, __func__, __LINE__);
2602
2603         memset(&inq, 0, sizeof(inq));
2604
2605         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2606
2607         if (skdev->pdev && skdev->pdev->bus) {
2608                 skd_get_link_info(skdev->pdev,
2609                                   &inq.pcie_link_speed, &inq.pcie_link_lanes);
2610                 inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
2611                 inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
2612                 inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
2613
2614                 pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
2615                 inq.pcie_vendor_id = cpu_to_be16(val);
2616
2617                 pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
2618                 inq.pcie_device_id = cpu_to_be16(val);
2619
2620                 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
2621                                      &val);
2622                 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2623
2624                 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
2625                 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2626         } else {
2627                 inq.pcie_bus_number = 0xFFFF;
2628                 inq.pcie_device_number = 0xFF;
2629                 inq.pcie_function_number = 0xFF;
2630                 inq.pcie_link_speed = 0xFF;
2631                 inq.pcie_link_lanes = 0xFF;
2632                 inq.pcie_vendor_id = 0xFFFF;
2633                 inq.pcie_device_id = 0xFFFF;
2634                 inq.pcie_subsystem_vendor_id = 0xFFFF;
2635                 inq.pcie_subsystem_device_id = 0xFFFF;
2636         }
2637
2638         /* Driver version, fixed length, padded with spaces on the right */
2639         inq.driver_version_length = sizeof(inq.driver_version);
2640         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2641         memcpy(inq.driver_version, DRV_VER_COMPL,
2642                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2643
2644         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2645
2646         /* Clear the error set by the device */
2647         skcomp->status = SAM_STAT_GOOD;
2648         memset((void *)skerr, 0, sizeof(*skerr));
2649
2650         /* copy response into output buffer */
2651         max_bytes = (cdb[3] << 8) | cdb[4];
2652         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2653
2654         skcomp->num_returned_bytes =
2655                 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2656 }
2657
2658 static void skd_do_driver_inq(struct skd_device *skdev,
2659                               volatile struct fit_completion_entry_v1 *skcomp,
2660                               volatile struct fit_comp_error_info *skerr,
2661                               uint8_t *cdb, uint8_t *buf)
2662 {
2663         if (!buf)
2664                 return;
2665         else if (cdb[0] != INQUIRY)
2666                 return;         /* Not an INQUIRY */
2667         else if ((cdb[1] & 1) == 0)
2668                 return;         /* EVPD not set */
2669         else if (cdb[2] == 0)
2670                 /* Need to add driver's page to supported pages list */
2671                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2672         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2673                 /* Caller requested driver's page */
2674                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2675 }
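/*
 * The driver page is only synthesized for EVPD INQUIRY commands, so a
 * caller must set the EVPD bit and name page 0xDA explicitly.  A 6-byte
 * CDB for that request would look roughly as follows (the 0xFF
 * allocation length is just an example value):
 *
 *     cdb[0] = 0x12;   INQUIRY
 *     cdb[1] = 0x01;   EVPD
 *     cdb[2] = 0xDA;   DRIVER_INQ_EVPD_PAGE_CODE
 *     cdb[3] = 0x00;   allocation length, MSB
 *     cdb[4] = 0xFF;   allocation length, LSB
 *     cdb[5] = 0x00;   control
 */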
2676
2677 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2678 {
2679         if (!sg)
2680                 return NULL;
2681         if (!sg_page(sg))
2682                 return NULL;
2683         return sg_virt(sg);
2684 }
2685
2686 static void skd_process_scsi_inq(struct skd_device *skdev,
2687                                  volatile struct fit_completion_entry_v1
2688                                  *skcomp,
2689                                  volatile struct fit_comp_error_info *skerr,
2690                                  struct skd_special_context *skspcl)
2691 {
2692         uint8_t *buf;
2693         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2694         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2695
2696         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2697                             skspcl->req.sg_data_dir);
2698         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2699
2700         if (buf)
2701                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2702 }
2703
2704
2705 static int skd_isr_completion_posted(struct skd_device *skdev,
2706                                         int limit, int *enqueued)
2707 {
2708         volatile struct fit_completion_entry_v1 *skcmp = NULL;
2709         volatile struct fit_comp_error_info *skerr;
2710         u16 req_id;
2711         u32 req_slot;
2712         struct skd_request_context *skreq;
2713         u16 cmp_cntxt = 0;
2714         u8 cmp_status = 0;
2715         u8 cmp_cycle = 0;
2716         u32 cmp_bytes = 0;
2717         int rc = 0;
2718         int processed = 0;
2719
2720         for (;; ) {
2721                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2722
2723                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2724                 cmp_cycle = skcmp->cycle;
2725                 cmp_cntxt = skcmp->tag;
2726                 cmp_status = skcmp->status;
2727                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2728
2729                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2730
2731                 pr_debug("%s:%s:%d "
2732                          "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2733                          "busy=%d rbytes=0x%x proto=%d\n",
2734                          skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2735                          skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2736                          skdev->in_flight, cmp_bytes, skdev->proto_ver);
2737
2738                 if (cmp_cycle != skdev->skcomp_cycle) {
2739                         pr_debug("%s:%s:%d end of completions\n",
2740                                  skdev->name, __func__, __LINE__);
2741                         break;
2742                 }
2743                 /*
2744                  * Update the completion queue head index and possibly
2745                  * the completion cycle count. 8-bit wrap-around.
2746                  */
2747                 skdev->skcomp_ix++;
2748                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2749                         skdev->skcomp_ix = 0;
2750                         skdev->skcomp_cycle++;
2751                 }
2752
2753                 /*
2754                  * The command context is a unique 32-bit ID. The low order
2755                  * bits help locate the request. The request is usually a
2756                  * r/w request (see skd_start() above) or a special request.
2757                  */
2758                 req_id = cmp_cntxt;
2759                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2760
2761                 /* Is this something other than a r/w request? */
2762                 if (req_slot >= skdev->num_req_context) {
2763                         /*
2764                          * This is not a completion for a r/w request.
2765                          */
2766                         skd_complete_other(skdev, skcmp, skerr);
2767                         continue;
2768                 }
2769
2770                 skreq = &skdev->skreq_table[req_slot];
2771
2772                 /*
2773                  * Make sure the request ID for the slot matches.
2774                  */
2775                 if (skreq->id != req_id) {
2776                         pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2777                                  skdev->name, __func__, __LINE__,
2778                                  req_id, skreq->id);
2779                         pr_err("(%s): Completion mismatch "
2780                                "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2781                                skd_name(skdev), req_id,
2782                                skreq->id, cmp_cntxt);
2783
2784                         continue;
2788                 }
2789
2790                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2791
2792                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2793                         pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2794                                  skdev->name, __func__, __LINE__,
2795                                  skreq, skreq->id);
2796                         /* a previously timed out command can
2797                          * now be cleaned up */
2798                         skd_release_skreq(skdev, skreq);
2799                         continue;
2800                 }
2801
2802                 skreq->completion = *skcmp;
2803                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2804                         skreq->err_info = *skerr;
2805                         skd_log_check_status(skdev, cmp_status, skerr->key,
2806                                              skerr->code, skerr->qual,
2807                                              skerr->fruc);
2808                 }
2809                 /* Release DMA resources for the request. */
2810                 if (skreq->n_sg > 0)
2811                         skd_postop_sg_list(skdev, skreq);
2812
2813                 if (!skreq->req) {
2814                         pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2815                                  "req=0x%x req_id=0x%x\n",
2816                                  skdev->name, __func__, __LINE__,
2817                                  skreq, skreq->id, req_id);
2818                 } else {
2819                         /*
2820                          * Capture the outcome and post it back to the
2821                          * native request.
2822                          */
2823                         if (likely(cmp_status == SAM_STAT_GOOD))
2824                                 skd_end_request(skdev, skreq, 0);
2825                         else
2826                                 skd_resolve_req_exception(skdev, skreq);
2827                 }
2828
2829                 /*
2830                  * Release the skreq, its FIT msg (if one), timeout slot,
2831                  * and queue depth.
2832                  */
2833                 skd_release_skreq(skdev, skreq);
2834
2835                 /* skd_isr_comp_limit equal to zero means no limit */
2836                 if (limit) {
2837                         if (++processed >= limit) {
2838                                 rc = 1;
2839                                 break;
2840                         }
2841                 }
2842         }
2843
2844         if (skdev->state == SKD_DRVR_STATE_PAUSING &&
2845             skdev->in_flight == 0) {
2846                 skdev->state = SKD_DRVR_STATE_PAUSED;
2847                 wake_up_interruptible(&skdev->waitq);
2848         }
2849
2850         return rc;
2851 }
2852
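/*
 * Dispatch a completion entry whose context id does not map to an
 * ordinary r/w request: special (pass-through) requests, the internal
 * request, or stale/invalid ids, which are silently dropped.
 */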
2853 static void skd_complete_other(struct skd_device *skdev,
2854                                volatile struct fit_completion_entry_v1 *skcomp,
2855                                volatile struct fit_comp_error_info *skerr)
2856 {
2857         u32 req_id = 0;
2858         u32 req_table;
2859         u32 req_slot;
2860         struct skd_special_context *skspcl;
2861
2862         req_id = skcomp->tag;
2863         req_table = req_id & SKD_ID_TABLE_MASK;
2864         req_slot = req_id & SKD_ID_SLOT_MASK;
2865
2866         pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2867                  skdev->name, __func__, __LINE__,
2868                  req_table, req_id, req_slot);
2869
2870         /*
2871          * Based on the request id, determine how to dispatch this completion.
2872          * This switch/case is finding the good cases and forwarding the
2873          * completion entry. Errors are reported below the switch.
2874          */
2875         switch (req_table) {
2876         case SKD_ID_RW_REQUEST:
2877                 /*
2878                  * The caller, skd_completion_posted_isr() above,
2879                  * handles r/w requests. The only way we get here
2880                  * is if the req_slot is out of bounds.
2881                  */
2882                 break;
2883
2884         case SKD_ID_SPECIAL_REQUEST:
2885                 /*
2886                  * Make sure the req_slot is in bounds and that the id
2887                  * matches.
2888                  */
2889                 if (req_slot < skdev->n_special) {
2890                         skspcl = &skdev->skspcl_table[req_slot];
2891                         if (skspcl->req.id == req_id &&
2892                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2893                                 skd_complete_special(skdev,
2894                                                      skcomp, skerr, skspcl);
2895                                 return;
2896                         }
2897                 }
2898                 break;
2899
2900         case SKD_ID_INTERNAL:
2901                 if (req_slot == 0) {
2902                         skspcl = &skdev->internal_skspcl;
2903                         if (skspcl->req.id == req_id &&
2904                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2905                                 skd_complete_internal(skdev,
2906                                                       skcomp, skerr, skspcl);
2907                                 return;
2908                         }
2909                 }
2910                 break;
2911
2912         case SKD_ID_FIT_MSG:
2913                 /*
2914                  * These id's should never appear in a completion record.
2915                  */
2916                 break;
2917
2918         default:
2919                 /*
2920                  * These id's should never appear anywhere;
2921                  */
2922                 break;
2923         }
2924
2925         /*
2926          * If we get here it is a bad or stale id.
2927          */
2928 }
2929
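/*
 * Finish a special (pass-through) request: discard it if it was
 * orphaned, otherwise record the completion and error info and wake
 * any waiter sleeping on skdev->waitq.
 */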
2930 static void skd_complete_special(struct skd_device *skdev,
2931                                  volatile struct fit_completion_entry_v1
2932                                  *skcomp,
2933                                  volatile struct fit_comp_error_info *skerr,
2934                                  struct skd_special_context *skspcl)
2935 {
2936         pr_debug("%s:%s:%d  completing special request %p\n",
2937                  skdev->name, __func__, __LINE__, skspcl);
2938         if (skspcl->orphaned) {
2939                 /* Discard orphaned request */
2940                 /* ?: Can this release directly or does it need
2941                  * to use a worker? */
2942                 pr_debug("%s:%s:%d release orphaned %p\n",
2943                          skdev->name, __func__, __LINE__, skspcl);
2944                 skd_release_special(skdev, skspcl);
2945                 return;
2946         }
2947
2948         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2949
2950         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2951         skspcl->req.completion = *skcomp;
2952         skspcl->req.err_info = *skerr;
2953
2954         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2955                              skerr->code, skerr->qual, skerr->fruc);
2956
2957         wake_up_interruptible(&skdev->waitq);
2958 }
2959
2960 /* assume spinlock is already held */
2961 static void skd_release_special(struct skd_device *skdev,
2962                                 struct skd_special_context *skspcl)
2963 {
2964         int i, was_depleted;
2965
2966         for (i = 0; i < skspcl->req.n_sg; i++) {
2967                 struct page *page = sg_page(&skspcl->req.sg[i]);
2968                 __free_page(page);
2969         }
2970
2971         was_depleted = (skdev->skspcl_free_list == NULL);
2972
2973         skspcl->req.state = SKD_REQ_STATE_IDLE;
2974         skspcl->req.id += SKD_ID_INCR;
2975         skspcl->req.next =
2976                 (struct skd_request_context *)skdev->skspcl_free_list;
2977         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2978
2979         if (was_depleted) {
2980                 pr_debug("%s:%s:%d skspcl was depleted\n",
2981                          skdev->name, __func__, __LINE__);
2982                 /* Free list was depleted. There might be waiters. */
2983                 wake_up_interruptible(&skdev->waitq);
2984         }
2985 }
2986
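/*
 * Clear the completion and error tables and reset the ring index and
 * expected cycle so the device can start posting completions from
 * entry 0 again.
 */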
2987 static void skd_reset_skcomp(struct skd_device *skdev)
2988 {
2989         u32 nbytes;
2990         struct fit_completion_entry_v1 *skcomp;
2991
2992         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2993         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2994
2995         memset(skdev->skcomp_table, 0, nbytes);
2996
2997         skdev->skcomp_ix = 0;
2998         skdev->skcomp_cycle = 1;
2999 }
3000
3001 /*
3002  *****************************************************************************
3003  * INTERRUPTS
3004  *****************************************************************************
3005  */
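/*
 * Work handler used when completion processing is deferred from the
 * ISR: drain the entire completion queue (no limit) and then restart
 * the request queue.
 */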
3006 static void skd_completion_worker(struct work_struct *work)
3007 {
3008         struct skd_device *skdev =
3009                 container_of(work, struct skd_device, completion_worker);
3010         unsigned long flags;
3011         int flush_enqueued = 0;
3012
3013         spin_lock_irqsave(&skdev->lock, flags);
3014
3015         /*
3016          * Pass in limit=0, which means no limit:
3017          * process everything in the completion queue.
3018          */
3019         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3020         skd_request_fn(skdev->queue);
3021
3022         spin_unlock_irqrestore(&skdev->lock, flags);
3023 }
3024
3025 static void skd_isr_msg_from_dev(struct skd_device *skdev);
3026
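/*
 * Legacy INTx/MSI interrupt handler. Loops while interrupt bits are
 * pending in FIT_INT_STATUS_HOST, acking them and servicing completion,
 * firmware-state-change, and message-from-device events; heavier
 * completion processing is deferred to the completion worker.
 */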
3027 static irqreturn_t
3028 skd_isr(int irq, void *ptr)
3029 {
3030         struct skd_device *skdev;
3031         u32 intstat;
3032         u32 ack;
3033         int rc = 0;
3034         int deferred = 0;
3035         int flush_enqueued = 0;
3036
3037         skdev = (struct skd_device *)ptr;
3038         spin_lock(&skdev->lock);
3039
3040         for (;;) {
3041                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3042
3043                 ack = FIT_INT_DEF_MASK;
3044                 ack &= intstat;
3045
3046                 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
3047                          skdev->name, __func__, __LINE__, intstat, ack);
3048
3049                 /* As long as there is an interrupt pending on the device,
3050                  * keep running the loop. When none remain, get out; if we
3051                  * never did any processing, defer to the completion handler.
3052                  */
3053                 if (ack == 0) {
3054                         /* No interrupts on device, but run the completion
3055                          * processor anyway?
3056                          */
3057                         if (rc == 0)
3058                                 if (likely (skdev->state
3059                                         == SKD_DRVR_STATE_ONLINE))
3060                                         deferred = 1;
3061                         break;
3062                 }
3063
3064                 rc = IRQ_HANDLED;
3065
3066                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3067
3068                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3069                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3070                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
3071                                 /*
3072                                  * If we have already deferred completion
3073                                  * processing, don't bother running it again
3074                                  */
3075                                 if (deferred == 0)
3076                                         deferred =
3077                                                 skd_isr_completion_posted(skdev,
3078                                                 skd_isr_comp_limit, &flush_enqueued);
3079                         }
3080
3081                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3082                                 skd_isr_fwstate(skdev);
3083                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3084                                     skdev->state ==
3085                                     SKD_DRVR_STATE_DISAPPEARED) {
3086                                         spin_unlock(&skdev->lock);
3087                                         return rc;
3088                                 }
3089                         }
3090
3091                         if (intstat & FIT_ISH_MSG_FROM_DEV)
3092                                 skd_isr_msg_from_dev(skdev);
3093                 }
3094         }
3095
3096         if (unlikely(flush_enqueued))
3097                 skd_request_fn(skdev->queue);
3098
3099         if (deferred)
3100                 schedule_work(&skdev->completion_worker);
3101         else if (!flush_enqueued)
3102                 skd_request_fn(skdev->queue);
3103
3104         spin_unlock(&skdev->lock);
3105
3106         return rc;
3107 }
3108
3109 static void skd_drive_fault(struct skd_device *skdev)
3110 {
3111         skdev->state = SKD_DRVR_STATE_FAULT;
3112         pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3113 }
3114
3115 static void skd_drive_disappeared(struct skd_device *skdev)
3116 {
3117         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3118         pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3119 }
3120
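/*
 * Handle a firmware/drive state-change interrupt: read the new drive
 * state from FIT_STATUS and move the driver state machine accordingly.
 */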
3121 static void skd_isr_fwstate(struct skd_device *skdev)
3122 {
3123         u32 sense;
3124         u32 state;
3125         u32 mtd;
3126         int prev_driver_state = skdev->state;
3127
3128         sense = SKD_READL(skdev, FIT_STATUS);
3129         state = sense & FIT_SR_DRIVE_STATE_MASK;
3130
3131         pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3132                skd_name(skdev),
3133                skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3134                skd_drive_state_to_str(state), state);
3135
3136         skdev->drive_state = state;
3137
3138         switch (skdev->drive_state) {
3139         case FIT_SR_DRIVE_INIT:
3140                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3141                         skd_disable_interrupts(skdev);
3142                         break;
3143                 }
3144                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3145                         skd_recover_requests(skdev, 0);
3146                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3147                         skdev->timer_countdown = SKD_STARTING_TIMO;
3148                         skdev->state = SKD_DRVR_STATE_STARTING;
3149                         skd_soft_reset(skdev);
3150                         break;
3151                 }
3152                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3153                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3154                 skdev->last_mtd = mtd;
3155                 break;
3156
3157         case FIT_SR_DRIVE_ONLINE:
3158                 skdev->cur_max_queue_depth = skd_max_queue_depth;
3159                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3160                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3161
3162                 skdev->queue_low_water_mark =
3163                         skdev->cur_max_queue_depth * 2 / 3 + 1;
3164                 if (skdev->queue_low_water_mark < 1)
3165                         skdev->queue_low_water_mark = 1;
3166                 pr_info(
3167                        "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3168                        skd_name(skdev),
3169                        skdev->cur_max_queue_depth,
3170                        skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3171
3172                 skd_refresh_device_data(skdev);
3173                 break;
3174
3175         case FIT_SR_DRIVE_BUSY:
3176                 skdev->state = SKD_DRVR_STATE_BUSY;
3177                 skdev->timer_countdown = SKD_BUSY_TIMO;
3178                 skd_quiesce_dev(skdev);
3179                 break;
3180         case FIT_SR_DRIVE_BUSY_SANITIZE:
3181                 /* set timer for 3 seconds; we'll abort any unfinished
3182                  * commands after it expires
3183                  */
3184                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3185                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3186                 blk_start_queue(skdev->queue);
3187                 break;
3188         case FIT_SR_DRIVE_BUSY_ERASE:
3189                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3190                 skdev->timer_countdown = SKD_BUSY_TIMO;
3191                 break;
3192         case FIT_SR_DRIVE_OFFLINE:
3193                 skdev->state = SKD_DRVR_STATE_IDLE;
3194                 break;
3195         case FIT_SR_DRIVE_SOFT_RESET:
3196                 switch (skdev->state) {
3197                 case SKD_DRVR_STATE_STARTING:
3198                 case SKD_DRVR_STATE_RESTARTING:
3199                         /* Expected by a caller of skd_soft_reset() */
3200                         break;
3201                 default:
3202                         skdev->state = SKD_DRVR_STATE_RESTARTING;
3203                         break;
3204                 }
3205                 break;
3206         case FIT_SR_DRIVE_FW_BOOTING:
3207                 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3208                          skdev->name, __func__, __LINE__, skdev->name);
3209                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3210                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3211                 break;
3212
3213         case FIT_SR_DRIVE_DEGRADED:
3214         case FIT_SR_PCIE_LINK_DOWN:
3215         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3216                 break;
3217
3218         case FIT_SR_DRIVE_FAULT:
3219                 skd_drive_fault(skdev);
3220                 skd_recover_requests(skdev, 0);
3221                 blk_start_queue(skdev->queue);
3222                 break;
3223
3224         /* PCIe bus returned all Fs? */
3225         case 0xFF:
3226                 pr_info("(%s): state=0x%x sense=0x%x\n",
3227                        skd_name(skdev), state, sense);
3228                 skd_drive_disappeared(skdev);
3229                 skd_recover_requests(skdev, 0);
3230                 blk_start_queue(skdev->queue);
3231                 break;
3232         default:
3233                 /*
3234                  * Unknown FW state. Wait for a state we recognize.
3235                  */
3236                 break;
3237         }
3238         pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3239                skd_name(skdev),
3240                skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3241                skd_skdev_state_to_str(skdev->state), skdev->state);
3242 }
3243
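/*
 * Abort (or, when requeue is set and the retry count permits, requeue)
 * every outstanding request, reset busy FIT message and special-request
 * contexts, rebuild the free lists, and clear the timeout slots and
 * in-flight count. The caller is expected to hold skdev->lock.
 */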
3244 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3245 {
3246         int i;
3247
3248         for (i = 0; i < skdev->num_req_context; i++) {
3249                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3250
3251                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3252                         skd_log_skreq(skdev, skreq, "recover");
3253
3254                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3255                         SKD_ASSERT(skreq->req != NULL);
3256
3257                         /* Release DMA resources for the request. */
3258                         if (skreq->n_sg > 0)
3259                                 skd_postop_sg_list(skdev, skreq);
3260
3261                         if (requeue &&
3262                             (unsigned long) ++skreq->req->special <
3263                             SKD_MAX_RETRIES)
3264                                 blk_requeue_request(skdev->queue, skreq->req);
3265                         else
3266                                 skd_end_request(skdev, skreq, -EIO);
3267
3268                         skreq->req = NULL;
3269
3270                         skreq->state = SKD_REQ_STATE_IDLE;
3271                         skreq->id += SKD_ID_INCR;
3272                 }
3273                 if (i > 0)
3274                         skreq[-1].next = skreq;
3275                 skreq->next = NULL;
3276         }
3277         skdev->skreq_free_list = skdev->skreq_table;
3278
3279         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3280                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3281
3282                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3283                         skd_log_skmsg(skdev, skmsg, "salvaged");
3284                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3285                         skmsg->state = SKD_MSG_STATE_IDLE;
3286                         skmsg->id += SKD_ID_INCR;
3287                 }
3288                 if (i > 0)
3289                         skmsg[-1].next = skmsg;
3290                 skmsg->next = NULL;
3291         }
3292         skdev->skmsg_free_list = skdev->skmsg_table;
3293
3294         for (i = 0; i < skdev->n_special; i++) {
3295                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3296
3297                 /* If orphaned, reclaim it because it has already been reported
3298                  * to the process as an error (it was just waiting for
3299                  * a completion that didn't come, and now it never will).
3300                  * If busy, change to a state that will cause it to error
3301                  * out in the wait routine and let it do the normal
3302                  * reporting and reclaiming.
3303                  */
3304                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3305                         if (skspcl->orphaned) {
3306                                 pr_debug("%s:%s:%d orphaned %p\n",
3307                                          skdev->name, __func__, __LINE__,
3308                                          skspcl);
3309                                 skd_release_special(skdev, skspcl);
3310                         } else {
3311                                 pr_debug("%s:%s:%d not orphaned %p\n",
3312                                          skdev->name, __func__, __LINE__,
3313                                          skspcl);
3314                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3315                         }
3316                 }
3317         }
3318         skdev->skspcl_free_list = skdev->skspcl_table;
3319
3320         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3321                 skdev->timeout_slot[i] = 0;
3322
3323         skdev->in_flight = 0;
3324 }
3325
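/*
 * Process a message-from-device interrupt. These messages drive the
 * init-time handshake: protocol version, queue depths, completion
 * queue address, host id and time stamps, and finally arming the queue.
 */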
3326 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3327 {
3328         u32 mfd;
3329         u32 mtd;
3330         u32 data;
3331
3332         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3333
3334         pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3335                  skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3336
3337         /* ignore any mtd that is an ack for something we didn't send */
3338         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3339                 return;
3340
3341         switch (FIT_MXD_TYPE(mfd)) {
3342         case FIT_MTD_FITFW_INIT:
3343                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3344
3345                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3346                         pr_err("(%s): protocol mismatch\n",
3347                                skdev->name);
3348                         pr_err("(%s):   got=%d support=%d\n",
3349                                skdev->name, skdev->proto_ver,
3350                                FIT_PROTOCOL_VERSION_1);
3351                         pr_err("(%s):   please upgrade driver\n",
3352                                skdev->name);
3353                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3354                         skd_soft_reset(skdev);
3355                         break;
3356                 }
3357                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3358                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3359                 skdev->last_mtd = mtd;
3360                 break;
3361
3362         case FIT_MTD_GET_CMDQ_DEPTH:
3363                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3364                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3365                                    SKD_N_COMPLETION_ENTRY);
3366                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3367                 skdev->last_mtd = mtd;
3368                 break;
3369
3370         case FIT_MTD_SET_COMPQ_DEPTH:
3371                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3372                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3373                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3374                 skdev->last_mtd = mtd;
3375                 break;
3376
3377         case FIT_MTD_SET_COMPQ_ADDR:
3378                 skd_reset_skcomp(skdev);
3379                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3380                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3381                 skdev->last_mtd = mtd;
3382                 break;
3383
3384         case FIT_MTD_CMD_LOG_HOST_ID:
3385                 skdev->connect_time_stamp = get_seconds();
3386                 data = skdev->connect_time_stamp & 0xFFFF;
3387                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3388                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3389                 skdev->last_mtd = mtd;
3390                 break;
3391
3392         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3393                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3394                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3395                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3396                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3397                 skdev->last_mtd = mtd;
3398                 break;
3399
3400         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3401                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3402                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3403                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3404                 skdev->last_mtd = mtd;
3405
3406                 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3407                        skd_name(skdev),
3408                        skdev->connect_time_stamp, skdev->drive_jiffies);
3409                 break;
3410
3411         case FIT_MTD_ARM_QUEUE:
3412                 skdev->last_mtd = 0;
3413                 /*
3414                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3415                  */
3416                 break;
3417
3418         default:
3419                 break;
3420         }
3421 }
3422
3423 static void skd_disable_interrupts(struct skd_device *skdev)
3424 {
3425         u32 sense;
3426
3427         sense = SKD_READL(skdev, FIT_CONTROL);
3428         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3429         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3430         pr_debug("%s:%s:%d sense 0x%x\n",
3431                  skdev->name, __func__, __LINE__, sense);
3432
3433         /* Note that all 1s are written. A 1-bit means
3434          * disable, a 0-bit means enable.
3435          */
3436         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3437 }
3438
3439 static void skd_enable_interrupts(struct skd_device *skdev)
3440 {
3441         u32 val;
3442
3443         /* unmask interrupts first */
3444         val = FIT_ISH_FW_STATE_CHANGE |
3445               FIT_ISH_COMPLETION_POSTED | FIT_ISH_MSG_FROM_DEV;
3446
3447         /* Note that the complement of the mask is written. A 1-bit means
3448          * disable, a 0-bit means enable. */
3449         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3450         pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3451                  skdev->name, __func__, __LINE__, ~val);
3452
3453         val = SKD_READL(skdev, FIT_CONTROL);
3454         val |= FIT_CR_ENABLE_INTERRUPTS;
3455         pr_debug("%s:%s:%d control=0x%x\n",
3456                  skdev->name, __func__, __LINE__, val);
3457         SKD_WRITEL(skdev, val, FIT_CONTROL);
3458 }
3459
3460 /*
3461  *****************************************************************************
3462  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3463  *****************************************************************************
3464  */
3465
3466 static void skd_soft_reset(struct skd_device *skdev)
3467 {
3468         u32 val;
3469
3470         val = SKD_READL(skdev, FIT_CONTROL);
3471         val |= (FIT_CR_SOFT_RESET);
3472         pr_debug("%s:%s:%d control=0x%x\n",
3473                  skdev->name, __func__, __LINE__, val);
3474         SKD_WRITEL(skdev, val, FIT_CONTROL);
3475 }
3476
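/*
 * Bring the device up: ack stale interrupts, read the initial drive
 * state, enable interrupts, and kick off the start or reset sequence
 * appropriate for that state.
 */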
3477 static void skd_start_device(struct skd_device *skdev)
3478 {
3479         unsigned long flags;
3480         u32 sense;
3481         u32 state;
3482
3483         spin_lock_irqsave(&skdev->lock, flags);
3484
3485         /* ack all ghost interrupts */
3486         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3487
3488         sense = SKD_READL(skdev, FIT_STATUS);
3489
3490         pr_debug("%s:%s:%d initial status=0x%x\n",
3491                  skdev->name, __func__, __LINE__, sense);
3492
3493         state = sense & FIT_SR_DRIVE_STATE_MASK;
3494         skdev->drive_state = state;
3495         skdev->last_mtd = 0;
3496
3497         skdev->state = SKD_DRVR_STATE_STARTING;
3498         skdev->timer_countdown = SKD_STARTING_TIMO;
3499
3500         skd_enable_interrupts(skdev);
3501
3502         switch (skdev->drive_state) {
3503         case FIT_SR_DRIVE_OFFLINE:
3504                 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3505                 break;
3506
3507         case FIT_SR_DRIVE_FW_BOOTING:
3508                 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3509                          skdev->name, __func__, __LINE__, skdev->name);
3510                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3511                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3512                 break;
3513
3514         case FIT_SR_DRIVE_BUSY_SANITIZE:
3515                 pr_info("(%s): Start: BUSY_SANITIZE\n",
3516                        skd_name(skdev));
3517                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3518                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3519                 break;
3520
3521         case FIT_SR_DRIVE_BUSY_ERASE:
3522                 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3523                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3524                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3525                 break;
3526
3527         case FIT_SR_DRIVE_INIT:
3528         case FIT_SR_DRIVE_ONLINE:
3529                 skd_soft_reset(skdev);
3530                 break;
3531
3532         case FIT_SR_DRIVE_BUSY:
3533                 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3534                 skdev->state = SKD_DRVR_STATE_BUSY;
3535                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3536                 break;
3537
3538         case FIT_SR_DRIVE_SOFT_RESET:
3539                 pr_err("(%s) drive soft reset in prog\n",
3540                        skd_name(skdev));
3541                 break;
3542
3543         case FIT_SR_DRIVE_FAULT:
3544                 /* Fault state is bad: a soft reset won't clear it.
3545                  * A hard reset might, but it's unclear whether the device supports one.
3546                  * For now, just fault so the system doesn't hang.
3547                  */
3548                 skd_drive_fault(skdev);
3549                 /*start the queue so we can respond with error to requests */
3550                 pr_debug("%s:%s:%d starting %s queue\n",
3551                          skdev->name, __func__, __LINE__, skdev->name);
3552                 blk_start_queue(skdev->queue);
3553                 skdev->gendisk_on = -1;
3554                 wake_up_interruptible(&skdev->waitq);
3555                 break;
3556
3557         case 0xFF:
3558                 /* Most likely the device isn't there or isn't responding
3559                  * to the BAR1 addresses. */
3560                 skd_drive_disappeared(skdev);
3561                 /*start the queue so we can respond with error to requests */
3562                 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3563                          skdev->name, __func__, __LINE__, skdev->name);
3564                 blk_start_queue(skdev->queue);
3565                 skdev->gendisk_on = -1;
3566                 wake_up_interruptible(&skdev->waitq);
3567                 break;
3568
3569         default:
3570                 pr_err("(%s) Start: unknown state %x\n",
3571                        skd_name(skdev), skdev->drive_state);
3572                 break;
3573         }
3574
3575         state = SKD_READL(skdev, FIT_CONTROL);
3576         pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3577                  skdev->name, __func__, __LINE__, state);
3578
3579         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3580         pr_debug("%s:%s:%d Intr Status=0x%x\n",
3581                  skdev->name, __func__, __LINE__, state);
3582
3583         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3584         pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3585                  skdev->name, __func__, __LINE__, state);
3586
3587         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3588         pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3589                  skdev->name, __func__, __LINE__, state);
3590
3591         state = SKD_READL(skdev, FIT_HW_VERSION);
3592         pr_debug("%s:%s:%d HW version=0x%x\n",
3593                  skdev->name, __func__, __LINE__, state);
3594
3595         spin_unlock_irqrestore(&skdev->lock, flags);
3596 }
3597
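/*
 * Quiesce and stop the device: issue a SYNCHRONIZE CACHE through the
 * internal special request, wait up to 10 seconds for it to finish,
 * then disable interrupts and soft-reset the hardware.
 */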
3598 static void skd_stop_device(struct skd_device *skdev)
3599 {
3600         unsigned long flags;
3601         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3602         u32 dev_state;
3603         int i;
3604
3605         spin_lock_irqsave(&skdev->lock, flags);
3606
3607         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3608                 pr_err("(%s): skd_stop_device not online no sync\n",
3609                        skd_name(skdev));
3610                 goto stop_out;
3611         }
3612
3613         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3614                 pr_err("(%s): skd_stop_device no special\n",
3615                        skd_name(skdev));
3616                 goto stop_out;
3617         }
3618
3619         skdev->state = SKD_DRVR_STATE_SYNCING;
3620         skdev->sync_done = 0;
3621
3622         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3623
3624         spin_unlock_irqrestore(&skdev->lock, flags);
3625
3626         wait_event_interruptible_timeout(skdev->waitq,
3627                                          (skdev->sync_done), (10 * HZ));
3628
3629         spin_lock_irqsave(&skdev->lock, flags);
3630
3631         switch (skdev->sync_done) {
3632         case 0:
3633                 pr_err("(%s): skd_stop_device no sync\n",
3634                        skd_name(skdev));
3635                 break;
3636         case 1:
3637                 pr_err("(%s): skd_stop_device sync done\n",
3638                        skd_name(skdev));
3639                 break;
3640         default:
3641                 pr_err("(%s): skd_stop_device sync error\n",
3642                        skd_name(skdev));
3643         }
3644
3645 stop_out:
3646         skdev->state = SKD_DRVR_STATE_STOPPING;
3647         spin_unlock_irqrestore(&skdev->lock, flags);
3648
3649         skd_kill_timer(skdev);
3650
3651         spin_lock_irqsave(&skdev->lock, flags);
3652         skd_disable_interrupts(skdev);
3653
3654         /* ensure all ints on device are cleared */
3655         /* soft reset the device to unload with a clean slate */
3656         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3657         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3658
3659         spin_unlock_irqrestore(&skdev->lock, flags);
3660
3661         /* poll every 100ms, 1 second timeout */
3662         for (i = 0; i < 10; i++) {
3663                 dev_state =
3664                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3665                 if (dev_state == FIT_SR_DRIVE_INIT)
3666                         break;
3667                 set_current_state(TASK_INTERRUPTIBLE);
3668                 schedule_timeout(msecs_to_jiffies(100));
3669         }
3670
3671         if (dev_state != FIT_SR_DRIVE_INIT)
3672                 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3673                        skd_name(skdev), dev_state);
3674 }
3675
3676 /* assume spinlock is held */
3677 static void skd_restart_device(struct skd_device *skdev)
3678 {
3679         u32 state;
3680
3681         /* ack all ghost interrupts */
3682         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3683
3684         state = SKD_READL(skdev, FIT_STATUS);
3685
3686         pr_debug("%s:%s:%d drive status=0x%x\n",
3687                  skdev->name, __func__, __LINE__, state);
3688
3689         state &= FIT_SR_DRIVE_STATE_MASK;
3690         skdev->drive_state = state;
3691         skdev->last_mtd = 0;
3692
3693         skdev->state = SKD_DRVR_STATE_RESTARTING;
3694         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3695
3696         skd_soft_reset(skdev);
3697 }
3698
3699 /* assume spinlock is held */
3700 static int skd_quiesce_dev(struct skd_device *skdev)
3701 {
3702         int rc = 0;
3703
3704         switch (skdev->state) {
3705         case SKD_DRVR_STATE_BUSY:
3706         case SKD_DRVR_STATE_BUSY_IMMINENT:
3707                 pr_debug("%s:%s:%d stopping %s queue\n",
3708                          skdev->name, __func__, __LINE__, skdev->name);
3709                 blk_stop_queue(skdev->queue);
3710                 break;
3711         case SKD_DRVR_STATE_ONLINE:
3712         case SKD_DRVR_STATE_STOPPING:
3713         case SKD_DRVR_STATE_SYNCING:
3714         case SKD_DRVR_STATE_PAUSING:
3715         case SKD_DRVR_STATE_PAUSED:
3716         case SKD_DRVR_STATE_STARTING:
3717         case SKD_DRVR_STATE_RESTARTING:
3718         case SKD_DRVR_STATE_RESUMING:
3719         default:
3720                 rc = -EINVAL;
3721                 pr_debug("%s:%s:%d state [%d] not implemented\n",
3722                          skdev->name, __func__, __LINE__, skdev->state);
3723         }
3724         return rc;
3725 }
3726
3727 /* assume spinlock is held */
3728 static int skd_unquiesce_dev(struct skd_device *skdev)
3729 {
3730         int prev_driver_state = skdev->state;
3731
3732         skd_log_skdev(skdev, "unquiesce");
3733         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3734                 pr_debug("%s:%s:%d **** device already ONLINE\n",
3735                          skdev->name, __func__, __LINE__);
3736                 return 0;
3737         }
3738         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3739                 /*
3740                  * If there has been a state change to other than
3741                  * ONLINE, we will rely on controller state change
3742                  * to come back online and restart the queue.
3743                  * The BUSY state means that driver is ready to
3744                  * continue normal processing but waiting for controller
3745                  * to become available.
3746                  */
3747                 skdev->state = SKD_DRVR_STATE_BUSY;
3748                 pr_debug("%s:%s:%d drive BUSY state\n",
3749                          skdev->name, __func__, __LINE__);
3750                 return 0;
3751         }
3752
3753         /*
3754          * Drive has just come online; the driver is either in startup,
3755          * paused performing a task, or busy waiting for hardware.
3756          */
3757         switch (skdev->state) {
3758         case SKD_DRVR_STATE_PAUSED:
3759         case SKD_DRVR_STATE_BUSY:
3760         case SKD_DRVR_STATE_BUSY_IMMINENT:
3761         case SKD_DRVR_STATE_BUSY_ERASE:
3762         case SKD_DRVR_STATE_STARTING:
3763         case SKD_DRVR_STATE_RESTARTING:
3764         case SKD_DRVR_STATE_FAULT:
3765         case SKD_DRVR_STATE_IDLE:
3766         case SKD_DRVR_STATE_LOAD:
3767                 skdev->state = SKD_DRVR_STATE_ONLINE;
3768                 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3769                        skd_name(skdev),
3770                        skd_skdev_state_to_str(prev_driver_state),
3771                        prev_driver_state, skd_skdev_state_to_str(skdev->state),
3772                        skdev->state);
3773                 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3774                          skdev->name, __func__, __LINE__);
3775                 pr_debug("%s:%s:%d starting %s queue\n",
3776                          skdev->name, __func__, __LINE__, skdev->name);
3777                 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3778                 blk_start_queue(skdev->queue);
3779                 skdev->gendisk_on = 1;
3780                 wake_up_interruptible(&skdev->waitq);
3781                 break;
3782
3783         case SKD_DRVR_STATE_DISAPPEARED:
3784         default:
3785                 pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3786                          skdev->name, __func__, __LINE__,
3787                          skdev->state);
3788                 return -EBUSY;
3789         }
3790         return 0;
3791 }
3792
3793 /*
3794  *****************************************************************************
3795  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3796  *****************************************************************************
3797  */
3798
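/*
 * Per-vector MSI-X handlers. Each one acknowledges its own status bit
 * in FIT_INT_STATUS_HOST and services only its event class.
 */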
3799 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3800 {
3801         struct skd_device *skdev = skd_host_data;
3802         unsigned long flags;
3803
3804         spin_lock_irqsave(&skdev->lock, flags);
3805         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3806                  skdev->name, __func__, __LINE__,
3807                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3808         pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3809                irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3810         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3811         spin_unlock_irqrestore(&skdev->lock, flags);
3812         return IRQ_HANDLED;
3813 }
3814
3815 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3816 {
3817         struct skd_device *skdev = skd_host_data;
3818         unsigned long flags;
3819
3820         spin_lock_irqsave(&skdev->lock, flags);
3821         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3822                  skdev->name, __func__, __LINE__,
3823                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3824         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3825         skd_isr_fwstate(skdev);
3826         spin_unlock_irqrestore(&skdev->lock, flags);
3827         return IRQ_HANDLED;
3828 }
3829
3830 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3831 {
3832         struct skd_device *skdev = skd_host_data;
3833         unsigned long flags;
3834         int flush_enqueued = 0;
3835         int deferred;
3836
3837         spin_lock_irqsave(&skdev->lock, flags);
3838         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3839                  skdev->name, __func__, __LINE__,
3840                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3841         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3842         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3843                                                 &flush_enqueued);
3844         if (flush_enqueued)
3845                 skd_request_fn(skdev->queue);
3846
3847         if (deferred)
3848                 schedule_work(&skdev->completion_worker);
3849         else if (!flush_enqueued)
3850                 skd_request_fn(skdev->queue);
3851
3852         spin_unlock_irqrestore(&skdev->lock, flags);
3853
3854         return IRQ_HANDLED;
3855 }
3856
3857 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3858 {
3859         struct skd_device *skdev = skd_host_data;
3860         unsigned long flags;
3861
3862         spin_lock_irqsave(&skdev->lock, flags);
3863         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3864                  skdev->name, __func__, __LINE__,
3865                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3866         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3867         skd_isr_msg_from_dev(skdev);
3868         spin_unlock_irqrestore(&skdev->lock, flags);
3869         return IRQ_HANDLED;
3870 }
3871
3872 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3873 {
3874         struct skd_device *skdev = skd_host_data;
3875         unsigned long flags;
3876
3877         spin_lock_irqsave(&skdev->lock, flags);
3878         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3879                  skdev->name, __func__, __LINE__,
3880                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3881         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3882         spin_unlock_irqrestore(&skdev->lock, flags);
3883         return IRQ_HANDLED;
3884 }
3885
3886 /*
3887  *****************************************************************************
3888  * PCIe MSI/MSI-X SETUP
3889  *****************************************************************************
3890  */
3891
3892 struct skd_msix_entry {
3893         int have_irq;
3894         u32 vector;
3895         u32 entry;
3896         struct skd_device *rsp;
3897         char isr_name[30];
3898 };
3899
3900 struct skd_init_msix_entry {
3901         const char *name;
3902         irq_handler_t handler;
3903 };
3904
3905 #define SKD_MAX_MSIX_COUNT              13
3906 #define SKD_MIN_MSIX_COUNT              7
3907 #define SKD_BASE_MSIX_IRQ               4
3908
3909 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3910         { "(DMA 0)",        skd_reserved_isr },
3911         { "(DMA 1)",        skd_reserved_isr },
3912         { "(DMA 2)",        skd_reserved_isr },
3913         { "(DMA 3)",        skd_reserved_isr },
3914         { "(State Change)", skd_statec_isr   },
3915         { "(COMPL_Q)",      skd_comp_q       },
3916         { "(MSG)",          skd_msg_isr      },
3917         { "(Reserved)",     skd_reserved_isr },
3918         { "(Reserved)",     skd_reserved_isr },
3919         { "(Queue Full 0)", skd_qfull_isr    },
3920         { "(Queue Full 1)", skd_qfull_isr    },
3921         { "(Queue Full 2)", skd_qfull_isr    },
3922         { "(Queue Full 3)", skd_qfull_isr    },
3923 };
3924
3925 static void skd_release_msix(struct skd_device *skdev)
3926 {
3927         struct skd_msix_entry *qentry;
3928         int i;
3929
3930         if (skdev->msix_entries == NULL)
3931                 return;
3932         for (i = 0; i < skdev->msix_count; i++) {
3933                 qentry = &skdev->msix_entries[i];
3934                 skdev = qentry->rsp;
3935
3936                 if (qentry->have_irq)
3937                         devm_free_irq(&skdev->pdev->dev,
3938                                       qentry->vector, qentry->rsp);
3939         }
3940         pci_disable_msix(skdev->pdev);
3941         kfree(skdev->msix_entries);
3942         skdev->msix_count = 0;
3943         skdev->msix_entries = NULL;
3944 }
3945
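/*
 * Try to enable MSI-X with up to SKD_MAX_MSIX_COUNT vectors, retrying
 * with whatever count the device grants (as long as it is at least
 * SKD_MIN_MSIX_COUNT), and register one handler per vector.
 */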
3946 static int skd_acquire_msix(struct skd_device *skdev)
3947 {
3948         int i, rc;
3949         struct pci_dev *pdev;
3950         struct msix_entry *entries = NULL;
3951         struct skd_msix_entry *qentry;
3952
3953         pdev = skdev->pdev;
3954         skdev->msix_count = SKD_MAX_MSIX_COUNT;
3955         entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
3956                           GFP_KERNEL);
3957         if (!entries)
3958                 return -ENOMEM;
3959
3960         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
3961                 entries[i].entry = i;
3962
3963         rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
3964         if (rc < 0)
3965                 goto msix_out;
3966         if (rc) {
3967                 if (rc < SKD_MIN_MSIX_COUNT) {
3968                         pr_err("(%s): failed to enable MSI-X %d\n",
3969                                skd_name(skdev), rc);
3970                         goto msix_out;
3971                 }
3972                 pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
3973                          skdev->name, __func__, __LINE__,
3974                          pci_name(pdev), skdev->name, rc);
3975
3976                 skdev->msix_count = rc;
3977                 rc = pci_enable_msix(pdev, entries, skdev->msix_count);
3978                 if (rc) {
3979                         pr_err("(%s): failed to enable MSI-X "
3980                                "support (%d) %d\n",
3981                                skd_name(skdev), skdev->msix_count, rc);
3982                         goto msix_out;
3983                 }
3984         }
3985         skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
3986                                       skdev->msix_count, GFP_KERNEL);
3987         if (!skdev->msix_entries) {
3988                 rc = -ENOMEM;
3989                 skdev->msix_count = 0;
3990                 pr_err("(%s): msix table allocation error\n",
3991                        skd_name(skdev));
3992                 goto msix_out;
3993         }
3994
3995         qentry = skdev->msix_entries;
3996         for (i = 0; i < skdev->msix_count; i++) {
3997                 qentry->vector = entries[i].vector;
3998                 qentry->entry = entries[i].entry;
3999                 qentry->rsp = NULL;
4000                 qentry->have_irq = 0;
4001                 pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
4002                          skdev->name, __func__, __LINE__,
4003                          pci_name(pdev), skdev->name,
4004                          i, qentry->vector, qentry->entry);
4005                 qentry++;
4006         }
4007
4008         /* Enable MSI-X vectors for the base queue */
4009         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
4010                 qentry = &skdev->msix_entries[i];
4011                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
4012                          "%s%d-msix %s", DRV_NAME, skdev->devno,
4013                          msix_entries[i].name);
4014                 rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
4015                                       msix_entries[i].handler, 0,
4016                                       qentry->isr_name, skdev);
4017                 if (rc) {
4018                         pr_err("(%s): Unable to register(%d) MSI-X "
4019                                "handler %d: %s\n",
4020                                skd_name(skdev), rc, i, qentry->isr_name);
4021                         goto msix_out;
4022                 } else {
4023                         qentry->have_irq = 1;
4024                         qentry->rsp = skdev;
4025                 }
4026         }
4027         pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
4028                  skdev->name, __func__, __LINE__,
4029                  pci_name(pdev), skdev->name, skdev->msix_count);
4030         return 0;
4031
4032 msix_out:
4033         kfree(entries);
4035         skd_release_msix(skdev);
4036         return rc;
4037 }
4038
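/*
 * Acquire the device interrupt according to skdev->irq_type (MSI-X,
 * MSI, or legacy INTx), falling back to the next simpler type when
 * setup fails.
 */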
4039 static int skd_acquire_irq(struct skd_device *skdev)
4040 {
4041         int rc;
4042         struct pci_dev *pdev;
4043
4044         pdev = skdev->pdev;
4045         skdev->msix_count = 0;
4046
4047 RETRY_IRQ_TYPE:
4048         switch (skdev->irq_type) {
4049         case SKD_IRQ_MSIX:
4050                 rc = skd_acquire_msix(skdev);
4051                 if (!rc)
4052                         pr_info("(%s): MSI-X %d irqs enabled\n",
4053                                skd_name(skdev), skdev->msix_count);
4054                 else {
4055                         pr_err(
4056                                "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4057                                skd_name(skdev), rc);
4058                         skdev->irq_type = SKD_IRQ_MSI;
4059                         goto RETRY_IRQ_TYPE;
4060                 }
4061                 break;
4062         case SKD_IRQ_MSI:
4063                 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4064                          DRV_NAME, skdev->devno);
4065                 rc = pci_enable_msi(pdev);
4066                 if (!rc) {
4067                         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4068                                               skdev->isr_name, skdev);
4069                         if (rc) {
4070                                 pci_disable_msi(pdev);
4071                                 pr_err(
4072                                        "(%s): failed to allocate the MSI interrupt %d\n",
4073                                        skd_name(skdev), rc);
4074                                 goto RETRY_IRQ_LEGACY;
4075                         }
4076                         pr_info("(%s): MSI irq %d enabled\n",
4077                                skd_name(skdev), pdev->irq);
4078                 } else {
4079 RETRY_IRQ_LEGACY:
4080                         pr_err(
4081                                "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4082                                skd_name(skdev), rc);
4083                         skdev->irq_type = SKD_IRQ_LEGACY;
4084                         goto RETRY_IRQ_TYPE;
4085                 }
4086                 break;
4087         case SKD_IRQ_LEGACY:
4088                 snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4089                          "%s%d-legacy", DRV_NAME, skdev->devno);
4090                 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4091                                       IRQF_SHARED, skdev->isr_name, skdev);
4092                 if (!rc)
4093                         pr_info("(%s): LEGACY irq %d enabled\n",
4094                                skd_name(skdev), pdev->irq);
4095                 else
4096                         pr_err("(%s): request LEGACY irq error %d\n",
4097                                skd_name(skdev), rc);
4098                 break;
4099         default:
4100                 pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4101                        skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4102                 skdev->irq_type = SKD_IRQ_LEGACY;
4103                 goto RETRY_IRQ_TYPE;
4104         }
4105         return rc;
4106 }
4107
4108 static void skd_release_irq(struct skd_device *skdev)
4109 {
4110         switch (skdev->irq_type) {
4111         case SKD_IRQ_MSIX:
4112                 skd_release_msix(skdev);
4113                 break;
4114         case SKD_IRQ_MSI:
4115                 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4116                 pci_disable_msi(skdev->pdev);
4117                 break;
4118         case SKD_IRQ_LEGACY:
4119                 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4120                 break;
4121         default:
4122                 pr_err("(%s): wrong irq type %d!",
4123                        skd_name(skdev), skdev->irq_type);
4124                 break;
4125         }
4126 }
4127
4128 /*
4129  *****************************************************************************
4130  * CONSTRUCT
4131  *****************************************************************************
4132  */
4133
4134 static int skd_cons_skcomp(struct skd_device *skdev);
4135 static int skd_cons_skmsg(struct skd_device *skdev);
4136 static int skd_cons_skreq(struct skd_device *skdev);
4137 static int skd_cons_skspcl(struct skd_device *skdev);
4138 static int skd_cons_sksb(struct skd_device *skdev);
4139 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4140                                                   u32 n_sg,
4141                                                   dma_addr_t *ret_dma_addr);
4142 static int skd_cons_disk(struct skd_device *skdev);
4143
4144 #define SKD_N_DEV_TABLE         16u
4145 static u32 skd_next_devno;
4146
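/*
 * Allocate and initialize a skd_device for the given PCI device,
 * building the completion queue, FIT message, request, special
 * request, status buffer, and disk resources in turn. On failure,
 * tears down any partial allocation and returns NULL.
 */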
4147 static struct skd_device *skd_construct(struct pci_dev *pdev)
4148 {
4149         struct skd_device *skdev;
4150         int blk_major = skd_major;
4151         int rc;
4152
4153         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4154
4155         if (!skdev) {
4156                 pr_err(PFX "(%s): memory alloc failure\n",
4157                        pci_name(pdev));
4158                 return NULL;
4159         }
4160
4161         skdev->state = SKD_DRVR_STATE_LOAD;
4162         skdev->pdev = pdev;
4163         skdev->devno = skd_next_devno++;
4164         skdev->major = blk_major;
4165         skdev->irq_type = skd_isr_type;
4166         sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4167         skdev->dev_max_queue_depth = 0;
4168
4169         skdev->num_req_context = skd_max_queue_depth;
4170         skdev->num_fitmsg_context = skd_max_queue_depth;
4171         skdev->n_special = skd_max_pass_thru;
4172         skdev->cur_max_queue_depth = 1;
4173         skdev->queue_low_water_mark = 1;
4174         skdev->proto_ver = 99;
4175         skdev->sgs_per_request = skd_sgs_per_request;
4176         skdev->dbg_level = skd_dbg_level;
4177
4178         atomic_set(&skdev->device_count, 0);
4179
4180         spin_lock_init(&skdev->lock);
4181
4182         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4183
4184         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4185         rc = skd_cons_skcomp(skdev);
4186         if (rc < 0)
4187                 goto err_out;
4188
4189         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4190         rc = skd_cons_skmsg(skdev);
4191         if (rc < 0)
4192                 goto err_out;
4193
4194         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4195         rc = skd_cons_skreq(skdev);
4196         if (rc < 0)
4197                 goto err_out;
4198
4199         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4200         rc = skd_cons_skspcl(skdev);
4201         if (rc < 0)
4202                 goto err_out;
4203
4204         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4205         rc = skd_cons_sksb(skdev);
4206         if (rc < 0)
4207                 goto err_out;
4208
4209         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4210         rc = skd_cons_disk(skdev);
4211         if (rc < 0)
4212                 goto err_out;
4213
4214         pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4215         return skdev;
4216
4217 err_out:
4218         pr_debug("%s:%s:%d construct failed\n",
4219                  skdev->name, __func__, __LINE__);
4220         skd_destruct(skdev);
4221         return NULL;
4222 }
4223
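/*
 * The completion queue entries and the per-entry error information are
 * carved from a single coherent DMA allocation: skerr_table starts
 * immediately after the SKD_N_COMPLETION_ENTRY completion entries in
 * the same buffer.
 */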
4224 static int skd_cons_skcomp(struct skd_device *skdev)
4225 {
4226         int rc = 0;
4227         struct fit_completion_entry_v1 *skcomp;
4228         u32 nbytes;
4229
4230         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4231         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4232
4233         pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4234                  skdev->name, __func__, __LINE__,
4235                  nbytes, SKD_N_COMPLETION_ENTRY);
4236
4237         skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
4238                                       &skdev->cq_dma_address);
4239
4240         if (skcomp == NULL) {
4241                 rc = -ENOMEM;
4242                 goto err_out;
4243         }
4244
4245         memset(skcomp, 0, nbytes);
4246
4247         skdev->skcomp_table = skcomp;
4248         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4249                                                            sizeof(*skcomp) *
4250                                                            SKD_N_COMPLETION_ENTRY);
4251
4252 err_out:
4253         return rc;
4254 }
4255
4256 static int skd_cons_skmsg(struct skd_device *skdev)
4257 {
4258         int rc = 0;
4259         u32 i;
4260
4261         pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4262                  skdev->name, __func__, __LINE__,
4263                  sizeof(struct skd_fitmsg_context),
4264                  skdev->num_fitmsg_context,
4265                  sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
4266
4267         skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4268                                      *skdev->num_fitmsg_context, GFP_KERNEL);
4269         if (skdev->skmsg_table == NULL) {
4270                 rc = -ENOMEM;
4271                 goto err_out;
4272         }
4273
4274         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4275                 struct skd_fitmsg_context *skmsg;
4276
4277                 skmsg = &skdev->skmsg_table[i];
4278
4279                 skmsg->id = i + SKD_ID_FIT_MSG;
4280
4281                 skmsg->state = SKD_MSG_STATE_IDLE;
4282                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4283                                                       SKD_N_FITMSG_BYTES + 64,
4284                                                       &skmsg->mb_dma_address);
4285
4286                 if (skmsg->msg_buf == NULL) {
4287                         rc = -ENOMEM;
4288                         goto err_out;
4289                 }
4290
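                /*
                 * Align the message buffer and its DMA address to the FIT
                 * queue-command base-address boundary.  The discarded
                 * low-order bits are kept in ->offset and added back in
                 * skd_free_skmsg() before the buffer is freed.
                 */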
4291                 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4292                                       (~FIT_QCMD_BASE_ADDRESS_MASK));
4293                 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4294                 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4295                                        FIT_QCMD_BASE_ADDRESS_MASK);
4296                 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4297                 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4298                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4299
4300                 skmsg->next = &skmsg[1];
4301         }
4302
4303         /* Free list is in order starting with the 0th entry. */
4304         skdev->skmsg_table[i - 1].next = NULL;
4305         skdev->skmsg_free_list = skdev->skmsg_table;
4306
4307 err_out:
4308         return rc;
4309 }
4310
4311 static int skd_cons_skreq(struct skd_device *skdev)
4312 {
4313         int rc = 0;
4314         u32 i;
4315
4316         pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4317                  skdev->name, __func__, __LINE__,
4318                  sizeof(struct skd_request_context),
4319                  skdev->num_req_context,
4320                  sizeof(struct skd_request_context) * skdev->num_req_context);
4321
4322         skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4323                                      * skdev->num_req_context, GFP_KERNEL);
4324         if (skdev->skreq_table == NULL) {
4325                 rc = -ENOMEM;
4326                 goto err_out;
4327         }
4328
4329         pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatterlist %lu total %lu\n",
4330                  skdev->name, __func__, __LINE__,
4331                  skdev->sgs_per_request, sizeof(struct scatterlist),
4332                  skdev->sgs_per_request * sizeof(struct scatterlist));
4333
4334         for (i = 0; i < skdev->num_req_context; i++) {
4335                 struct skd_request_context *skreq;
4336
4337                 skreq = &skdev->skreq_table[i];
4338
4339                 skreq->id = i + SKD_ID_RW_REQUEST;
4340                 skreq->state = SKD_REQ_STATE_IDLE;
4341
4342                 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4343                                     skdev->sgs_per_request, GFP_KERNEL);
4344                 if (skreq->sg == NULL) {
4345                         rc = -ENOMEM;
4346                         goto err_out;
4347                 }
4348                 sg_init_table(skreq->sg, skdev->sgs_per_request);
4349
4350                 skreq->sksg_list = skd_cons_sg_list(skdev,
4351                                                     skdev->sgs_per_request,
4352                                                     &skreq->sksg_dma_address);
4353
4354                 if (skreq->sksg_list == NULL) {
4355                         rc = -ENOMEM;
4356                         goto err_out;
4357                 }
4358
4359                 skreq->next = &skreq[1];
4360         }
4361
4362         /* Free list is in order starting with the 0th entry. */
4363         skdev->skreq_table[i - 1].next = NULL;
4364         skdev->skreq_free_list = skdev->skreq_table;
4365
4366 err_out:
4367         return rc;
4368 }
4369
4370 static int skd_cons_skspcl(struct skd_device *skdev)
4371 {
4372         int rc = 0;
4373         u32 i, nbytes;
4374
4375         pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4376                  skdev->name, __func__, __LINE__,
4377                  sizeof(struct skd_special_context),
4378                  skdev->n_special,
4379                  sizeof(struct skd_special_context) * skdev->n_special);
4380
4381         skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4382                                       * skdev->n_special, GFP_KERNEL);
4383         if (skdev->skspcl_table == NULL) {
4384                 rc = -ENOMEM;
4385                 goto err_out;
4386         }
4387
4388         for (i = 0; i < skdev->n_special; i++) {
4389                 struct skd_special_context *skspcl;
4390
4391                 skspcl = &skdev->skspcl_table[i];
4392
4393                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4394                 skspcl->req.state = SKD_REQ_STATE_IDLE;
4395
4396                 skspcl->req.next = &skspcl[1].req;
4397
4398                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4399
4400                 skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4401                                                        &skspcl->mb_dma_address);
4402                 if (skspcl->msg_buf == NULL) {
4403                         rc = -ENOMEM;
4404                         goto err_out;
4405                 }
4406
4407                 memset(skspcl->msg_buf, 0, nbytes);
4408
4409                 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4410                                          SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4411                 if (skspcl->req.sg == NULL) {
4412                         rc = -ENOMEM;
4413                         goto err_out;
4414                 }
4415
4416                 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4417                                                          SKD_N_SG_PER_SPECIAL,
4418                                                          &skspcl->req.
4419                                                          sksg_dma_address);
4420                 if (skspcl->req.sksg_list == NULL) {
4421                         rc = -ENOMEM;
4422                         goto err_out;
4423                 }
4424         }
4425
4426         /* Free list is in order starting with the 0th entry. */
4427         skdev->skspcl_table[i - 1].req.next = NULL;
4428         skdev->skspcl_free_list = skdev->skspcl_table;
4429
4430         return rc;
4431
4432 err_out:
4433         return rc;
4434 }
4435
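/*
 * Construct the single internal special context (data buffer, FIT message
 * buffer and a one-element SG list) that the driver uses for the commands
 * it issues on its own behalf rather than for block layer requests.
 */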
4436 static int skd_cons_sksb(struct skd_device *skdev)
4437 {
4438         int rc = 0;
4439         struct skd_special_context *skspcl;
4440         u32 nbytes;
4441
4442         skspcl = &skdev->internal_skspcl;
4443
4444         skspcl->req.id = SKD_ID_INTERNAL;
4445         skspcl->req.state = SKD_REQ_STATE_IDLE;
4446
4447         nbytes = SKD_N_INTERNAL_BYTES;
4448
4449         skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4450                                                 &skspcl->db_dma_address);
4451         if (skspcl->data_buf == NULL) {
4452                 rc = -ENOMEM;
4453                 goto err_out;
4454         }
4455
4456         memset(skspcl->data_buf, 0, nbytes);
4457
4458         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4459         skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4460                                                &skspcl->mb_dma_address);
4461         if (skspcl->msg_buf == NULL) {
4462                 rc = -ENOMEM;
4463                 goto err_out;
4464         }
4465
4466         memset(skspcl->msg_buf, 0, nbytes);
4467
4468         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4469                                                  &skspcl->req.sksg_dma_address);
4470         if (skspcl->req.sksg_list == NULL) {
4471                 rc = -ENOMEM;
4472                 goto err_out;
4473         }
4474
4475         if (!skd_format_internal_skspcl(skdev)) {
4476                 rc = -EINVAL;
4477                 goto err_out;
4478         }
4479
4480 err_out:
4481         return rc;
4482 }
4483
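/*
 * Allocate a coherent array of FIT SG descriptors and pre-link it into a
 * chain: each descriptor's next_desc_ptr holds the bus address of the
 * descriptor that follows it, and the final entry is terminated with 0.
 */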
4484 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4485                                                   u32 n_sg,
4486                                                   dma_addr_t *ret_dma_addr)
4487 {
4488         struct fit_sg_descriptor *sg_list;
4489         u32 nbytes;
4490
4491         nbytes = sizeof(*sg_list) * n_sg;
4492
4493         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4494
4495         if (sg_list != NULL) {
4496                 uint64_t dma_address = *ret_dma_addr;
4497                 u32 i;
4498
4499                 memset(sg_list, 0, nbytes);
4500
4501                 for (i = 0; i < n_sg - 1; i++) {
4502                         uint64_t ndp_off;
4503                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4504
4505                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
4506                 }
4507                 sg_list[i].next_desc_ptr = 0LL;
4508         }
4509
4510         return sg_list;
4511 }
4512
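/*
 * Allocate the gendisk and request queue, apply the flush/FUA, segment,
 * sector and discard limits, and leave the queue stopped until the rest
 * of device bring-up restarts it.
 */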
4513 static int skd_cons_disk(struct skd_device *skdev)
4514 {
4515         int rc = 0;
4516         struct gendisk *disk;
4517         struct request_queue *q;
4518         unsigned long flags;
4519
4520         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4521         if (!disk) {
4522                 rc = -ENOMEM;
4523                 goto err_out;
4524         }
4525
4526         skdev->disk = disk;
4527         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4528
4529         disk->major = skdev->major;
4530         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4531         disk->fops = &skd_blockdev_ops;
4532         disk->private_data = skdev;
4533
4534         q = blk_init_queue(skd_request_fn, &skdev->lock);
4535         if (!q) {
4536                 rc = -ENOMEM;
4537                 goto err_out;
4538         }
4539
4540         skdev->queue = q;
4541         disk->queue = q;
4542         q->queuedata = skdev;
4543
4544         blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4545         blk_queue_max_segments(q, skdev->sgs_per_request);
4546         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4547
4548                 /* set sysfs optimal_io_size to 8K */
4549         blk_queue_io_opt(q, 8192);
4550
4551         /* DISCARD Flag initialization. */
4552         q->limits.discard_granularity = 8192;
4553         q->limits.discard_alignment = 0;
4554         q->limits.max_discard_sectors = UINT_MAX >> 9;
4555         q->limits.discard_zeroes_data = 1;
4556         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4557         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4558
4559         spin_lock_irqsave(&skdev->lock, flags);
4560         pr_debug("%s:%s:%d stopping %s queue\n",
4561                  skdev->name, __func__, __LINE__, skdev->name);
4562         blk_stop_queue(skdev->queue);
4563         spin_unlock_irqrestore(&skdev->lock, flags);
4564
4565 err_out:
4566         return rc;
4567 }
4568
4569 /*
4570  *****************************************************************************
4571  * DESTRUCT (FREE)
4572  *****************************************************************************
4573  */
4574
4575 static void skd_free_skcomp(struct skd_device *skdev);
4576 static void skd_free_skmsg(struct skd_device *skdev);
4577 static void skd_free_skreq(struct skd_device *skdev);
4578 static void skd_free_skspcl(struct skd_device *skdev);
4579 static void skd_free_sksb(struct skd_device *skdev);
4580 static void skd_free_sg_list(struct skd_device *skdev,
4581                              struct fit_sg_descriptor *sg_list,
4582                              u32 n_sg, dma_addr_t dma_addr);
4583 static void skd_free_disk(struct skd_device *skdev);
4584
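/*
 * Tear down a device in the reverse order of skd_construct().  Every
 * skd_free_*() helper tolerates a partially constructed device, so this
 * is also used to unwind the construct error path.
 */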
4585 static void skd_destruct(struct skd_device *skdev)
4586 {
4587         if (skdev == NULL)
4588                 return;
4589
4590
4591         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4592         skd_free_disk(skdev);
4593
4594         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4595         skd_free_sksb(skdev);
4596
4597         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4598         skd_free_skspcl(skdev);
4599
4600         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4601         skd_free_skreq(skdev);
4602
4603         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4604         skd_free_skmsg(skdev);
4605
4606         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4607         skd_free_skcomp(skdev);
4608
4609         pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4610         kfree(skdev);
4611 }
4612
4613 static void skd_free_skcomp(struct skd_device *skdev)
4614 {
4615         if (skdev->skcomp_table != NULL) {
4616                 u32 nbytes;
4617
4618                 nbytes = sizeof(skdev->skcomp_table[0]) *
4619                          SKD_N_COMPLETION_ENTRY;
4620                 pci_free_consistent(skdev->pdev, nbytes,
4621                                     skdev->skcomp_table, skdev->cq_dma_address);
4622         }
4623
4624         skdev->skcomp_table = NULL;
4625         skdev->cq_dma_address = 0;
4626 }
4627
4628 static void skd_free_skmsg(struct skd_device *skdev)
4629 {
4630         u32 i;
4631
4632         if (skdev->skmsg_table == NULL)
4633                 return;
4634
4635         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4636                 struct skd_fitmsg_context *skmsg;
4637
4638                 skmsg = &skdev->skmsg_table[i];
4639
4640                 if (skmsg->msg_buf != NULL) {
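                        /*
                         * Add back the alignment offset recorded in
                         * skd_cons_skmsg() before handing the buffer back
                         * to pci_free_consistent().
                         */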
4641                         skmsg->msg_buf += skmsg->offset;
4642                         skmsg->mb_dma_address += skmsg->offset;
4643                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4644                                             skmsg->msg_buf,
4645                                             skmsg->mb_dma_address);
4646                 }
4647                 skmsg->msg_buf = NULL;
4648                 skmsg->mb_dma_address = 0;
4649         }
4650
4651         kfree(skdev->skmsg_table);
4652         skdev->skmsg_table = NULL;
4653 }
4654
4655 static void skd_free_skreq(struct skd_device *skdev)
4656 {
4657         u32 i;
4658
4659         if (skdev->skreq_table == NULL)
4660                 return;
4661
4662         for (i = 0; i < skdev->num_req_context; i++) {
4663                 struct skd_request_context *skreq;
4664
4665                 skreq = &skdev->skreq_table[i];
4666
4667                 skd_free_sg_list(skdev, skreq->sksg_list,
4668                                  skdev->sgs_per_request,
4669                                  skreq->sksg_dma_address);
4670
4671                 skreq->sksg_list = NULL;
4672                 skreq->sksg_dma_address = 0;
4673
4674                 kfree(skreq->sg);
4675         }
4676
4677         kfree(skdev->skreq_table);
4678         skdev->skreq_table = NULL;
4679 }
4680
4681 static void skd_free_skspcl(struct skd_device *skdev)
4682 {
4683         u32 i;
4684         u32 nbytes;
4685
4686         if (skdev->skspcl_table == NULL)
4687                 return;
4688
4689         for (i = 0; i < skdev->n_special; i++) {
4690                 struct skd_special_context *skspcl;
4691
4692                 skspcl = &skdev->skspcl_table[i];
4693
4694                 if (skspcl->msg_buf != NULL) {
4695                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4696                         pci_free_consistent(skdev->pdev, nbytes,
4697                                             skspcl->msg_buf,
4698                                             skspcl->mb_dma_address);
4699                 }
4700
4701                 skspcl->msg_buf = NULL;
4702                 skspcl->mb_dma_address = 0;
4703
4704                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4705                                  SKD_N_SG_PER_SPECIAL,
4706                                  skspcl->req.sksg_dma_address);
4707
4708                 skspcl->req.sksg_list = NULL;
4709                 skspcl->req.sksg_dma_address = 0;
4710
4711                 kfree(skspcl->req.sg);
4712         }
4713
4714         kfree(skdev->skspcl_table);
4715         skdev->skspcl_table = NULL;
4716 }
4717
4718 static void skd_free_sksb(struct skd_device *skdev)
4719 {
4720         struct skd_special_context *skspcl;
4721         u32 nbytes;
4722
4723         skspcl = &skdev->internal_skspcl;
4724
4725         if (skspcl->data_buf != NULL) {
4726                 nbytes = SKD_N_INTERNAL_BYTES;
4727
4728                 pci_free_consistent(skdev->pdev, nbytes,
4729                                     skspcl->data_buf, skspcl->db_dma_address);
4730         }
4731
4732         skspcl->data_buf = NULL;
4733         skspcl->db_dma_address = 0;
4734
4735         if (skspcl->msg_buf != NULL) {
4736                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4737                 pci_free_consistent(skdev->pdev, nbytes,
4738                                     skspcl->msg_buf, skspcl->mb_dma_address);
4739         }
4740
4741         skspcl->msg_buf = NULL;
4742         skspcl->mb_dma_address = 0;
4743
4744         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4745                          skspcl->req.sksg_dma_address);
4746
4747         skspcl->req.sksg_list = NULL;
4748         skspcl->req.sksg_dma_address = 0;
4749 }
4750
4751 static void skd_free_sg_list(struct skd_device *skdev,
4752                              struct fit_sg_descriptor *sg_list,
4753                              u32 n_sg, dma_addr_t dma_addr)
4754 {
4755         if (sg_list != NULL) {
4756                 u32 nbytes;
4757
4758                 nbytes = sizeof(*sg_list) * n_sg;
4759
4760                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4761         }
4762 }
4763
4764 static void skd_free_disk(struct skd_device *skdev)
4765 {
4766         struct gendisk *disk = skdev->disk;
4767
4768         if (disk != NULL) {
4769                 struct request_queue *q = disk->queue;
4770
4771                 if (disk->flags & GENHD_FL_UP)
4772                         del_gendisk(disk);
4773                 if (q)
4774                         blk_cleanup_queue(q);
4775                 put_disk(disk);
4776         }
4777         skdev->disk = NULL;
4778 }
4779
4780
4781
4782 /*
4783  *****************************************************************************
4784  * BLOCK DEVICE (BDEV) GLUE
4785  *****************************************************************************
4786  */
4787
4788 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4789 {
4790         struct skd_device *skdev;
4791         u64 capacity;
4792
4793         skdev = bdev->bd_disk->private_data;
4794
4795         pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4796                  skdev->name, __func__, __LINE__,
4797                  bdev->bd_disk->disk_name, current->comm);
4798
4799         if (skdev->read_cap_is_valid) {
4800                 capacity = get_capacity(skdev->disk);
4801                 geo->heads = 64;
4802                 geo->sectors = 255;
4803                 geo->cylinders = (capacity) / (255 * 64);
4804
4805                 return 0;
4806         }
4807         return -EIO;
4808 }
4809
4810 static int skd_bdev_attach(struct skd_device *skdev)
4811 {
4812         pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4813         add_disk(skdev->disk);
4814         return 0;
4815 }
4816
4817 static const struct block_device_operations skd_blockdev_ops = {
4818         .owner          = THIS_MODULE,
4819         .ioctl          = skd_bdev_ioctl,
4820         .getgeo         = skd_bdev_getgeo,
4821 };
4822
4823
4824 /*
4825  *****************************************************************************
4826  * PCIe DRIVER GLUE
4827  *****************************************************************************
4828  */
4829
4830 static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
4831         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4832           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4833         { 0 }                     /* terminate list */
4834 };
4835
4836 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4837
4838 static char *skd_pci_info(struct skd_device *skdev, char *str)
4839 {
4840         int pcie_reg;
4841
4842         strcpy(str, "PCIe (");
4843         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4844
4845         if (pcie_reg) {
4846
4847                 char lwstr[6];
4848                 uint16_t pcie_lstat, lspeed, lwidth;
4849
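                /*
                 * 0x12 is the Link Status register offset (PCI_EXP_LNKSTA)
                 * within the PCI Express capability; bits 3:0 hold the
                 * current link speed and bits 9:4 the negotiated width.
                 */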
4850                 pcie_reg += 0x12;
4851                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4852                 lspeed = pcie_lstat & (0xF);
4853                 lwidth = (pcie_lstat & 0x3F0) >> 4;
4854
4855                 if (lspeed == 1)
4856                         strcat(str, "2.5GT/s ");
4857                 else if (lspeed == 2)
4858                         strcat(str, "5.0GT/s ");
4859                 else
4860                         strcat(str, "<unknown> ");
4861                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4862                 strcat(str, lwstr);
4863         }
4864         return str;
4865 }
4866
4867 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4868 {
4869         int i;
4870         int rc = 0;
4871         char pci_str[32];
4872         struct skd_device *skdev;
4873
4874         pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4875                DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4876         pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4877                pci_name(pdev), pdev->vendor, pdev->device);
4878
4879         rc = pci_enable_device(pdev);
4880         if (rc)
4881                 return rc;
4882         rc = pci_request_regions(pdev, DRV_NAME);
4883         if (rc)
4884                 goto err_out;
4885         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4886         if (!rc) {
4887                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4888
4889                         pr_err("(%s): consistent DMA mask error %d\n",
4890                                pci_name(pdev), rc);
4891                 }
4892         } else {
4893                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4894                 if (rc) {
4895
4896                         pr_err("(%s): DMA mask error %d\n",
4897                                pci_name(pdev), rc);
4898                         goto err_out_regions;
4899                 }
4900         }
4901
4902         skdev = skd_construct(pdev);
4903         if (skdev == NULL) {
4904                 rc = -ENOMEM;
4905                 goto err_out_regions;
4906         }
4907
4908         skd_pci_info(skdev, pci_str);
4909         pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4910
4911         pci_set_master(pdev);
4912         rc = pci_enable_pcie_error_reporting(pdev);
4913         if (rc) {
4914                 pr_err(
4915                        "(%s): bad enable of PCIe error reporting rc=%d\n",
4916                        skd_name(skdev), rc);
4917                 skdev->pcie_error_reporting_is_enabled = 0;
4918         } else
4919                 skdev->pcie_error_reporting_is_enabled = 1;
4920
4921
4922         pci_set_drvdata(pdev, skdev);
4923         skdev->pdev = pdev;
4924         skdev->disk->driverfs_dev = &pdev->dev;
4925
4926         for (i = 0; i < SKD_MAX_BARS; i++) {
4927                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4928                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4929                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4930                                             skdev->mem_size[i]);
4931                 if (!skdev->mem_map[i]) {
4932                         pr_err("(%s): Unable to map adapter memory!\n",
4933                                skd_name(skdev));
4934                         rc = -ENODEV;
4935                         goto err_out_iounmap;
4936                 }
4937                 pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
4938                          skdev->name, __func__, __LINE__,
4939                          skdev->mem_map[i],
4940                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4941         }
4942
4943         rc = skd_acquire_irq(skdev);
4944         if (rc) {
4945                 pr_err("(%s): interrupt resource error %d\n",
4946                        skd_name(skdev), rc);
4947                 goto err_out_iounmap;
4948         }
4949
4950         rc = skd_start_timer(skdev);
4951         if (rc)
4952                 goto err_out_timer;
4953
4954         init_waitqueue_head(&skdev->waitq);
4955
4956         skd_start_device(skdev);
4957
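        /*
         * Wait up to SKD_START_WAIT_SECONDS for the completion path to
         * mark the gendisk ready (gendisk_on); the disk is only added to
         * the block layer once the device has come on-line.
         */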
4958         rc = wait_event_interruptible_timeout(skdev->waitq,
4959                                               (skdev->gendisk_on),
4960                                               (SKD_START_WAIT_SECONDS * HZ));
4961         if (skdev->gendisk_on > 0) {
4962                 /* device came on-line after reset */
4963                 skd_bdev_attach(skdev);
4964                 rc = 0;
4965         } else {
4966                 /* we timed out, something is wrong with the device,
4967                    don't add the disk structure */
4968                 pr_err(
4969                        "(%s): error: waiting for s1120 timed out %d!\n",
4970                        skd_name(skdev), rc);
4971                 /* in case of no error; we timeout with ENXIO */
4972                 if (!rc)
4973                         rc = -ENXIO;
4974                 goto err_out_timer;
4975         }
4976
4977
4978 #ifdef SKD_VMK_POLL_HANDLER
4979         if (skdev->irq_type == SKD_IRQ_MSIX) {
4980                 /* MSIX completion handler is being used for coredump */
4981                 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4982                                                   skdev->msix_entries[5].vector,
4983                                                   skd_comp_q, skdev);
4984         } else {
4985                 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
4986                                                   skdev->pdev->irq, skd_isr,
4987                                                   skdev);
4988         }
4989 #endif  /* SKD_VMK_POLL_HANDLER */
4990
4991         return rc;
4992
4993 err_out_timer:
4994         skd_stop_device(skdev);
4995         skd_release_irq(skdev);
4996
4997 err_out_iounmap:
4998         for (i = 0; i < SKD_MAX_BARS; i++)
4999                 if (skdev->mem_map[i])
5000                         iounmap(skdev->mem_map[i]);
5001
5002         if (skdev->pcie_error_reporting_is_enabled)
5003                 pci_disable_pcie_error_reporting(pdev);
5004
5005         skd_destruct(skdev);
5006
5007 err_out_regions:
5008         pci_release_regions(pdev);
5009
5010 err_out:
5011         pci_disable_device(pdev);
5012         pci_set_drvdata(pdev, NULL);
5013         return rc;
5014 }
5015
5016 static void skd_pci_remove(struct pci_dev *pdev)
5017 {
5018         int i;
5019         struct skd_device *skdev;
5020
5021         skdev = pci_get_drvdata(pdev);
5022         if (!skdev) {
5023                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5024                 return;
5025         }
5026         skd_stop_device(skdev);
5027         skd_release_irq(skdev);
5028
5029         for (i = 0; i < SKD_MAX_BARS; i++)
5030                 if (skdev->mem_map[i])
5031                         iounmap((u32 *)skdev->mem_map[i]);
5032
5033         if (skdev->pcie_error_reporting_is_enabled)
5034                 pci_disable_pcie_error_reporting(pdev);
5035
5036         skd_destruct(skdev);
5037
5038         pci_release_regions(pdev);
5039         pci_disable_device(pdev);
5040         pci_set_drvdata(pdev, NULL);
5041
5042         return;
5043 }
5044
5045 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5046 {
5047         int i;
5048         struct skd_device *skdev;
5049
5050         skdev = pci_get_drvdata(pdev);
5051         if (!skdev) {
5052                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5053                 return -EIO;
5054         }
5055
5056         skd_stop_device(skdev);
5057
5058         skd_release_irq(skdev);
5059
5060         for (i = 0; i < SKD_MAX_BARS; i++)
5061                 if (skdev->mem_map[i])
5062                         iounmap((u32 *)skdev->mem_map[i]);
5063
5064         if (skdev->pcie_error_reporting_is_enabled)
5065                 pci_disable_pcie_error_reporting(pdev);
5066
5067         pci_release_regions(pdev);
5068         pci_save_state(pdev);
5069         pci_disable_device(pdev);
5070         pci_set_power_state(pdev, pci_choose_state(pdev, state));
5071         return 0;
5072 }
5073
5074 static int skd_pci_resume(struct pci_dev *pdev)
5075 {
5076         int i;
5077         int rc = 0;
5078         struct skd_device *skdev;
5079
5080         skdev = pci_get_drvdata(pdev);
5081         if (!skdev) {
5082                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5083                 return -EIO;
5084         }
5085
5086         pci_set_power_state(pdev, PCI_D0);
5087         pci_enable_wake(pdev, PCI_D0, 0);
5088         pci_restore_state(pdev);
5089
5090         rc = pci_enable_device(pdev);
5091         if (rc)
5092                 return rc;
5093         rc = pci_request_regions(pdev, DRV_NAME);
5094         if (rc)
5095                 goto err_out;
5096         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5097         if (!rc) {
5098                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5099
5100                         pr_err("(%s): consistent DMA mask error %d\n",
5101                                pci_name(pdev), rc);
5102                 }
5103         } else {
5104                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5105                 if (rc) {
5106
5107                         pr_err("(%s): DMA mask error %d\n",
5108                                pci_name(pdev), rc);
5109                         goto err_out_regions;
5110                 }
5111         }
5112
5113         pci_set_master(pdev);
5114         rc = pci_enable_pcie_error_reporting(pdev);
5115         if (rc) {
5116                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5117                        skdev->name, rc);
5118                 skdev->pcie_error_reporting_is_enabled = 0;
5119         } else
5120                 skdev->pcie_error_reporting_is_enabled = 1;
5121
5122         for (i = 0; i < SKD_MAX_BARS; i++) {
5123
5124                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5125                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5126                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5127                                             skdev->mem_size[i]);
5128                 if (!skdev->mem_map[i]) {
5129                         pr_err("(%s): Unable to map adapter memory!\n",
5130                                skd_name(skdev));
5131                         rc = -ENODEV;
5132                         goto err_out_iounmap;
5133                 }
5134                 pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
5135                          skdev->name, __func__, __LINE__,
5136                          skdev->mem_map[i],
5137                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
5138         }
5139         rc = skd_acquire_irq(skdev);
5140         if (rc) {
5141
5142                 pr_err("(%s): interrupt resource error %d\n",
5143                        pci_name(pdev), rc);
5144                 goto err_out_iounmap;
5145         }
5146
5147         rc = skd_start_timer(skdev);
5148         if (rc)
5149                 goto err_out_timer;
5150
5151         init_waitqueue_head(&skdev->waitq);
5152
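        /*
         * Unlike probe, resume does not wait for gendisk_on: the gendisk
         * is already registered, so the device is simply restarted.
         */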
5153         skd_start_device(skdev);
5154
5155         return rc;
5156
5157 err_out_timer:
5158         skd_stop_device(skdev);
5159         skd_release_irq(skdev);
5160
5161 err_out_iounmap:
5162         for (i = 0; i < SKD_MAX_BARS; i++)
5163                 if (skdev->mem_map[i])
5164                         iounmap(skdev->mem_map[i]);
5165
5166         if (skdev->pcie_error_reporting_is_enabled)
5167                 pci_disable_pcie_error_reporting(pdev);
5168
5169 err_out_regions:
5170         pci_release_regions(pdev);
5171
5172 err_out:
5173         pci_disable_device(pdev);
5174         return rc;
5175 }
5176
5177 static void skd_pci_shutdown(struct pci_dev *pdev)
5178 {
5179         struct skd_device *skdev;
5180
5181         pr_err("skd_pci_shutdown called\n");
5182
5183         skdev = pci_get_drvdata(pdev);
5184         if (!skdev) {
5185                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5186                 return;
5187         }
5188
5189         pr_err("%s: calling stop\n", skd_name(skdev));
5190         skd_stop_device(skdev);
5191 }
5192
5193 static struct pci_driver skd_driver = {
5194         .name           = DRV_NAME,
5195         .id_table       = skd_pci_tbl,
5196         .probe          = skd_pci_probe,
5197         .remove         = skd_pci_remove,
5198         .suspend        = skd_pci_suspend,
5199         .resume         = skd_pci_resume,
5200         .shutdown       = skd_pci_shutdown,
5201 };
5202
5203 /*
5204  *****************************************************************************
5205  * LOGGING SUPPORT
5206  *****************************************************************************
5207  */
5208
5209 static const char *skd_name(struct skd_device *skdev)
5210 {
5211         memset(skdev->id_str, 0, sizeof(skdev->id_str));
5212
5213         if (skdev->inquiry_is_valid)
5214                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5215                          skdev->name, skdev->inq_serial_num,
5216                          pci_name(skdev->pdev));
5217         else
5218                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5219                          skdev->name, pci_name(skdev->pdev));
5220
5221         return skdev->id_str;
5222 }
5223
5224 const char *skd_drive_state_to_str(int state)
5225 {
5226         switch (state) {
5227         case FIT_SR_DRIVE_OFFLINE:
5228                 return "OFFLINE";
5229         case FIT_SR_DRIVE_INIT:
5230                 return "INIT";
5231         case FIT_SR_DRIVE_ONLINE:
5232                 return "ONLINE";
5233         case FIT_SR_DRIVE_BUSY:
5234                 return "BUSY";
5235         case FIT_SR_DRIVE_FAULT:
5236                 return "FAULT";
5237         case FIT_SR_DRIVE_DEGRADED:
5238                 return "DEGRADED";
5239         case FIT_SR_PCIE_LINK_DOWN:
5240                 return "LINK_DOWN";
5241         case FIT_SR_DRIVE_SOFT_RESET:
5242                 return "SOFT_RESET";
5243         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5244                 return "NEED_FW";
5245         case FIT_SR_DRIVE_INIT_FAULT:
5246                 return "INIT_FAULT";
5247         case FIT_SR_DRIVE_BUSY_SANITIZE:
5248                 return "BUSY_SANITIZE";
5249         case FIT_SR_DRIVE_BUSY_ERASE:
5250                 return "BUSY_ERASE";
5251         case FIT_SR_DRIVE_FW_BOOTING:
5252                 return "FW_BOOTING";
5253         default:
5254                 return "???";
5255         }
5256 }
5257
5258 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5259 {
5260         switch (state) {
5261         case SKD_DRVR_STATE_LOAD:
5262                 return "LOAD";
5263         case SKD_DRVR_STATE_IDLE:
5264                 return "IDLE";
5265         case SKD_DRVR_STATE_BUSY:
5266                 return "BUSY";
5267         case SKD_DRVR_STATE_STARTING:
5268                 return "STARTING";
5269         case SKD_DRVR_STATE_ONLINE:
5270                 return "ONLINE";
5271         case SKD_DRVR_STATE_PAUSING:
5272                 return "PAUSING";
5273         case SKD_DRVR_STATE_PAUSED:
5274                 return "PAUSED";
5275         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5276                 return "DRAINING_TIMEOUT";
5277         case SKD_DRVR_STATE_RESTARTING:
5278                 return "RESTARTING";
5279         case SKD_DRVR_STATE_RESUMING:
5280                 return "RESUMING";
5281         case SKD_DRVR_STATE_STOPPING:
5282                 return "STOPPING";
5283         case SKD_DRVR_STATE_SYNCING:
5284                 return "SYNCING";
5285         case SKD_DRVR_STATE_FAULT:
5286                 return "FAULT";
5287         case SKD_DRVR_STATE_DISAPPEARED:
5288                 return "DISAPPEARED";
5289         case SKD_DRVR_STATE_BUSY_ERASE:
5290                 return "BUSY_ERASE";
5291         case SKD_DRVR_STATE_BUSY_SANITIZE:
5292                 return "BUSY_SANITIZE";
5293         case SKD_DRVR_STATE_BUSY_IMMINENT:
5294                 return "BUSY_IMMINENT";
5295         case SKD_DRVR_STATE_WAIT_BOOT:
5296                 return "WAIT_BOOT";
5297
5298         default:
5299                 return "???";
5300         }
5301 }
5302
5303 const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5304 {
5305         switch (state) {
5306         case SKD_MSG_STATE_IDLE:
5307                 return "IDLE";
5308         case SKD_MSG_STATE_BUSY:
5309                 return "BUSY";
5310         default:
5311                 return "???";
5312         }
5313 }
5314
5315 const char *skd_skreq_state_to_str(enum skd_req_state state)
5316 {
5317         switch (state) {
5318         case SKD_REQ_STATE_IDLE:
5319                 return "IDLE";
5320         case SKD_REQ_STATE_SETUP:
5321                 return "SETUP";
5322         case SKD_REQ_STATE_BUSY:
5323                 return "BUSY";
5324         case SKD_REQ_STATE_COMPLETED:
5325                 return "COMPLETED";
5326         case SKD_REQ_STATE_TIMEOUT:
5327                 return "TIMEOUT";
5328         case SKD_REQ_STATE_ABORTED:
5329                 return "ABORTED";
5330         default:
5331                 return "???";
5332         }
5333 }
5334
5335 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5336 {
5337         pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5338                  skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5339         pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5340                  skdev->name, __func__, __LINE__,
5341                  skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5342                  skd_skdev_state_to_str(skdev->state), skdev->state);
5343         pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5344                  skdev->name, __func__, __LINE__,
5345                  skdev->in_flight, skdev->cur_max_queue_depth,
5346                  skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5347         pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5348                  skdev->name, __func__, __LINE__,
5349                  skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5350 }
5351
5352 static void skd_log_skmsg(struct skd_device *skdev,
5353                           struct skd_fitmsg_context *skmsg, const char *event)
5354 {
5355         pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5356                  skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5357         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5358                  skdev->name, __func__, __LINE__,
5359                  skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5360                  skmsg->id, skmsg->length);
5361 }
5362
5363 static void skd_log_skreq(struct skd_device *skdev,
5364                           struct skd_request_context *skreq, const char *event)
5365 {
5366         pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5367                  skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5368         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5369                  skdev->name, __func__, __LINE__,
5370                  skd_skreq_state_to_str(skreq->state), skreq->state,
5371                  skreq->id, skreq->fitmsg_id);
5372         pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5373                  skdev->name, __func__, __LINE__,
5374                  skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5375
5376         if (skreq->req != NULL) {
5377                 struct request *req = skreq->req;
5378                 u32 lba = (u32)blk_rq_pos(req);
5379                 u32 count = blk_rq_sectors(req);
5380
5381                 pr_debug("%s:%s:%d "
5382                          "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5383                          skdev->name, __func__, __LINE__,
5384                          req, lba, lba, count, count,
5385                          (int)rq_data_dir(req));
5386         } else
5387                 pr_debug("%s:%s:%d req=NULL\n",
5388                          skdev->name, __func__, __LINE__);
5389 }
5390
5391 /*
5392  *****************************************************************************
5393  * MODULE GLUE
5394  *****************************************************************************
5395  */
5396
5397 static int __init skd_init(void)
5398 {
5399         int rc = -ENOMEM;
5400
5401         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5402
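        /*
         * Validate the module parameters before registering anything;
         * out-of-range values are reset to their defaults with a warning.
         */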
5403         switch (skd_isr_type) {
5404         case SKD_IRQ_LEGACY:
5405         case SKD_IRQ_MSI:
5406         case SKD_IRQ_MSIX:
5407                 break;
5408         default:
5409                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5410                        skd_isr_type, SKD_IRQ_DEFAULT);
5411                 skd_isr_type = SKD_IRQ_DEFAULT;
5412         }
5413
5414         if (skd_max_queue_depth < 1 ||
5415             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5416                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5417                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5418                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5419         }
5420
5421         if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5422                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5423                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5424                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5425         }
5426
5427         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5428                 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
5429                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5430                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5431         }
5432
5433         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5434                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5435                        skd_dbg_level, 0);
5436                 skd_dbg_level = 0;
5437         }
5438
5439         if (skd_isr_comp_limit < 0) {
5440                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5441                        skd_isr_comp_limit, 0);
5442                 skd_isr_comp_limit = 0;
5443         }
5444
5445         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5446                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5447                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5448                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5449         }
5450
5451         /* Obtain major device number. */
5452         rc = register_blkdev(0, DRV_NAME);
5453         if (rc < 0)
5454                 goto err_register_blkdev;
5455
5456         skd_major = rc;
5457
5458         rc = pci_register_driver(&skd_driver);
5459         if (rc < 0)
5460                 goto err_pci_register_driver;
5461
5462         return rc;
5463
5464 err_pci_register_driver:
5465         unregister_blkdev(skd_major, DRV_NAME);
5466
5467 err_register_blkdev:
5468         return rc;
5469 }
5470
5471 static void __exit skd_exit(void)
5472 {
5473         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5474
5475         pci_unregister_driver(&skd_driver);
5476         unregister_blkdev(skd_major, DRV_NAME);
5477 }
5478
5479 module_init(skd_init);
5480 module_exit(skd_exit);