Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb...
[platform/kernel/linux-rpi.git] / drivers / scsi / dpt_i2o.c
1 /***************************************************************************
2                           dpti.c  -  description
3                              -------------------
4     begin                : Thu Sep 7 2000
5     copyright            : (C) 2000 by Adaptec
6
7                            July 30, 2001 First version being submitted
8                            for inclusion in the kernel.  V2.4
9
10     See Documentation/scsi/dpti.txt for history, notes, license info
11     and credits
12  ***************************************************************************/
13
14 /***************************************************************************
15  *                                                                         *
16  *   This program is free software; you can redistribute it and/or modify  *
17  *   it under the terms of the GNU General Public License as published by  *
18  *   the Free Software Foundation; either version 2 of the License, or     *
19  *   (at your option) any later version.                                   *
20  *                                                                         *
21  ***************************************************************************/
22 /***************************************************************************
23  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24  - Support 2.6 kernel and DMA-mapping
25  - ioctl fix for raid tools
26  - use schedule_timeout in long long loop
27  **************************************************************************/
28
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31
32 #include <linux/module.h>
33
34 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37 ////////////////////////////////////////////////////////////////
38
39 #include <linux/ioctl.h>        /* For SCSI-Passthrough */
40 #include <linux/uaccess.h>
41
42 #include <linux/stat.h>
43 #include <linux/slab.h>         /* for kmalloc() */
44 #include <linux/pci.h>          /* for PCI support */
45 #include <linux/proc_fs.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>        /* for udelay */
48 #include <linux/interrupt.h>
49 #include <linux/kernel.h>       /* for printk */
50 #include <linux/sched.h>
51 #include <linux/reboot.h>
52 #include <linux/spinlock.h>
53 #include <linux/dma-mapping.h>
54
55 #include <linux/timer.h>
56 #include <linux/string.h>
57 #include <linux/ioport.h>
58 #include <linux/mutex.h>
59
60 #include <asm/processor.h>      /* for boot_cpu_data */
61 #include <asm/pgtable.h>
62 #include <asm/io.h>             /* for virt_to_bus, etc. */
63
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_device.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_tcq.h>
69
70 #include "dpt/dptsig.h"
71 #include "dpti.h"
72
73 /*============================================================================
74  * Create a binary signature - this is read by dptsig
75  * Needed for our management apps
76  *============================================================================
77  */
static DEFINE_MUTEX(adpt_mutex);

/*
 * Binary driver signature read by the dptsig tool: identifies processor
 * family, OS, capabilities and driver version to Adaptec management apps.
 */
static dpt_sig_S DPTI_sig = {
        {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
        PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
        PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
        PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
        PROC_ALPHA, PROC_ALPHA,
#else
        (-1),(-1),      /* unknown processor family */
#endif
         FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
        ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
        DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
96
97
98
99
100 /*============================================================================
101  * Globals
102  *============================================================================
103  */
104
static DEFINE_MUTEX(adpt_configuration_lock);   /* guards hba_chain walks */

/* I2O system table shared with the IOPs */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;   /* bus address of sys_tbl */
static int sys_tbl_ind;
static int sys_tbl_len;         /* size in bytes of sys_tbl */

static adpt_hba* hba_chain = NULL;      /* singly linked list of adapters */
static int hba_count = 0;

static struct class *adpt_sysfs_class;  /* sysfs class for the dpti%d nodes */

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

/* file operations for the character control node (major DPTI_I2O_MAJOR) */
static const struct file_operations adpt_fops = {
        .unlocked_ioctl = adpt_unlocked_ioctl,
        .open           = adpt_open,
        .release        = adpt_close,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_adpt_ioctl,
#endif
        .llseek         = noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
        int status;                     /* completion status from the IOP */
        u32 id;                         /* matches the id sent in the request */
        adpt_wait_queue_head_t *wq;     /* waiter to wake on completion */
        struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;       /* rolling id for outstanding requests */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
146
147
148 /*============================================================================
149  *                              Functions
150  *============================================================================
151  */
152
153 static inline int dpt_dma64(adpt_hba *pHba)
154 {
155         return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
156 }
157
158 static inline u32 dma_high(dma_addr_t addr)
159 {
160         return upper_32_bits(addr);
161 }
162
163 static inline u32 dma_low(dma_addr_t addr)
164 {
165         return (u32)addr;
166 }
167
168 static u8 adpt_read_blink_led(adpt_hba* host)
169 {
170         if (host->FwDebugBLEDflag_P) {
171                 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172                         return readb(host->FwDebugBLEDvalue_P);
173                 }
174         }
175         return 0;
176 }
177
178 /*============================================================================
179  * Scsi host template interface functions
180  *============================================================================
181  */
182
#ifdef MODULE
/*
 * PCI IDs handled by this driver.  Only needed when built as a module,
 * where MODULE_DEVICE_TABLE() exports them for autoloading; the table
 * is read-only data, so declare it const (kernel convention for
 * device-ID tables).
 */
static const struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }		/* terminator */
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
192
/*
 * Probe all Adaptec/DPT I2O RAID controllers, walk their IOPs from
 * INIT through HOLD to OPERATIONAL state, read each LCT, register the
 * SCSI hosts and the character control device.
 * Returns the number of adapters successfully brought up.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
        struct pci_dev *pDev = NULL;
        adpt_hba *pHba;
        adpt_hba *next;

        PINFO("Detecting Adaptec I2O RAID controllers...\n");

        /* search for all Adaptec I2O RAID cards */
        while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
                if(pDev->device == PCI_DPT_DEVICE_ID ||
                   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
                        if(adpt_install_hba(sht, pDev) ){
                                PERROR("Could not Init an I2O RAID device\n");
                                PERROR("Will not try to detect others.\n");
                                return hba_count-1;
                        }
                        /* hold a reference for the lifetime of the hba */
                        pci_dev_get(pDev);
                }
        }

        /* In INIT state, Activate IOPs */
        for (pHba = hba_chain; pHba; pHba = next) {
                next = pHba->next;      /* delete_hba() unlinks pHba */
                // Activate does get status , init outbound, and get hrt
                if (adpt_i2o_activate_hba(pHba) < 0) {
                        adpt_i2o_delete_hba(pHba);
                }
        }


        /* Active IOPs in HOLD state */

rebuild_sys_tab:
        if (hba_chain == NULL) 
                return 0;

        /*
         * If build_sys_table fails, we kill everything and bail
         * as we can't init the IOPs w/o a system table
         */     
        if (adpt_i2o_build_sys_table() < 0) {
                adpt_i2o_sys_shutdown();
                return 0;
        }

        PDEBUG("HBA's in HOLD state\n");

        /* If IOP don't get online, we need to rebuild the System table */
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (adpt_i2o_online_hba(pHba) < 0) {
                        adpt_i2o_delete_hba(pHba);      
                        goto rebuild_sys_tab;
                }
        }

        /* Active IOPs now in OPERATIONAL state */
        PDEBUG("HBA's in OPERATIONAL state\n");

        printk("dpti: If you have a lot of devices this could take a few minutes.\n");
        for (pHba = hba_chain; pHba; pHba = next) {
                next = pHba->next;      /* iteration must survive delete_hba() */
                printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
                if (adpt_i2o_lct_get(pHba) < 0){
                        adpt_i2o_delete_hba(pHba);
                        continue;
                }

                if (adpt_i2o_parse_lct(pHba) < 0){
                        adpt_i2o_delete_hba(pHba);
                        continue;
                }
                adpt_inquiry(pHba);
        }

        /* non-fatal on failure: we only lose the sysfs device nodes */
        adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
        if (IS_ERR(adpt_sysfs_class)) {
                printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
                adpt_sysfs_class = NULL;
        }

        for (pHba = hba_chain; pHba; pHba = next) {
                next = pHba->next;
                if (adpt_scsi_host_alloc(pHba, sht) < 0){
                        adpt_i2o_delete_hba(pHba);
                        continue;
                }
                pHba->initialized = TRUE;
                pHba->state &= ~DPTI_STATE_RESET;
                if (adpt_sysfs_class) {
                        struct device *dev = device_create(adpt_sysfs_class,
                                NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
                                "dpti%d", pHba->unit);
                        if (IS_ERR(dev)) {
                                /* non-fatal: the adapter still works */
                                printk(KERN_WARNING"dpti%d: unable to "
                                        "create device in dpt_i2o class\n",
                                        pHba->unit);
                        }
                }
        }

        // Register our control device node
        // nodes will need to be created in /dev to access this
        // the nodes can not be created from within the driver
        if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
                adpt_i2o_sys_shutdown();
                return 0;
        }
        return hba_count;
}
303
304
305 static void adpt_release(adpt_hba *pHba)
306 {
307         struct Scsi_Host *shost = pHba->host;
308
309         scsi_remove_host(shost);
310 //      adpt_i2o_quiesce_hba(pHba);
311         adpt_i2o_delete_hba(pHba);
312         scsi_host_put(shost);
313 }
314
315
/*
 * Issue a SCSI INQUIRY to the adapter itself (ADAPTER_TID in
 * "interpret" mode) and build the pHba->detail identification string
 * from the vendor/model/firmware bytes of the response.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
        u32 msg[17];    /* worst case: 64-bit single-SGE layout */
        u32 *mptr;
        u32 *lenptr;
        int direction;
        int scsidir;
        u32 len;
        u32 reqlen;
        u8* buf;
        dma_addr_t addr;
        u8  scb[16];
        s32 rcode;

        memset(msg, 0, sizeof(msg));
        buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
        if(!buf){
                printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
                return;
        }
        memset((void*)buf, 0, 36);
        
        len = 36;       /* standard INQUIRY allocation length */
        direction = 0x00000000; 
        scsidir  =0x40000000;   // DATA IN  (iop<--dev)

        if (dpt_dma64(pHba))
                reqlen = 17;            // SINGLE SGE, 64 bit
        else
                reqlen = 14;            // SINGLE SGE, 32 bit
        /* Stick the headers on */
        msg[0] = reqlen<<16 | SGL_OFFSET_12;
        msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
        msg[2] = 0;
        msg[3]  = 0;
        // Adaptec/DPT Private stuff 
        msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
        msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
        /* Direction, disconnect ok | sense data | simple queue , CDBLen */
        // I2O_SCB_FLAG_ENABLE_DISCONNECT | 
        // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
        // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
        msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

        mptr=msg+7;

        memset(scb, 0, sizeof(scb));
        // Write SCSI command into the message - always 16 byte block 
        scb[0] = INQUIRY;
        scb[1] = 0;
        scb[2] = 0;
        scb[3] = 0;
        scb[4] = 36;    /* allocation length, matches len above */
        scb[5] = 0;
        // Don't care about the rest of scb

        memcpy(mptr, scb, sizeof(scb));
        mptr+=4;
        lenptr=mptr++;          /* Remember me - fill in when we know */

        /* Now fill in the SGList and command */
        *lenptr = len;
        if (dpt_dma64(pHba)) {
                *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
                *mptr++ = 1 << PAGE_SHIFT;
                *mptr++ = 0xD0000000|direction|len;     /* last SGE / end of buffer */
                *mptr++ = dma_low(addr);
                *mptr++ = dma_high(addr);
        } else {
                *mptr++ = 0xD0000000|direction|len;
                *mptr++ = addr;
        }

        // Send it on it's way
        rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
        if (rcode != 0) {
                sprintf(pHba->detail, "Adaptec I2O RAID");
                printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
                /* buf is deliberately NOT freed on -ETIME/-EINTR —
                 * NOTE(review): presumably because the IOP may still DMA
                 * into it; confirm against adpt_i2o_post_wait() */
                if (rcode != -ETIME && rcode != -EINTR)
                        dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
        } else {
                memset(pHba->detail, 0, sizeof(pHba->detail));
                memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
                memcpy(&(pHba->detail[16]), " Model: ", 8);
                memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
                /* NOTE(review): copies only 4 of the 5 chars of " FW: " */
                memcpy(&(pHba->detail[40]), " FW: ", 4);
                memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
                pHba->detail[48] = '\0';        /* precautionary */
                dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
        }
        adpt_i2o_status_get(pHba);
        return ;
}
409
410
411 static int adpt_slave_configure(struct scsi_device * device)
412 {
413         struct Scsi_Host *host = device->host;
414         adpt_hba* pHba;
415
416         pHba = (adpt_hba *) host->hostdata[0];
417
418         if (host->can_queue && device->tagged_supported) {
419                 scsi_change_queue_depth(device,
420                                 host->can_queue - 1);
421         }
422         return 0;
423 }
424
/*
 * Locked queuecommand backend (invoked via DEF_SCSI_QCMD below with the
 * host lock held).  Resolves the per-device adpt_device, then hands the
 * command to adpt_scsi_to_i2o() for posting to the IOP.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
        adpt_hba* pHba = NULL;
        struct adpt_device* pDev = NULL;        /* dpt per device information */

        cmd->scsi_done = done;
        /*
         * SCSI REQUEST_SENSE commands will be executed automatically by the 
         * Host Adapter for any errors, so they should not be executed 
         * explicitly unless the Sense Data is zero indicating that no error 
         * occurred.
         */

        if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
                cmd->result = (DID_OK << 16);
                cmd->scsi_done(cmd);
                return 0;
        }

        pHba = (adpt_hba*)cmd->device->host->hostdata[0];
        if (!pHba) {
                /* NOTE(review): FAILED is an eh-handler return value, not
                 * a queuecommand one — long-standing, left as-is */
                return FAILED;
        }

        rmb();  /* pairs with the writer that updates pHba->state */
        if ((pHba->state) & DPTI_STATE_RESET)
                return SCSI_MLQUEUE_HOST_BUSY;

        // TODO if the cmd->device if offline then I may need to issue a bus rescan
        // followed by a get_lct to see if the device is there anymore
        if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
                /*
                 * First command request for this device.  Set up a pointer
                 * to the device structure.  This should be a TEST_UNIT_READY
                 * command from scan_scsis_single.
                 */
                if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
                        // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 
                        // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
                        cmd->result = (DID_NO_CONNECT << 16);
                        cmd->scsi_done(cmd);
                        return 0;
                }
                cmd->device->hostdata = pDev;
        }
        pDev->pScsi_dev = cmd->device;

        /*
         * If we are being called from when the device is being reset, 
         * delay processing of the command until later.
         */
        if (pDev->state & DPTI_DEV_RESET ) {
                return FAILED;
        }
        return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

/* Generates adpt_queue(), the lock-taking wrapper around adpt_queue_lck */
static DEF_SCSI_QCMD(adpt_queue)
/*
 * Provide a BIOS-compatible CHS geometry for partitioning tools,
 * selected from capacity thresholds.
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
                sector_t capacity, int geom[])
{
        int heads=-1;
        int sectors=-1;
        int cylinders=-1;

        // *** First lets set the default geometry ****
        
        // If the capacity is less than ox2000
        if (capacity < 0x2000 ) {       // floppy
                heads = 18;
                sectors = 2;
        } 
        // else if between 0x2000 and 0x20000
        else if (capacity < 0x20000) {
                heads = 64;
                sectors = 32;
        }
        // else if between 0x20000 and 0x40000
        else if (capacity < 0x40000) {
                heads = 65;
                sectors = 63;
        }
        // else if between 0x4000 and 0x80000
        else if (capacity < 0x80000) {
                heads = 128;
                sectors = 63;
        }
        // else if greater than 0x80000
        else {
                heads = 255;
                sectors = 63;
        }
        /* NOTE(review): sector_div() divides capacity in place and
         * RETURNS THE REMAINDER, so cylinders is set to
         * capacity % (heads*sectors), not the quotient — looks like a
         * long-standing bug; confirm before changing. */
        cylinders = sector_div(capacity, heads * sectors);

        // Special case if CDROM
        if(sdev->type == 5) {  // CDROM
                heads = 252;
                sectors = 63;
                cylinders = 1111;
        }

        geom[0] = heads;
        geom[1] = sectors;
        geom[2] = cylinders;
        
        PDEBUG("adpt_bios_param: exit\n");
        return 0;
}
534
535
536 static const char *adpt_info(struct Scsi_Host *host)
537 {
538         adpt_hba* pHba;
539
540         pHba = (adpt_hba *) host->hostdata[0];
541         return (char *) (pHba->detail);
542 }
543
/*
 * seq_file backend for the /proc/scsi entry: dump the driver version,
 * adapter identification, fifo sizes and the per-channel device list.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
        struct adpt_device* d;
        int id;
        int chan;
        adpt_hba* pHba;
        int unit;

        // Find HBA (host bus adapter) we are looking for
        mutex_lock(&adpt_configuration_lock);
        for (pHba = hba_chain; pHba; pHba = pHba->next) {
                if (pHba->host == host) {
                        break;  /* found adapter */
                }
        }
        mutex_unlock(&adpt_configuration_lock);
        if (pHba == NULL) {
                return 0;       /* not one of ours — emit nothing */
        }
        host = pHba->host;

        seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
        seq_printf(m, "%s\n", pHba->detail);
        seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
                        pHba->host->host_no, pHba->name, host->irq);
        seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
                        host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

        seq_puts(m, "Devices:\n");
        for(chan = 0; chan < MAX_CHANNEL; chan++) {
                for(id = 0; id < MAX_ID; id++) {
                        d = pHba->channel[chan].device[id];
                        while(d) {      /* walk the lun chain at this id */
                                seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
                                seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

                                unit = d->pI2o_dev->lct_data.tid;
                                seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
                                               unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
                                               scsi_device_online(d->pScsi_dev)? "online":"offline"); 
                                d = d->next_lun;
                        }
                }
        }
        return 0;
}
590
591 /*
592  *      Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
593  */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
        /* may truncate serial_number to 32 bits; adpt_cmd_from_context()
         * applies the same cast when matching */
        return (u32)cmd->serial_number;
}
598
599 /*
600  *      Go from a u32 'context' to a struct scsi_cmnd * .
601  *      This could probably be made more efficient.
602  */
static struct scsi_cmnd *
        adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
        struct scsi_cmnd * cmd;
        struct scsi_device * d;

        if (context == 0)
                return NULL;

        /* drop the host lock held by the caller while scanning every
         * device's command list, and retake it before returning */
        spin_unlock(pHba->host->host_lock);
        shost_for_each_device(d, pHba->host) {
                unsigned long flags;
                spin_lock_irqsave(&d->list_lock, flags);
                list_for_each_entry(cmd, &d->cmd_list, list) {
                        if (((u32)cmd->serial_number == context)) {
                                spin_unlock_irqrestore(&d->list_lock, flags);
                                /* drop the ref shost_for_each_device() took */
                                scsi_device_put(d);
                                spin_lock(pHba->host->host_lock);
                                return cmd;
                        }
                }
                spin_unlock_irqrestore(&d->list_lock, flags);
        }
        spin_lock(pHba->host->host_lock);

        return NULL;
}
630
631 /*
632  *      Turn a pointer to ioctl reply data into an u32 'context'
633  */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
        /* a 32-bit pointer fits the 32-bit context directly */
        return (u32)(unsigned long)reply;
#else
        ulong flags = 0;
        u32 nr, i;

        /* 64-bit: park the pointer in a slot table under the host lock
         * and hand out its index as the context */
        spin_lock_irqsave(pHba->host->host_lock, flags);
        nr = ARRAY_SIZE(pHba->ioctl_reply_context);
        for (i = 0; i < nr; i++) {
                if (pHba->ioctl_reply_context[i] == NULL) {
                        pHba->ioctl_reply_context[i] = reply;
                        break;
                }
        }
        spin_unlock_irqrestore(pHba->host->host_lock, flags);
        if (i >= nr) {
                /* table full — callers must treat (u32)-1 as failure */
                printk(KERN_WARNING"%s: Too many outstanding "
                                "ioctl commands\n", pHba->name);
                return (u32)-1;
        }

        return i;
#endif
}
660
661 /*
662  *      Go from an u32 'context' to a pointer to ioctl reply data.
663  */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
        return (void *)(unsigned long)context;
#else
        /* one-shot lookup: the slot is released for reuse here.
         * NOTE(review): context is not range-checked — callers must
         * never pass the (u32)-1 failure value from
         * adpt_ioctl_to_context() */
        void *p = pHba->ioctl_reply_context[context];
        pHba->ioctl_reply_context[context] = NULL;

        return p;
#endif
}
675
676 /*===========================================================================
677  * Error Handling routines
678  *===========================================================================
679  */
680
/*
 * eh_abort_handler: ask the IOP to abort one outstanding command,
 * identified by the context derived from its serial number.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
        adpt_hba* pHba = NULL;  /* host bus adapter structure */
        struct adpt_device* dptdevice;  /* dpt per device information */
        u32 msg[5];
        int rcode;

        if(cmd->serial_number == 0){
                return FAILED;  /* never issued, nothing to abort */
        }
        pHba = (adpt_hba*) cmd->device->host->hostdata[0];
        printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
        if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
                printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
                return FAILED;
        }

        /* build a five-word SCSI_ABORT message for this device's tid */
        memset(msg, 0, sizeof(msg));
        msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
        msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
        msg[2] = 0;
        msg[3]= 0; 
        msg[4] = adpt_cmd_to_context(cmd);
        if (pHba->host)
                spin_lock_irq(pHba->host->host_lock);
        rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
        if (pHba->host)
                spin_unlock_irq(pHba->host->host_lock);
        if (rcode != 0) {
                if(rcode == -EOPNOTSUPP ){
                        printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
                        return FAILED;
                }
                printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
                return FAILED;
        } 
        printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
        return SUCCESS;
}
720
721
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
        adpt_hba* pHba;
        u32 msg[4];
        u32 rcode;
        int old_state;
        struct adpt_device* d = cmd->device->hostdata;

        pHba = (void*) cmd->device->host->hostdata[0];
        printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
        if (!d) {
                printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
                return FAILED;
        }
        memset(msg, 0, sizeof(msg));
        msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
        msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
        msg[2] = 0;
        msg[3] = 0;

        if (pHba->host)
                spin_lock_irq(pHba->host->host_lock);
        /* flag the device so adpt_queue_lck() defers new commands while
         * the reset is in flight, then restore the prior state */
        old_state = d->state;
        d->state |= DPTI_DEV_RESET;
        rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
        d->state = old_state;
        if (pHba->host)
                spin_unlock_irq(pHba->host->host_lock);
        if (rcode != 0) {
                if(rcode == -EOPNOTSUPP ){
                        printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
                        return FAILED;
                }
                printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
                return FAILED;
        } else {
                printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
                return SUCCESS;
        }
}
766
767
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
        adpt_hba* pHba;
        u32 msg[4];
        u32 rcode;

        pHba = (adpt_hba*)cmd->device->host->hostdata[0];
        memset(msg, 0, sizeof(msg));
        printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
        /* address the bus-adapter TID of the command's channel */
        msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
        msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
        msg[2] = 0;
        msg[3] = 0;
        if (pHba->host)
                spin_lock_irq(pHba->host->host_lock);
        rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
        if (pHba->host)
                spin_unlock_irq(pHba->host->host_lock);
        if (rcode != 0) {
                printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
                return FAILED;
        } else {
                printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
                return SUCCESS;
        }
}
796
797 // This version of reset is called by the eh_error_handler
798 static int __adpt_reset(struct scsi_cmnd* cmd)
799 {
800         adpt_hba* pHba;
801         int rcode;
802         char name[32];
803
804         pHba = (adpt_hba*)cmd->device->host->hostdata[0];
805         strncpy(name, pHba->name, sizeof(name));
806         printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
807         rcode =  adpt_hba_reset(pHba);
808         if(rcode == 0){
809                 printk(KERN_WARNING"%s: HBA reset complete\n", name);
810                 return SUCCESS;
811         } else {
812                 printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
813                 return FAILED;
814         }
815 }
816
817 static int adpt_reset(struct scsi_cmnd* cmd)
818 {
819         int rc;
820
821         spin_lock_irq(cmd->device->host->host_lock);
822         rc = __adpt_reset(cmd);
823         spin_unlock_irq(cmd->device->host->host_lock);
824
825         return rc;
826 }
827
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
        int rcode;

        /* block new commands (adpt_queue_lck returns host-busy) while
         * the controller is re-initialized */
        pHba->state |= DPTI_STATE_RESET;

        // Activate does get status , init outbound, and get hrt
        if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
                printk(KERN_ERR "%s: Could not activate\n", pHba->name);
                adpt_i2o_delete_hba(pHba);
                return rcode;
        }

        if ((rcode=adpt_i2o_build_sys_table()) < 0) {
                adpt_i2o_delete_hba(pHba);
                return rcode;
        }
        PDEBUG("%s: in HOLD state\n",pHba->name);

        if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
                adpt_i2o_delete_hba(pHba);      
                return rcode;
        }
        PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

        if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
                adpt_i2o_delete_hba(pHba);
                return rcode;
        }

        if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
                adpt_i2o_delete_hba(pHba);
                return rcode;
        }
        /* re-open the gate for new commands */
        pHba->state &= ~DPTI_STATE_RESET;

        /* fail back anything that was posted before the reset */
        adpt_fail_posted_scbs(pHba);
        return 0;       /* return success */
}
868
869 /*===========================================================================
870  * 
871  *===========================================================================
872  */
873
874
875 static void adpt_i2o_sys_shutdown(void)
876 {
877         adpt_hba *pHba, *pNext;
878         struct adpt_i2o_post_wait_data *p1, *old;
879
880          printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
881          printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
882         /* Delete all IOPs from the controller chain */
883         /* They should have already been released by the
884          * scsi-core
885          */
886         for (pHba = hba_chain; pHba; pHba = pNext) {
887                 pNext = pHba->next;
888                 adpt_i2o_delete_hba(pHba);
889         }
890
891         /* Remove any timedout entries from the wait queue.  */
892 //      spin_lock_irqsave(&adpt_post_wait_lock, flags);
893         /* Nothing should be outstanding at this point so just
894          * free them 
895          */
896         for(p1 = adpt_post_wait_queue; p1;) {
897                 old = p1;
898                 p1 = p1->next;
899                 kfree(old);
900         }
901 //      spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
902         adpt_post_wait_queue = NULL;
903
904          printk(KERN_INFO "Adaptec I2O controllers down.\n");
905 }
906
907 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
908 {
909
910         adpt_hba* pHba = NULL;
911         adpt_hba* p = NULL;
912         ulong base_addr0_phys = 0;
913         ulong base_addr1_phys = 0;
914         u32 hba_map0_area_size = 0;
915         u32 hba_map1_area_size = 0;
916         void __iomem *base_addr_virt = NULL;
917         void __iomem *msg_addr_virt = NULL;
918         int dma64 = 0;
919
920         int raptorFlag = FALSE;
921
922         if(pci_enable_device(pDev)) {
923                 return -EINVAL;
924         }
925
926         if (pci_request_regions(pDev, "dpt_i2o")) {
927                 PERROR("dpti: adpt_config_hba: pci request region failed\n");
928                 return -EINVAL;
929         }
930
931         pci_set_master(pDev);
932
933         /*
934          *      See if we should enable dma64 mode.
935          */
936         if (sizeof(dma_addr_t) > 4 &&
937             pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
938                 if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
939                         dma64 = 1;
940         }
941         if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
942                 return -EINVAL;
943
944         /* adapter only supports message blocks below 4GB */
945         pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
946
947         base_addr0_phys = pci_resource_start(pDev,0);
948         hba_map0_area_size = pci_resource_len(pDev,0);
949
950         // Check if standard PCI card or single BAR Raptor
951         if(pDev->device == PCI_DPT_DEVICE_ID){
952                 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
953                         // Raptor card with this device id needs 4M
954                         hba_map0_area_size = 0x400000;
955                 } else { // Not Raptor - it is a PCI card
956                         if(hba_map0_area_size > 0x100000 ){ 
957                                 hba_map0_area_size = 0x100000;
958                         }
959                 }
960         } else {// Raptor split BAR config
961                 // Use BAR1 in this configuration
962                 base_addr1_phys = pci_resource_start(pDev,1);
963                 hba_map1_area_size = pci_resource_len(pDev,1);
964                 raptorFlag = TRUE;
965         }
966
967 #if BITS_PER_LONG == 64
968         /*
969          *      The original Adaptec 64 bit driver has this comment here:
970          *      "x86_64 machines need more optimal mappings"
971          *
972          *      I assume some HBAs report ridiculously large mappings
973          *      and we need to limit them on platforms with IOMMUs.
974          */
975         if (raptorFlag == TRUE) {
976                 if (hba_map0_area_size > 128)
977                         hba_map0_area_size = 128;
978                 if (hba_map1_area_size > 524288)
979                         hba_map1_area_size = 524288;
980         } else {
981                 if (hba_map0_area_size > 524288)
982                         hba_map0_area_size = 524288;
983         }
984 #endif
985
986         base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
987         if (!base_addr_virt) {
988                 pci_release_regions(pDev);
989                 PERROR("dpti: adpt_config_hba: io remap failed\n");
990                 return -EINVAL;
991         }
992
993         if(raptorFlag == TRUE) {
994                 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
995                 if (!msg_addr_virt) {
996                         PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
997                         iounmap(base_addr_virt);
998                         pci_release_regions(pDev);
999                         return -EINVAL;
1000                 }
1001         } else {
1002                 msg_addr_virt = base_addr_virt;
1003         }
1004         
1005         // Allocate and zero the data structure
1006         pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1007         if (!pHba) {
1008                 if (msg_addr_virt != base_addr_virt)
1009                         iounmap(msg_addr_virt);
1010                 iounmap(base_addr_virt);
1011                 pci_release_regions(pDev);
1012                 return -ENOMEM;
1013         }
1014
1015         mutex_lock(&adpt_configuration_lock);
1016
1017         if(hba_chain != NULL){
1018                 for(p = hba_chain; p->next; p = p->next);
1019                 p->next = pHba;
1020         } else {
1021                 hba_chain = pHba;
1022         }
1023         pHba->next = NULL;
1024         pHba->unit = hba_count;
1025         sprintf(pHba->name, "dpti%d", hba_count);
1026         hba_count++;
1027         
1028         mutex_unlock(&adpt_configuration_lock);
1029
1030         pHba->pDev = pDev;
1031         pHba->base_addr_phys = base_addr0_phys;
1032
1033         // Set up the Virtual Base Address of the I2O Device
1034         pHba->base_addr_virt = base_addr_virt;
1035         pHba->msg_addr_virt = msg_addr_virt;
1036         pHba->irq_mask = base_addr_virt+0x30;
1037         pHba->post_port = base_addr_virt+0x40;
1038         pHba->reply_port = base_addr_virt+0x44;
1039
1040         pHba->hrt = NULL;
1041         pHba->lct = NULL;
1042         pHba->lct_size = 0;
1043         pHba->status_block = NULL;
1044         pHba->post_count = 0;
1045         pHba->state = DPTI_STATE_RESET;
1046         pHba->pDev = pDev;
1047         pHba->devices = NULL;
1048         pHba->dma64 = dma64;
1049
1050         // Initializing the spinlocks
1051         spin_lock_init(&pHba->state_lock);
1052         spin_lock_init(&adpt_post_wait_lock);
1053
1054         if(raptorFlag == 0){
1055                 printk(KERN_INFO "Adaptec I2O RAID controller"
1056                                  " %d at %p size=%x irq=%d%s\n", 
1057                         hba_count-1, base_addr_virt,
1058                         hba_map0_area_size, pDev->irq,
1059                         dma64 ? " (64-bit DMA)" : "");
1060         } else {
1061                 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1062                         hba_count-1, pDev->irq,
1063                         dma64 ? " (64-bit DMA)" : "");
1064                 printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1065                 printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1066         }
1067
1068         if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1069                 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1070                 adpt_i2o_delete_hba(pHba);
1071                 return -EINVAL;
1072         }
1073
1074         return 0;
1075 }
1076
1077
/*
 * Tear down one HBA: unlink it from the global chain, release its IRQ,
 * iomem mappings, PCI regions and DMA-coherent buffers, free every
 * cached i2o/adpt device node, and finally kfree() pHba itself.
 * When the last HBA goes away the char device and sysfs class are
 * unregistered too.  Callers must not touch pHba after this returns.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	/* The IRQ was registered only once the scsi host existed. */
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink pHba from the singly-linked hba_chain (p2 trails p1). */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	/* BAR1 is a separate mapping only in the Raptor split-BAR case. */
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	/* Free the DMA-coherent firmware tables and reply pool. */
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the i2o device chain, then the per-channel/per-id LUN chains. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* Last controller gone: drop the char device and sysfs class. */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);	 
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
1163
1164 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1165 {
1166         struct adpt_device* d;
1167
1168         if(chan < 0 || chan >= MAX_CHANNEL)
1169                 return NULL;
1170         
1171         d = pHba->channel[chan].device[id];
1172         if(!d || d->tid == 0) {
1173                 return NULL;
1174         }
1175
1176         /* If it is the only lun at that address then this should match*/
1177         if(d->scsi_lun == lun){
1178                 return d;
1179         }
1180
1181         /* else we need to look through all the luns */
1182         for(d=d->next_lun ; d ; d = d->next_lun){
1183                 if(d->scsi_lun == lun){
1184                         return d;
1185                 }
1186         }
1187         return NULL;
1188 }
1189
1190
/*
 * Post an I2O message and sleep until the IOP completes it or the
 * timeout (in seconds, 0 = wait forever) expires.  The completion
 * handler matches replies to waiters via the 15-bit id stamped into
 * msg[2].  Returns 0/positive on success, -ETIMEDOUT if the frame
 * could not even be posted, -ETIME if posted but unanswered, or
 * -EOPNOTSUPP when the IOP rejected the function.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
       // TODO we need a MORE unique way of getting ids
       // to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	/* ids are 15-bit and simply wrap; see TODO above. */
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Stamp the wait id (high bit marks a post-wait context). */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock while sleeping so the ISR can run
		 * the completion path. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resorces is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1276
1277
1278 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1279 {
1280
1281         u32 m = EMPTY_QUEUE;
1282         u32 __iomem *msg;
1283         ulong timeout = jiffies + 30*HZ;
1284         do {
1285                 rmb();
1286                 m = readl(pHba->post_port);
1287                 if (m != EMPTY_QUEUE) {
1288                         break;
1289                 }
1290                 if(time_after(jiffies,timeout)){
1291                         printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1292                         return -ETIMEDOUT;
1293                 }
1294                 schedule_timeout_uninterruptible(1);
1295         } while(m == EMPTY_QUEUE);
1296                 
1297         msg = pHba->msg_addr_virt + m;
1298         memcpy_toio(msg, data, len);
1299         wmb();
1300
1301         //post message
1302         writel(m, pHba->post_port);
1303         wmb();
1304
1305         return 0;
1306 }
1307
1308
1309 static void adpt_i2o_post_wait_complete(u32 context, int status)
1310 {
1311         struct adpt_i2o_post_wait_data *p1 = NULL;
1312         /*
1313          * We need to search through the adpt_post_wait
1314          * queue to see if the given message is still
1315          * outstanding.  If not, it means that the IOP
1316          * took longer to respond to the message than we
1317          * had allowed and timer has already expired.
1318          * Not much we can do about that except log
1319          * it for debug purposes, increase timeout, and recompile
1320          *
1321          * Lock needed to keep anyone from moving queue pointers
1322          * around while we're looking through them.
1323          */
1324
1325         context &= 0x7fff;
1326
1327         spin_lock(&adpt_post_wait_lock);
1328         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1329                 if(p1->id == context) {
1330                         p1->status = status;
1331                         spin_unlock(&adpt_post_wait_lock);
1332                         wake_up_interruptible(p1->wq);
1333                         return;
1334                 }
1335         }
1336         spin_unlock(&adpt_post_wait_lock);
1337         // If this happens we lose commands that probably really completed
1338         printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1339         printk(KERN_DEBUG"      Tasks in wait queue:\n");
1340         for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1341                 printk(KERN_DEBUG"           %d\n",p1->id);
1342         }
1343         return;
1344 }
1345
/*
 * Issue an I2O ADAPTER_RESET to the IOP and wait for the 4-byte
 * DMA status word to report completion.  On the first reset a short
 * timeout is used; otherwise the HBA is quiesced first.  On timeout
 * the status buffer is deliberately leaked because the controller may
 * still DMA into it later.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Poll the post port for a free message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	/* The IOP writes its reset status into this DMA-visible byte. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		/* Return the frame we claimed so it is not lost. */
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	/* Build the ADAPTER_RESET request; msg[6..7] point at status. */
	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Spin until the IOP writes a nonzero status byte.
	 * NOTE(review): *status is polled without a volatile qualifier,
	 * relying on rmb() in the loop - confirm this is sufficient on
	 * all supported architectures. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	/* Re-read the status block to confirm the new IOP state. */
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to 
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
1448
1449
/*
 * Walk the IOP's logical configuration table (LCT) in three passes:
 *  1) record the maximum channel/id/lun seen for hidden devices and
 *     install visible storage-class devices on the HBA's device chain;
 *  2) assign bus numbers/tids to adapter-port entries (currently just
 *     in table order - see the TODOs about using the HRT instead);
 *  3) build the per-channel/per-id adpt_device table, chaining extra
 *     LUNs off next_lun.
 * Returns 0 on success, -1 for a missing LCT, -ENOMEM on allocation
 * failure.
 */
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev; 
	
	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}
	
	/* Each LCT entry is 9 dwords after a 3-dword header. */
	max = lct->table_size;	
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			/* buf[0..2]: packed bus number, scsi id and LUN. */
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			/* Track the high-water marks for channel/id/lun. */
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		/* Visible entry: create an i2o_device and install it. */
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}
		
		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	/* Pass 2: number the adapter/FC ports as buses, in chain order. */
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no = 
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				/* First LUN at this (bus, id) becomes the head
				 * of the chain; later ones hang off next_lun. */
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];	
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				/* buf[0]: low byte = device type, next byte = flags. */
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
1614
1615
1616 /*
1617  *      Each I2O controller has a chain of devices on it - these match
1618  *      the useful parts of the LCT of the board.
1619  */
1620  
1621 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1622 {
1623         mutex_lock(&adpt_configuration_lock);
1624         d->controller=pHba;
1625         d->owner=NULL;
1626         d->next=pHba->devices;
1627         d->prev=NULL;
1628         if (pHba->devices != NULL){
1629                 pHba->devices->prev=d;
1630         }
1631         pHba->devices=d;
1632         *d->dev_name = 0;
1633
1634         mutex_unlock(&adpt_configuration_lock);
1635         return 0;
1636 }
1637
1638 static int adpt_open(struct inode *inode, struct file *file)
1639 {
1640         int minor;
1641         adpt_hba* pHba;
1642
1643         mutex_lock(&adpt_mutex);
1644         //TODO check for root access
1645         //
1646         minor = iminor(inode);
1647         if (minor >= hba_count) {
1648                 mutex_unlock(&adpt_mutex);
1649                 return -ENXIO;
1650         }
1651         mutex_lock(&adpt_configuration_lock);
1652         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1653                 if (pHba->unit == minor) {
1654                         break;  /* found adapter */
1655                 }
1656         }
1657         if (pHba == NULL) {
1658                 mutex_unlock(&adpt_configuration_lock);
1659                 mutex_unlock(&adpt_mutex);
1660                 return -ENXIO;
1661         }
1662
1663 //      if(pHba->in_use){
1664         //      mutex_unlock(&adpt_configuration_lock);
1665 //              return -EBUSY;
1666 //      }
1667
1668         pHba->in_use = 1;
1669         mutex_unlock(&adpt_configuration_lock);
1670         mutex_unlock(&adpt_mutex);
1671
1672         return 0;
1673 }
1674
1675 static int adpt_close(struct inode *inode, struct file *file)
1676 {
1677         int minor;
1678         adpt_hba* pHba;
1679
1680         minor = iminor(inode);
1681         if (minor >= hba_count) {
1682                 return -ENXIO;
1683         }
1684         mutex_lock(&adpt_configuration_lock);
1685         for (pHba = hba_chain; pHba; pHba = pHba->next) {
1686                 if (pHba->unit == minor) {
1687                         break;  /* found adapter */
1688                 }
1689         }
1690         mutex_unlock(&adpt_configuration_lock);
1691         if (pHba == NULL) {
1692                 return -ENXIO;
1693         }
1694
1695         pHba->in_use = 0;
1696
1697         return 0;
1698 }
1699
1700
1701 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1702 {
1703         u32 msg[MAX_MESSAGE_SIZE];
1704         u32* reply = NULL;
1705         u32 size = 0;
1706         u32 reply_size = 0;
1707         u32 __user *user_msg = arg;
1708         u32 __user * user_reply = NULL;
1709         void *sg_list[pHba->sg_tablesize];
1710         u32 sg_offset = 0;
1711         u32 sg_count = 0;
1712         int sg_index = 0;
1713         u32 i = 0;
1714         u32 rcode = 0;
1715         void *p = NULL;
1716         dma_addr_t addr;
1717         ulong flags = 0;
1718
1719         memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1720         // get user msg size in u32s 
1721         if(get_user(size, &user_msg[0])){
1722                 return -EFAULT;
1723         }
1724         size = size>>16;
1725
1726         user_reply = &user_msg[size];
1727         if(size > MAX_MESSAGE_SIZE){
1728                 return -EFAULT;
1729         }
1730         size *= 4; // Convert to bytes
1731
1732         /* Copy in the user's I2O command */
1733         if(copy_from_user(msg, user_msg, size)) {
1734                 return -EFAULT;
1735         }
1736         get_user(reply_size, &user_reply[0]);
1737         reply_size = reply_size>>16;
1738         if(reply_size > REPLY_FRAME_SIZE){
1739                 reply_size = REPLY_FRAME_SIZE;
1740         }
1741         reply_size *= 4;
1742         reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1743         if(reply == NULL) {
1744                 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1745                 return -ENOMEM;
1746         }
1747         sg_offset = (msg[0]>>4)&0xf;
1748         msg[2] = 0x40000000; // IOCTL context
1749         msg[3] = adpt_ioctl_to_context(pHba, reply);
1750         if (msg[3] == (u32)-1) {
1751                 kfree(reply);
1752                 return -EBUSY;
1753         }
1754
1755         memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1756         if(sg_offset) {
1757                 // TODO add 64 bit API
1758                 struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1759                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1760                 if (sg_count > pHba->sg_tablesize){
1761                         printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1762                         kfree (reply);
1763                         return -EINVAL;
1764                 }
1765
1766                 for(i = 0; i < sg_count; i++) {
1767                         int sg_size;
1768
1769                         if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1770                                 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1771                                 rcode = -EINVAL;
1772                                 goto cleanup;
1773                         }
1774                         sg_size = sg[i].flag_count & 0xffffff;      
1775                         /* Allocate memory for the transfer */
1776                         p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1777                         if(!p) {
1778                                 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1779                                                 pHba->name,sg_size,i,sg_count);
1780                                 rcode = -ENOMEM;
1781                                 goto cleanup;
1782                         }
1783                         sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1784                         /* Copy in the user's SG buffer if necessary */
1785                         if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1786                                 // sg_simple_element API is 32 bit
1787                                 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1788                                         printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1789                                         rcode = -EFAULT;
1790                                         goto cleanup;
1791                                 }
1792                         }
1793                         /* sg_simple_element API is 32 bit, but addr < 4GB */
1794                         sg[i].addr_bus = addr;
1795                 }
1796         }
1797
1798         do {
1799                 /*
1800                  * Stop any new commands from enterring the
1801                  * controller while processing the ioctl
1802                  */
1803                 if (pHba->host) {
1804                         scsi_block_requests(pHba->host);
1805                         spin_lock_irqsave(pHba->host->host_lock, flags);
1806                 }
1807                 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1808                 if (rcode != 0)
1809                         printk("adpt_i2o_passthru: post wait failed %d %p\n",
1810                                         rcode, reply);
1811                 if (pHba->host) {
1812                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
1813                         scsi_unblock_requests(pHba->host);
1814                 }
1815         } while (rcode == -ETIMEDOUT);
1816
1817         if(rcode){
1818                 goto cleanup;
1819         }
1820
1821         if(sg_offset) {
1822         /* Copy back the Scatter Gather buffers back to user space */
1823                 u32 j;
1824                 // TODO add 64 bit API
1825                 struct sg_simple_element* sg;
1826                 int sg_size;
1827
1828                 // re-acquire the original message to handle correctly the sg copy operation
1829                 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 
1830                 // get user msg size in u32s 
1831                 if(get_user(size, &user_msg[0])){
1832                         rcode = -EFAULT; 
1833                         goto cleanup; 
1834                 }
1835                 size = size>>16;
1836                 size *= 4;
1837                 if (size > MAX_MESSAGE_SIZE) {
1838                         rcode = -EINVAL;
1839                         goto cleanup;
1840                 }
1841                 /* Copy in the user's I2O command */
1842                 if (copy_from_user (msg, user_msg, size)) {
1843                         rcode = -EFAULT;
1844                         goto cleanup;
1845                 }
1846                 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1847
1848                 // TODO add 64 bit API
1849                 sg       = (struct sg_simple_element*)(msg + sg_offset);
1850                 for (j = 0; j < sg_count; j++) {
1851                         /* Copy out the SG list to user's buffer if necessary */
1852                         if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1853                                 sg_size = sg[j].flag_count & 0xffffff; 
1854                                 // sg_simple_element API is 32 bit
1855                                 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1856                                         printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1857                                         rcode = -EFAULT;
1858                                         goto cleanup;
1859                                 }
1860                         }
1861                 }
1862         } 
1863
1864         /* Copy back the reply to user space */
1865         if (reply_size) {
1866                 // we wrote our own values for context - now restore the user supplied ones
1867                 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1868                         printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1869                         rcode = -EFAULT;
1870                 }
1871                 if(copy_to_user(user_reply, reply, reply_size)) {
1872                         printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1873                         rcode = -EFAULT;
1874                 }
1875         }
1876
1877
1878 cleanup:
1879         if (rcode != -ETIME && rcode != -EINTR) {
1880                 struct sg_simple_element *sg =
1881                                 (struct sg_simple_element*) (msg +sg_offset);
1882                 kfree (reply);
1883                 while(sg_index) {
1884                         if(sg_list[--sg_index]) {
1885                                 dma_free_coherent(&pHba->pDev->dev,
1886                                         sg[sg_index].flag_count & 0xffffff,
1887                                         sg_list[sg_index],
1888                                         sg[sg_index].addr_bus);
1889                         }
1890                 }
1891         }
1892         return rcode;
1893 }
1894
#if defined __ia64__ 
/*
 * Report the CPU family to the management utility.  This is all the
 * information it needs for now; more may be added later.
 */
static void adpt_ia64_info(sysInfo_S* si)
{
	si->processorType = PROC_IA64;
}
#endif
1904
#if defined __sparc__ 
/*
 * Report the CPU family to the management utility.  This is all the
 * information it needs for now; more may be added later.
 */
static void adpt_sparc_info(sysInfo_S* si)
{
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__ 
/*
 * Report the CPU family to the management utility.  This is all the
 * information it needs for now; more may be added later.
 */
static void adpt_alpha_info(sysInfo_S* si)
{
	si->processorType = PROC_ALPHA;
}
#endif
1923
#if defined __i386__

#include <uapi/asm/vm86.h>

/*
 * Report the x86 CPU family to the management utility.  Anything newer
 * than a 486 is reported as a Pentium, which is all the utility needs.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	default:	/* CPU_586 and everything newer */
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif
1949
1950 /*
 * This routine returns information about the system.  This does not affect
1952  * any logic and if the info is wrong - it doesn't matter.
1953  */
1954
1955 /* Get all the info we can not get from kernel services */
1956 static int adpt_system_info(void __user *buffer)
1957 {
1958         sysInfo_S si;
1959
1960         memset(&si, 0, sizeof(si));
1961
1962         si.osType = OS_LINUX;
1963         si.osMajorVersion = 0;
1964         si.osMinorVersion = 0;
1965         si.osRevision = 0;
1966         si.busType = SI_PCI_BUS;
1967         si.processorFamily = DPTI_sig.dsProcessorFamily;
1968
1969 #if defined __i386__
1970         adpt_i386_info(&si);
1971 #elif defined (__ia64__)
1972         adpt_ia64_info(&si);
1973 #elif defined(__sparc__)
1974         adpt_sparc_info(&si);
1975 #elif defined (__alpha__)
1976         adpt_alpha_info(&si);
1977 #else
1978         si.processorType = 0xff ;
1979 #endif
1980         if (copy_to_user(buffer, &si, sizeof(si))){
1981                 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1982                 return -EFAULT;
1983         }
1984
1985         return 0;
1986 }
1987
1988 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1989 {
1990         int minor;
1991         int error = 0;
1992         adpt_hba* pHba;
1993         ulong flags = 0;
1994         void __user *argp = (void __user *)arg;
1995
1996         minor = iminor(inode);
1997         if (minor >= DPTI_MAX_HBA){
1998                 return -ENXIO;
1999         }
2000         mutex_lock(&adpt_configuration_lock);
2001         for (pHba = hba_chain; pHba; pHba = pHba->next) {
2002                 if (pHba->unit == minor) {
2003                         break;  /* found adapter */
2004                 }
2005         }
2006         mutex_unlock(&adpt_configuration_lock);
2007         if(pHba == NULL){
2008                 return -ENXIO;
2009         }
2010
2011         while((volatile u32) pHba->state & DPTI_STATE_RESET )
2012                 schedule_timeout_uninterruptible(2);
2013
2014         switch (cmd) {
2015         // TODO: handle 3 cases
2016         case DPT_SIGNATURE:
2017                 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2018                         return -EFAULT;
2019                 }
2020                 break;
2021         case I2OUSRCMD:
2022                 return adpt_i2o_passthru(pHba, argp);
2023
2024         case DPT_CTRLINFO:{
2025                 drvrHBAinfo_S HbaInfo;
2026
2027 #define FLG_OSD_PCI_VALID 0x0001
2028 #define FLG_OSD_DMA       0x0002
2029 #define FLG_OSD_I2O       0x0004
2030                 memset(&HbaInfo, 0, sizeof(HbaInfo));
2031                 HbaInfo.drvrHBAnum = pHba->unit;
2032                 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2033                 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2034                 HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2035                 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn); 
2036                 HbaInfo.Interrupt = pHba->pDev->irq; 
2037                 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2038                 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2039                         printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2040                         return -EFAULT;
2041                 }
2042                 break;
2043                 }
2044         case DPT_SYSINFO:
2045                 return adpt_system_info(argp);
2046         case DPT_BLINKLED:{
2047                 u32 value;
2048                 value = (u32)adpt_read_blink_led(pHba);
2049                 if (copy_to_user(argp, &value, sizeof(value))) {
2050                         return -EFAULT;
2051                 }
2052                 break;
2053                 }
2054         case I2ORESETCMD:
2055                 if(pHba->host)
2056                         spin_lock_irqsave(pHba->host->host_lock, flags);
2057                 adpt_hba_reset(pHba);
2058                 if(pHba->host)
2059                         spin_unlock_irqrestore(pHba->host->host_lock, flags);
2060                 break;
2061         case I2ORESCANCMD:
2062                 adpt_rescan(pHba);
2063                 break;
2064         default:
2065                 return -EINVAL;
2066         }
2067
2068         return error;
2069 }
2070
2071 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2072 {
2073         struct inode *inode;
2074         long ret;
2075  
2076         inode = file_inode(file);
2077  
2078         mutex_lock(&adpt_mutex);
2079         ret = adpt_ioctl(inode, file, cmd, arg);
2080         mutex_unlock(&adpt_mutex);
2081
2082         return ret;
2083 }
2084
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  All supported commands use
 * layout-compatible arguments, so known commands are forwarded
 * unchanged and anything else is rejected with -ENOIOCTLCMD.
 */
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	long ret = -ENOIOCTLCMD;

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(file_inode(file), file, cmd, arg);
		break;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
2117
/*
 * Interrupt handler: drain the adapter's reply FIFO.
 *
 * Each dword read from reply_port is the bus address of a reply frame.
 * Replies are routed by the context word we placed in the request:
 * bit 30 marks an ioctl passthrough (the frame is copied into the
 * waiter's buffer), bit 31 marks a post-wait message (the sleeper is
 * woken via adpt_i2o_post_wait_complete()), anything else is a normal
 * SCSI command completion.  The frame's MFA is written back to
 * reply_port to return it to the adapter.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	/* Loop while the adapter signals pending replies */
	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){ 
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/* Translate the bus address to a kernel virtual address;
		 * a sane adapter always hands back a frame from our pool */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/* Failed request: recover the original message frame
			 * (its MFA is preserved at offset 28), restore its
			 * transaction context, and recycle it with a NOP */
			u32 old_m = readl(reply+28); 
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		} 
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			/* A SCSI command context here would indicate a
			 * routing bug - warn but complete the waiter anyway */
			if(!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Hand the reply frame back to the adapter */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2216
/*
 * Translate a SCSI command into an I2O_CMD_SCSI_EXEC private message
 * and post it to the adapter.
 *
 * @pHba: adapter to post to
 * @cmd:  the SCSI command to translate
 * @d:    target device (supplies the I2O TID)
 *
 * Returns 0 when the command was posted (or completed immediately with
 * an error result), otherwise the negative code from
 * adpt_i2o_post_this().
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000; 
	
	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			/* Unknown direction: fail the command back to the
			 * midlayer immediately rather than posting it */
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return  0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff 
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT | 
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | 
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block 
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		/* 64-bit capable: add the SG attribute element first */
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		/* One SG element per mapped segment; 64-bit adapters get a
		 * high address dword as well */
		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;
		
		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}
	
	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
	
	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2337
2338
2339 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2340 {
2341         struct Scsi_Host *host;
2342
2343         host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2344         if (host == NULL) {
2345                 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2346                 return -1;
2347         }
2348         host->hostdata[0] = (unsigned long)pHba;
2349         pHba->host = host;
2350
2351         host->irq = pHba->pDev->irq;
2352         /* no IO ports, so don't have to set host->io_port and
2353          * host->n_io_port
2354          */
2355         host->io_port = 0;
2356         host->n_io_port = 0;
2357                                 /* see comments in scsi_host.h */
2358         host->max_id = 16;
2359         host->max_lun = 256;
2360         host->max_channel = pHba->top_scsi_channel + 1;
2361         host->cmd_per_lun = 1;
2362         host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2363         host->sg_tablesize = pHba->sg_tablesize;
2364         host->can_queue = pHba->post_fifo_size;
2365         host->use_cmd_list = 1;
2366
2367         return 0;
2368 }
2369
2370
/*
 * Translate an I2O reply frame into a SCSI midlayer completion.
 *
 * @reply: __iomem pointer to the I2O reply message frame
 * @cmd:   the SCSI command this reply completes
 *
 * Maps the I2O detailed status to a DID_* host byte, sets the data
 * residual, copies back sense data on CHECK CONDITION, ORs in the
 * device status byte, and finally calls cmd->scsi_done().  Returns
 * the value left in cmd->result.
 */
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);   // low byte: SCSI status from device
	hba_status = detailed_status >> 8;       // high byte: adapter (HBA) status

	// calculate resid for sg: word 5 of the reply is the transfer count
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow: fewer bytes moved than the
			// command required is treated as an error
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		/* Device unreachable / no resources -> report as timeout so
		 * the midlayer error handling kicks in */
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* All remaining detailed statuses collapse to DID_ERROR */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data (starts at word 7 of the reply)
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid -
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	/* Merge the device status byte into the low bits of the result */
	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	} 
	return cmd->result;
}
2491
2492
2493 static s32 adpt_rescan(adpt_hba* pHba)
2494 {
2495         s32 rcode;
2496         ulong flags = 0;
2497
2498         if(pHba->host)
2499                 spin_lock_irqsave(pHba->host->host_lock, flags);
2500         if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2501                 goto out;
2502         if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2503                 goto out;
2504         rcode = 0;
2505 out:    if(pHba->host)
2506                 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2507         return rcode;
2508 }
2509
2510
/*
 * Walk the freshly fetched LCT (pHba->lct) and reconcile it with the
 * driver's device tables:
 *   - new entries get an i2o_device + adpt_device allocated and linked
 *     into pHba->channel[bus].device[id] (per-LUN chained via next_lun);
 *   - entries already known are marked DPTI_DEV_ONLINE (and brought
 *     back online in the midlayer if needed);
 *   - known devices NOT found in the new LCT are marked offline.
 *
 * Returns 0 on success, -1 if no LCT is present, -ENOMEM on allocation
 * failure (may leave the tables partially updated in that case).
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;
	
	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}
	
	/* table_size is in 32-bit words; 3-word header, 9 words per
	 * entry (per the I2O LCT layout) -> entry count */
	max = lct->table_size;	
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned; anything still unscanned at the
	// end was removed from the LCT and will be taken offline below
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
	
	for(i=0;i<max;i++) {
		/* Only claim entries not owned by another user (0xfff) */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* Query group 0x8000 to learn bus/id/lun of this TID */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* walk the per-LUN chain for this (bus, id) */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}
				
				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);
	
			pDev = pHba->channel[bus_no].device[scsi_id];	
				if( pDev == NULL){
					/* first LUN on this (bus, id) */
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* append to the end of the LUN chain */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2676
2677 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2678 {
2679         struct scsi_cmnd*       cmd = NULL;
2680         struct scsi_device*     d = NULL;
2681
2682         shost_for_each_device(d, pHba->host) {
2683                 unsigned long flags;
2684                 spin_lock_irqsave(&d->list_lock, flags);
2685                 list_for_each_entry(cmd, &d->cmd_list, list) {
2686                         if(cmd->serial_number == 0){
2687                                 continue;
2688                         }
2689                         cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2690                         cmd->scsi_done(cmd);
2691                 }
2692                 spin_unlock_irqrestore(&d->list_lock, flags);
2693         }
2694 }
2695
2696
2697 /*============================================================================
2698  *  Routines from i2o subsystem
2699  *============================================================================
2700  */
2701
2702
2703
/*
 *	Bring an I2O controller into HOLD state. See the spec.
 *
 *	If the adapter was previously initialized, verify it is
 *	responsive (resetting it if not) and force a reset out of any
 *	READY/OPERATIONAL/HOLD/FAILED state; a fresh adapter is simply
 *	reset.  Then set up the outbound queue and fetch the hardware
 *	resource table, which leaves the IOP in HOLD.
 *
 *	Returns 0 on success, -1 or the reset error code on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Adapter was up before - make sure it still answers,
		 * resetting it if the status fetch fails */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any non-RESET state must be reset before we can
		 * (re)initialize the IOP */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);			
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First-time bring-up: just reset */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */
	
	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2758
2759 /*
2760  *      Bring a controller online into OPERATIONAL state. 
2761  */
2762  
2763 static int adpt_i2o_online_hba(adpt_hba* pHba)
2764 {
2765         if (adpt_i2o_systab_send(pHba) < 0)
2766                 return -1;
2767         /* In READY state */
2768
2769         if (adpt_i2o_enable_hba(pHba) < 0)
2770                 return -1;
2771
2772         /* In OPERATIONAL state  */
2773         return 0;
2774 }
2775
2776 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2777 {
2778         u32 __iomem *msg;
2779         ulong timeout = jiffies + 5*HZ;
2780
2781         while(m == EMPTY_QUEUE){
2782                 rmb();
2783                 m = readl(pHba->post_port);
2784                 if(m != EMPTY_QUEUE){
2785                         break;
2786                 }
2787                 if(time_after(jiffies,timeout)){
2788                         printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2789                         return 2;
2790                 }
2791                 schedule_timeout_uninterruptible(1);
2792         }
2793         msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2794         writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2795         writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2796         writel( 0,&msg[2]);
2797         wmb();
2798
2799         writel(m, pHba->post_port);
2800         wmb();
2801         return 0;
2802 }
2803
/*
 * Initialize the IOP's outbound (reply) queue: send an
 * I2O_CMD_OUTBOUND_INIT message, wait for the IOP to report
 * completion through a 4-byte DMA status word, then allocate the
 * reply-frame pool and hand every frame address to the IOP via the
 * reply port.
 *
 * Returns 0 on success, -ETIMEDOUT / -ENOMEM / -2 on failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Poll the post port until the IOP gives us a message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte coherent buffer the IOP writes its progress/status into */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		/* return the claimed frame so it is not leaked */
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);		/* SG points at the status word */

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Drop any previous pool (e.g. on re-init after reset) */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Give every reply frame's bus address to the IOP */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2900
2901
2902 /*
2903  * I2O System Table.  Contains information about
2904  * all the IOPs in the system.  Used to inform IOPs
2905  * about each other's existence.
2906  *
2907  * sys_tbl_ver is the CurrentChangeIndicator that is
2908  * used by IOPs to track changes.
2909  */
2910
2911
2912
/*
 * Fetch the IOP status block into pHba->status_block (allocated
 * coherently on first use) via I2O_CMD_STATUS_GET, then derive the
 * inbound/outbound FIFO sizes and the scatter-gather table size from
 * the reported frame sizes.
 *
 * Completion is detected by polling the last byte of the status
 * block (offset 87), which the IOP sets to 0xff when done.
 *
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n", 
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim a message frame from the post port */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);

	
	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	/* 64-bit bus address of the status block, low word first */
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* IOP writes 0xff into the last byte when the block is filled */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	// (frame size is in 32-bit words; 64-bit SG needs two extra
	// header words and a wider element)
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		  - 14 * sizeof(u32))
		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		  - 12 * sizeof(u32))
		  / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
3033
3034 /*
3035  * Get the IOP's Logical Configuration Table
3036  */
3037 static int adpt_i2o_lct_get(adpt_hba* pHba)
3038 {
3039         u32 msg[8];
3040         int ret;
3041         u32 buf[16];
3042
3043         if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3044                 pHba->lct_size = pHba->status_block->expected_lct_size;
3045         }
3046         do {
3047                 if (pHba->lct == NULL) {
3048                         pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3049                                         pHba->lct_size, &pHba->lct_pa,
3050                                         GFP_ATOMIC);
3051                         if(pHba->lct == NULL) {
3052                                 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3053                                         pHba->name);
3054                                 return -ENOMEM;
3055                         }
3056                 }
3057                 memset(pHba->lct, 0, pHba->lct_size);
3058
3059                 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3060                 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3061                 msg[2] = 0;
3062                 msg[3] = 0;
3063                 msg[4] = 0xFFFFFFFF;    /* All devices */
3064                 msg[5] = 0x00000000;    /* Report now */
3065                 msg[6] = 0xD0000000|pHba->lct_size;
3066                 msg[7] = (u32)pHba->lct_pa;
3067
3068                 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3069                         printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", 
3070                                 pHba->name, ret);       
3071                         printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3072                         return ret;
3073                 }
3074
3075                 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3076                         pHba->lct_size = pHba->lct->table_size << 2;
3077                         dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3078                                         pHba->lct, pHba->lct_pa);
3079                         pHba->lct = NULL;
3080                 }
3081         } while (pHba->lct == NULL);
3082
3083         PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3084
3085
3086         // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3087         if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3088                 pHba->FwDebugBufferSize = buf[1];
3089                 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3090                                                 pHba->FwDebugBufferSize);
3091                 if (pHba->FwDebugBuffer_P) {
3092                         pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3093                                                         FW_DEBUG_FLAGS_OFFSET;
3094                         pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3095                                                         FW_DEBUG_BLED_OFFSET;
3096                         pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3097                         pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3098                                                 FW_DEBUG_STR_LENGTH_OFFSET;
3099                         pHba->FwDebugBuffer_P += buf[2]; 
3100                         pHba->FwDebugFlags = 0;
3101                 }
3102         }
3103
3104         return 0;
3105 }
3106
/*
 * Build the I2O system table: a header plus one i2o_sys_tbl_entry per
 * registered HBA, held in a single DMA-coherent buffer (globals sys_tbl /
 * sys_tbl_pa / sys_tbl_len).  The table is later pushed to each IOP by
 * adpt_i2o_systab_send().
 *
 * Returns 0 on success, -ENOMEM if the table cannot be allocated.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Discard any previously built table before rebuilding. */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	/* change_ind lets IOPs detect a newer table revision. */
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			/* Adapter unreachable: shrink the entry count and skip it. */
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		/* IOP IDs 0/1 are reserved (host is 0), hence unit + 2. */
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Inbound message post register lives at BAR offset 0x40. */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", 
			count, table[count]);
	}
}
#endif

	return 0;
}
3168
3169
3170 /*
3171  *       Dump the information block associated with a given unit (TID)
3172  */
3173  
3174 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3175 {
3176         char buf[64];
3177         int unit = d->lct_data.tid;
3178
3179         printk(KERN_INFO "TID %3.3d ", unit);
3180
3181         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3182         {
3183                 buf[16]=0;
3184                 printk(" Vendor: %-12.12s", buf);
3185         }
3186         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3187         {
3188                 buf[16]=0;
3189                 printk(" Device: %-12.12s", buf);
3190         }
3191         if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3192         {
3193                 buf[8]=0;
3194                 printk(" Rev: %-12.12s\n", buf);
3195         }
3196 #ifdef DEBUG
3197          printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3198          printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3199          printk(KERN_INFO "\tFlags: ");
3200
3201          if(d->lct_data.device_flags&(1<<0))
3202                   printk("C");       // ConfigDialog requested
3203          if(d->lct_data.device_flags&(1<<1))
3204                   printk("U");       // Multi-user capable
3205          if(!(d->lct_data.device_flags&(1<<4)))
3206                   printk("P");       // Peer service enabled!
3207          if(!(d->lct_data.device_flags&(1<<5)))
3208                   printk("M");       // Mgmt service enabled!
3209          printk("\n");
3210 #endif
3211 }
3212
3213 #ifdef DEBUG
3214 /*
3215  *      Do i2o class name lookup
3216  */
3217 static const char *adpt_i2o_get_class_name(int class)
3218 {
3219         int idx = 16;
3220         static char *i2o_class_name[] = {
3221                 "Executive",
3222                 "Device Driver Module",
3223                 "Block Device",
3224                 "Tape Device",
3225                 "LAN Interface",
3226                 "WAN Interface",
3227                 "Fibre Channel Port",
3228                 "Fibre Channel Device",
3229                 "SCSI Device",
3230                 "ATE Port",
3231                 "ATE Device",
3232                 "Floppy Controller",
3233                 "Floppy Device",
3234                 "Secondary Bus Port",
3235                 "Peer Transport Agent",
3236                 "Peer Transport",
3237                 "Unknown"
3238         };
3239         
3240         switch(class&0xFFF) {
3241         case I2O_CLASS_EXECUTIVE:
3242                 idx = 0; break;
3243         case I2O_CLASS_DDM:
3244                 idx = 1; break;
3245         case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3246                 idx = 2; break;
3247         case I2O_CLASS_SEQUENTIAL_STORAGE:
3248                 idx = 3; break;
3249         case I2O_CLASS_LAN:
3250                 idx = 4; break;
3251         case I2O_CLASS_WAN:
3252                 idx = 5; break;
3253         case I2O_CLASS_FIBRE_CHANNEL_PORT:
3254                 idx = 6; break;
3255         case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3256                 idx = 7; break;
3257         case I2O_CLASS_SCSI_PERIPHERAL:
3258                 idx = 8; break;
3259         case I2O_CLASS_ATE_PORT:
3260                 idx = 9; break;
3261         case I2O_CLASS_ATE_PERIPHERAL:
3262                 idx = 10; break;
3263         case I2O_CLASS_FLOPPY_CONTROLLER:
3264                 idx = 11; break;
3265         case I2O_CLASS_FLOPPY_DEVICE:
3266                 idx = 12; break;
3267         case I2O_CLASS_BUS_ADAPTER_PORT:
3268                 idx = 13; break;
3269         case I2O_CLASS_PEER_TRANSPORT_AGENT:
3270                 idx = 14; break;
3271         case I2O_CLASS_PEER_TRANSPORT:
3272                 idx = 15; break;
3273         }
3274         return i2o_class_name[idx];
3275 }
3276 #endif
3277
3278
/*
 * Fetch the Hardware Resource Table (HRT) from the IOP.
 *
 * The table size is not known in advance: start with sizeof(i2o_hrt),
 * and if the adapter reports a larger table (num_entries * entry_len,
 * << 2 because lengths are in 32-bit words), free the buffer, grow it
 * to the reported size and retry until the whole table fits.
 *
 * Returns 0 on success or a negative error code.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* Reported table larger than our buffer: grow and retry. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3316
3317 /*
3318  *       Query one scalar group value or a whole scalar group.
3319  */                     
3320 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 
3321                         int group, int field, void *buf, int buflen)
3322 {
3323         u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3324         u8 *opblk_va;
3325         dma_addr_t opblk_pa;
3326         u8 *resblk_va;
3327         dma_addr_t resblk_pa;
3328
3329         int size;
3330
3331         /* 8 bytes for header */
3332         resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3333                         sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3334         if (resblk_va == NULL) {
3335                 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3336                 return -ENOMEM;
3337         }
3338
3339         opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3340                         sizeof(opblk), &opblk_pa, GFP_KERNEL);
3341         if (opblk_va == NULL) {
3342                 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3343                         resblk_va, resblk_pa);
3344                 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3345                         pHba->name);
3346                 return -ENOMEM;
3347         }
3348         if (field == -1)                /* whole group */
3349                         opblk[4] = -1;
3350
3351         memcpy(opblk_va, opblk, sizeof(opblk));
3352         size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 
3353                 opblk_va, opblk_pa, sizeof(opblk),
3354                 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3355         dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3356         if (size == -ETIME) {
3357                 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3358                                                         resblk_va, resblk_pa);
3359                 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3360                 return -ETIME;
3361         } else if (size == -EINTR) {
3362                 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3363                                                         resblk_va, resblk_pa);
3364                 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3365                 return -EINTR;
3366         }
3367                         
3368         memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3369
3370         dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3371                                                 resblk_va, resblk_pa);
3372         if (size < 0)
3373                 return size;    
3374
3375         return buflen;
3376 }
3377
3378
3379 /*      Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3380  *
3381  *      This function can be used for all UtilParamsGet/Set operations.
3382  *      The OperationBlock is given in opblk-buffer, 
3383  *      and results are returned in resblk-buffer.
3384  *      Note that the minimum sized resblk is 8 bytes and contains
3385  *      ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3386  */
3387 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 
3388                   void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3389                 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3390 {
3391         u32 msg[9]; 
3392         u32 *res = (u32 *)resblk_va;
3393         int wait_status;
3394
3395         msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3396         msg[1] = cmd << 24 | HOST_TID << 12 | tid; 
3397         msg[2] = 0;
3398         msg[3] = 0;
3399         msg[4] = 0;
3400         msg[5] = 0x54000000 | oplen;    /* OperationBlock */
3401         msg[6] = (u32)opblk_pa;
3402         msg[7] = 0xD0000000 | reslen;   /* ResultBlock */
3403         msg[8] = (u32)resblk_pa;
3404
3405         if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3406                 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3407                 return wait_status;     /* -DetailedStatus */
3408         }
3409
3410         if (res[1]&0x00FF0000) {        /* BlockStatus != SUCCESS */
3411                 printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3412                         "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3413                         pHba->name,
3414                         (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3415                                                          : "PARAMS_GET",   
3416                         res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3417                 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3418         }
3419
3420          return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
3421 }
3422
3423
3424 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3425 {
3426         u32 msg[4];
3427         int ret;
3428
3429         adpt_i2o_status_get(pHba);
3430
3431         /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3432
3433         if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3434            (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3435                 return 0;
3436         }
3437
3438         msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3439         msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3440         msg[2] = 0;
3441         msg[3] = 0;
3442
3443         if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3444                 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3445                                 pHba->unit, -ret);
3446         } else {
3447                 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3448         }
3449
3450         adpt_i2o_status_get(pHba);
3451         return ret;
3452 }
3453
3454
3455 /* 
3456  * Enable IOP. Allows the IOP to resume external operations.
3457  */
3458 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3459 {
3460         u32 msg[4];
3461         int ret;
3462         
3463         adpt_i2o_status_get(pHba);
3464         if(!pHba->status_block){
3465                 return -ENOMEM;
3466         }
3467         /* Enable only allowed on READY state */
3468         if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3469                 return 0;
3470
3471         if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3472                 return -EINVAL;
3473
3474         msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3475         msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3476         msg[2]= 0;
3477         msg[3]= 0;
3478
3479         if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3480                 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n", 
3481                         pHba->name, ret);
3482         } else {
3483                 PDEBUG("%s: Enabled.\n", pHba->name);
3484         }
3485
3486         adpt_i2o_status_get(pHba);
3487         return ret;
3488 }
3489
3490
3491 static int adpt_i2o_systab_send(adpt_hba* pHba)
3492 {
3493          u32 msg[12];
3494          int ret;
3495
3496         msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3497         msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3498         msg[2] = 0;
3499         msg[3] = 0;
3500         msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3501         msg[5] = 0;                                /* Segment 0 */
3502
3503         /* 
3504          * Provide three SGL-elements:
3505          * System table (SysTab), Private memory space declaration and 
3506          * Private i/o space declaration  
3507          */
3508         msg[6] = 0x54000000 | sys_tbl_len;
3509         msg[7] = (u32)sys_tbl_pa;
3510         msg[8] = 0x54000000 | 0;
3511         msg[9] = 0;
3512         msg[10] = 0xD4000000 | 0;
3513         msg[11] = 0;
3514
3515         if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3516                 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n", 
3517                         pHba->name, ret);
3518         }
3519 #ifdef DEBUG
3520         else {
3521                 PINFO("%s: SysTab set.\n", pHba->name);
3522         }
3523 #endif
3524
3525         return ret;     
3526 }
3527
3528
3529 /*============================================================================
3530  *
3531  *============================================================================
3532  */
3533
3534
3535 #ifdef UARTDELAY 
3536
/*
 * Busy-wait for roughly 'millisec' milliseconds, one udelay(1000)
 * per millisecond.  Only built when UARTDELAY is defined.
 *
 * Fix: the declaration carried a duplicated 'static static' storage
 * class specifier, which is a compile error whenever this #ifdef
 * branch is actually built.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3544
3545 #endif
3546
/*
 * SCSI mid-layer host template for the dpt_i2o driver: wires the
 * generic SCSI entry points (queueing, error handling, BIOS geometry,
 * /proc info) to the adpt_* implementations in this file.
 */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.use_clustering		= ENABLE_CLUSTERING,
};
3564
/*
 * Module init: detect all supported adapters, register every detected
 * HBA with the SCSI mid-layer and trigger a bus scan.  Any
 * registration failure fails the whole module load.
 */
static int __init adpt_init(void)
{
	int		error;
	adpt_hba	*pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	/* adpt_detect() succeeded but found no adapters. */
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	/* NOTE(review): this unwinds the ENTIRE chain, including hosts for
	 * which scsi_add_host() failed or was never attempted — confirm
	 * scsi_remove_host() is safe to call on such hosts. */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}
3592
3593 static void __exit adpt_exit(void)
3594 {
3595         adpt_hba        *pHba, *next;
3596
3597         for (pHba = hba_chain; pHba; pHba = next) {
3598                 next = pHba->next;
3599                 adpt_release(pHba);
3600         }
3601 }
3602
3603 module_init(adpt_init);
3604 module_exit(adpt_exit);
3605
3606 MODULE_LICENSE("GPL");