1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7 * Copyright (C) 2003-2004 Christoph Hellwig
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 ******************************************************************************/
20 #define QLA1280_VERSION "3.27.1"
21 /*****************************************************************************
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
25 Rev 3.27, February 10, 2009, Michael Reed
26 - General code cleanup.
27 - Improve error recovery.
28 Rev 3.26, January 16, 2006 Jes Sorensen
29 - Ditch all < 2.6 support
30 Rev 3.25.1, February 10, 2005 Christoph Hellwig
31 - use pci_map_single to map non-S/G requests
32 - remove qla1280_proc_info
33 Rev 3.25, September 28, 2004, Christoph Hellwig
34 - add support for ISP1020/1040
35 - don't include "scsi.h" anymore for 2.6.x
36 Rev 3.24.4 June 7, 2004 Christoph Hellwig
37 - restructure firmware loading, cleanup initialization code
38 - prepare support for ISP1020/1040 chips
39 Rev 3.24.3 January 19, 2004, Jes Sorensen
40 - Handle PCI DMA mask settings correctly
41 - Correct order of error handling in probe_one, free_irq should not
42 be called if request_irq failed
43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
44 - Big endian fixes (James)
45 - Remove bogus IOCB content on zero data transfer commands (Andrew)
46 Rev 3.24.1 January 5, 2004, Jes Sorensen
47 - Initialize completion queue to avoid OOPS on probe
48 - Handle interrupts during mailbox testing
49 Rev 3.24 November 17, 2003, Christoph Hellwig
50 - use struct list_head for completion queue
51 - avoid old Scsi_FOO typedefs
52 - cleanup 2.4 compat glue a bit
53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
54 - make initialization for memory mapped vs port I/O more similar
55 - remove broken pci config space manipulation
57 - this is an almost perfect 2.6 scsi driver now! ;)
58 Rev 3.23.39 December 17, 2003, Jes Sorensen
59 - Delete completion queue from srb if mailbox command failed,
60 to avoid qla1280_done completing qla1280_error_action's
62 - Reduce arguments for qla1280_done
63 Rev 3.23.38 October 18, 2003, Christoph Hellwig
64 - Convert to new-style hotpluggable driver for 2.6
65 - Fix missing scsi_unregister/scsi_host_put on HBA removal
66 - Kill some more cruft
67 Rev 3.23.37 October 1, 2003, Jes Sorensen
68 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
70 - Clean up locking in probe path
71 Rev 3.23.36 October 1, 2003, Christoph Hellwig
72 - queuecommand only ever receives new commands - clear flags
73 - Reintegrate lost fixes from Linux 2.5
74 Rev 3.23.35 August 14, 2003, Jes Sorensen
76 Rev 3.23.34 July 23, 2003, Jes Sorensen
77 - Remove pointless TRUE/FALSE macros
78 - Clean up vchan handling
79 Rev 3.23.33 July 3, 2003, Jes Sorensen
80 - Don't define register access macros before the define determining MMIO.
81 This just happened to work out on ia64 but not elsewhere.
82 - Don't try and read from the card while it is in reset as
83 it won't respond and causes an MCA
84 Rev 3.23.32 June 23, 2003, Jes Sorensen
85 - Basic support for boot time arguments
86 Rev 3.23.31 June 8, 2003, Jes Sorensen
87 - Reduce boot time messages
88 Rev 3.23.30 June 6, 2003, Jes Sorensen
89 - Do not enable sync/wide/ppr before it has been determined
90 that the target device actually supports it
91 - Enable DMA arbitration for multi channel controllers
92 Rev 3.23.29 June 3, 2003, Jes Sorensen
94 Rev 3.23.28 June 3, 2003, Jes Sorensen
95 - Eliminate duplicate marker commands on bus resets
96 - Handle outstanding commands appropriately on bus/device resets
97 Rev 3.23.27 May 28, 2003, Jes Sorensen
98 - Remove bogus input queue code, let the Linux SCSI layer do the work
99 - Clean up NVRAM handling, only read it once from the card
100 - Add a number of missing default nvram parameters
101 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
102 - Use completion queue for mailbox commands instead of busy wait
103 Rev 3.23.25 Beta May 27, 2003, James Bottomley
104 - Migrate to use new error handling code
105 Rev 3.23.24 Beta May 21, 2003, James Bottomley
107 - Cleanup data direction code
108 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
109 - Switch to using MMIO instead of PIO
110 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
111 - Fix PCI parity problem with 12160 during reset.
112 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
113 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
114 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
115 - Remove < 2.4.x support
116 - Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's
118 - Kill all leftovers of target-mode support which never worked anyway
119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
120 - Do qla1280_pci_config() before calling request_irq() and
122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
124 - Hand correct arguments to free_irq() in case of failure
125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
126 - Run source through Lindent and clean up the output
127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
130 - Rely on mailbox commands generating interrupts - do not
131 run qla1280_isr() from ql1280_mailbox_command()
132 - Remove device_reg_t
133 - Integrate ql12160_set_target_parameters() with 1280 version
134 - Make qla1280_setup() non static
135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
136 sent to the card - this command pauses the firmware!!!
137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
139 - Remove a pile of pointless and confusing (srb_t **) and
140 (scsi_lu_t *) typecasts
141 - Explicit mark that we do not use the new error handling (for now)
142 - Remove scsi_qla_host_t and use 'struct' instead
143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
144 pci_64bit_slot flags which weren't used for anything anyway
145 - Grab host->host_lock while calling qla1280_isr() from abort()
146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
147 do not need to save/restore flags in the interrupt handler
148 - Enable interrupts early (before any mailbox access) in preparation
149 for cleaning up the mailbox handling
150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
152 it with proper use of dprintk().
153 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
154 a debug level argument to determine if data is to be printed
155 - Add KERN_* info to printk()
156 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
157 - Significant cosmetic cleanups
158 - Change debug code to use dprintk() and remove #if mess
159 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
160 - More cosmetic cleanups, fix places treating return as function
161 - use cpu_relax() in qla1280_debounce_register()
162 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
163 - Make it compile under 2.5.5
164 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
165 - Do not typecast short * to long * in QL1280BoardTbl, this
166 broke miserably on big endian boxes
167 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
168 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
169 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
170 unsigned int to match the types from struct scsi_cmnd
171 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
172 - Remove bogus timer_t typedef from qla1280.h
173 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
174 for PCI_ values, call pci_set_master()
175 - Fix memleak of qla1280_buffer on module unload
176 - Only compile module parsing code #ifdef MODULE - should be
177 changed to use individual MODULE_PARM's later
178 - Remove dummy_buffer that was never modified nor printed
179 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
180 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
181 - Remove \r from print statements, this is Linux, not DOS
182 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
184 - Remove C++ compile hack in header file as Linux drivers are not
185 supposed to be compiled as C++
186 - Kill MS_64BITS macro to make the code more readable
187 - Remove unnecessary flags.in_interrupts bit
188 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189 - Don't check for set flags on q->q_flag one by one in qla1280_next()
190 - Check whether the interrupt was generated by the QLA1280 before
192 - qla1280_status_entry(): Only zero out part of sense_buffer that
193 is not being copied into
194 - Remove more superfluous typecasts
195 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
196 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
197 - Don't walk the entire list in qla1280_putq_t() just to directly
198 grab the pointer to the last element afterwards
199 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
201 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
202 - Set dev->max_sectors to 1024
203 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
204 - Provide compat macros for pci_enable_device(), pci_find_subsys()
205 and scsi_set_pci_device()
206 - Call scsi_set_pci_device() for all devices
207 - Reduce size of kernel version dependent device probe code
208 - Move duplicate probe/init code to separate function
209 - Handle error if qla1280_mem_alloc() fails
210 - Kill OFFSET() macro and use Linux's PCI definitions instead
211 - Kill private structure defining PCI config space (struct config_reg)
212 - Only allocate I/O port region if not in MMIO mode
213 - Remove duplicate (unused) sanity check of size of srb_t
214 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
215 - Change home-brew memset() implementations to use memset()
216 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
217 port directly is not legal under Linux.
218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
219 - Remove pre 2.2 kernel support
220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
221 - Fix MMIO access to use readl/writel instead of directly
222 dereferencing pointers
223 - Nuke MSDOS debugging code
224 - Change true/false data types to int from uint8_t
225 - Use int for counters instead of uint8_t etc.
226 - Clean up size & byte order conversion macro usage
227 Rev 3.23 Beta January 11, 2001 BN Qlogic
228 - Added check of device_id when handling non
229 QLA12160s during detect().
230 Rev 3.22 Beta January 5, 2001 BN Qlogic
231 - Changed queue_task() to schedule_task()
232 for kernels 2.4.0 and higher.
233 Note: 2.4.0-testxx kernels released prior to
234 the actual 2.4.0 kernel release on January 2001
235 will get compile/link errors with schedule_task().
236 Please update your kernel to released 2.4.0 level,
237 or comment lines in this file flagged with 3.22
238 to resolve compile/link error of schedule_task().
239 - Added -DCONFIG_SMP in addition to -D__SMP__
240 in Makefile for 2.4.0 builds of driver as module.
241 Rev 3.21 Beta January 4, 2001 BN Qlogic
242 - Changed criteria of 64/32 Bit mode of HBA
243 operation according to BITS_PER_LONG rather
244 than HBA's NVRAM setting of >4Gig memory bit;
245 so that the HBA auto-configures without the need
246 to setup each system individually.
247 Rev 3.20 Beta December 5, 2000 BN Qlogic
248 - Added priority handling to IA-64 onboard SCSI
249 ISP12160 chip for kernels greater than 2.3.18.
250 - Added irqrestore for qla1280_intr_handler.
251 - Enabled /proc/scsi/qla1280 interface.
252 - Clear /proc/scsi/qla1280 counters in detect().
253 Rev 3.19 Beta October 13, 2000 BN Qlogic
254 - Declare driver_template for new kernel
255 (2.4.0 and greater) scsi initialization scheme.
256 - Update /proc/scsi entry for 2.3.18 kernels and
258 Rev 3.18 Beta October 10, 2000 BN Qlogic
259 - Changed scan order of adapters to map
260 the QLA12160 followed by the QLA1280.
261 Rev 3.17 Beta September 18, 2000 BN Qlogic
262 - Removed warnings for 32 bit 2.4.x compiles
263 - Corrected declared size for request and response
264 DMA addresses that are kept in each ha
265 Rev. 3.16 Beta August 25, 2000 BN Qlogic
266 - Corrected 64 bit addressing issue on IA-64
267 where the upper 32 bits were not properly
268 passed to the RISC engine.
269 Rev. 3.15 Beta August 22, 2000 BN Qlogic
270 - Modified qla1280_setup_chip to properly load
271 ISP firmware for greater than 4 Gig memory on IA-64
272 Rev. 3.14 Beta August 16, 2000 BN Qlogic
273 - Added setting of dma_mask to full 64 bit
274 if flags.enable_64bit_addressing is set in NVRAM
275 Rev. 3.13 Beta August 16, 2000 BN Qlogic
276 - Use new PCI DMA mapping APIs for 2.4.x kernel
277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
278 - Added check of pci_enable_device to detect() for 2.3.x
279 - Use pci_resource_start() instead of
280 pdev->resource[0].start in detect() for 2.3.x
281 - Updated driver version
282 Rev. 3.11 July 14, 2000 BN Qlogic
283 - Updated SCSI Firmware to following versions:
286 - Updated driver version to 3.11
287 Rev. 3.10 June 23, 2000 BN Qlogic
288 - Added filtering of AMI SubSys Vendor ID devices
290 - DEBUG_QLA1280 undefined and new version BN Qlogic
291 Rev. 3.08b May 9, 2000 MD Dell
292 - Added logic to check against AMI subsystem vendor ID
293 Rev. 3.08 May 4, 2000 DG Qlogic
294 - Added logic to check for PCI subsystem ID.
295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
296 - Updated SCSI Firmware to following versions:
299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
300 - Internal revision; not released
301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
302 - Edit correction for virt_to_bus and PROC.
303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
304 - Merge changes from ia64 port.
305 Rev. 3.03 Mar 28, 2000 BN Qlogic
306 - Increase version to reflect new code drop with compile fix
307 of issue with inclusion of linux/spinlock for 2.3 kernels
308 Rev. 3.02 Mar 15, 2000 BN Qlogic
309 - Merge qla1280_proc_info from 2.10 code base
310 Rev. 3.01 Feb 10, 2000 BN Qlogic
311 - Corrected code to compile on a 2.2.x kernel.
312 Rev. 3.00 Jan 17, 2000 DG Qlogic
313 - Added 64-bit support.
314 Rev. 2.07 Nov 9, 1999 DG Qlogic
315 - Added new routine to set target parameters for ISP12160.
316 Rev. 2.06 Sept 10, 1999 DG Qlogic
317 - Added support for ISP12160 Ultra 3 chip.
318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
319 - Modified code to remove errors generated when compiling with
320 Cygnus IA64 Compiler.
321 - Changed conversion of pointers to unsigned longs instead of integers.
322 - Changed type of I/O port variables from uint32_t to unsigned long.
323 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
324 - Changed sprintf and printk format specifiers for pointers to %p.
325 - Changed some int to long type casts where needed in sprintf & printk.
326 - Added l modifiers to sprintf and printk format specifiers for longs.
327 - Removed unused local variables.
328 Rev. 1.20 June 8, 1999 DG, Qlogic
329 Changes to support RedHat release 6.0 (kernel 2.2.5).
330 - Added SCSI exclusive access lock (io_request_lock) when accessing
332 - Added changes for the new LINUX interface template. Some new error
333 handling routines have been added to the template, but for now we
334 will use the old ones.
335 - Initial Beta Release.
336 *****************************************************************************/
339 #include <linux/module.h>
341 #include <linux/types.h>
342 #include <linux/string.h>
343 #include <linux/errno.h>
344 #include <linux/kernel.h>
345 #include <linux/ioport.h>
346 #include <linux/delay.h>
347 #include <linux/timer.h>
348 #include <linux/pci.h>
349 #include <linux/proc_fs.h>
350 #include <linux/stat.h>
351 #include <linux/pci_ids.h>
352 #include <linux/interrupt.h>
353 #include <linux/init.h>
354 #include <linux/dma-mapping.h>
355 #include <linux/firmware.h>
359 #include <asm/byteorder.h>
360 #include <asm/processor.h>
361 #include <asm/types.h>
363 #include <scsi/scsi.h>
364 #include <scsi/scsi_cmnd.h>
365 #include <scsi/scsi_device.h>
366 #include <scsi/scsi_host.h>
367 #include <scsi/scsi_tcq.h>
369 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370 #include <asm/sn/io.h>
375 * Compile time Options:
376 * 0 - Disable and 1 - Enable
378 #define DEBUG_QLA1280_INTR 0
379 #define DEBUG_PRINT_NVRAM 0
380 #define DEBUG_QLA1280 0
383 * The SGI VISWS is broken and doesn't support MMIO ;-(
385 #ifdef CONFIG_X86_VISWS
386 #define MEMORY_MAPPED_IO 0
388 #define MEMORY_MAPPED_IO 1
393 #ifndef BITS_PER_LONG
394 #error "BITS_PER_LONG not defined!"
396 #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
397 #define QLA_64BIT_PTR 1
401 #define pci_dma_hi32(a) ((a >> 16) >> 16)
403 #define pci_dma_hi32(a) 0
405 #define pci_dma_lo32(a) (a & 0xffffffff)
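/*
 * Added note: the ISP takes DMA addresses as four 16-bit mailbox words,
 * so a 64-bit bus address is first split with the two macros above and
 * then halved again, e.g. as done in qla1280_init_rings() below:
 *
 *	mb[3] = ha->request_dma & 0xffff;
 *	mb[2] = (ha->request_dma >> 16) & 0xffff;
 *	mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
 *	mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
 *
 * On configurations without QLA_64BIT_PTR, pci_dma_hi32() is simply 0.
 */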
407 #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
409 #if defined(__ia64__) && !defined(ia64_platform_is)
410 #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
414 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
415 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
416 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
417 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
418 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
421 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
422 static void qla1280_remove_one(struct pci_dev *);
425 * QLogic Driver Support Function Prototypes.
427 static void qla1280_done(struct scsi_qla_host *);
428 static int qla1280_get_token(char *);
429 static int qla1280_setup(char *s) __init;
432 * QLogic ISP1280 Hardware Support Function Prototypes.
434 static int qla1280_load_firmware(struct scsi_qla_host *);
435 static int qla1280_init_rings(struct scsi_qla_host *);
436 static int qla1280_nvram_config(struct scsi_qla_host *);
437 static int qla1280_mailbox_command(struct scsi_qla_host *,
438 uint8_t, uint16_t *);
439 static int qla1280_bus_reset(struct scsi_qla_host *, int);
440 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
441 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
442 static int qla1280_abort_isp(struct scsi_qla_host *);
444 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
446 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
448 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
449 static void qla1280_poll(struct scsi_qla_host *);
450 static void qla1280_reset_adapter(struct scsi_qla_host *);
451 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
452 static void qla1280_isp_cmd(struct scsi_qla_host *);
453 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
454 static void qla1280_rst_aen(struct scsi_qla_host *);
455 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
457 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
459 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
460 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
461 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
462 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
463 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
465 static void qla1280_get_target_parameters(struct scsi_qla_host *,
466 struct scsi_device *);
467 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
470 static struct qla_driver_setup driver_setup;
473 * convert scsi data direction to request_t control flags
475 static inline uint16_t
476 qla1280_data_direction(struct scsi_cmnd *cmnd)
478 switch(cmnd->sc_data_direction) {
479 case DMA_FROM_DEVICE:
480 return BIT_5;
481 case DMA_TO_DEVICE:
482 return BIT_6;
483 case DMA_BIDIRECTIONAL:
484 return BIT_5 | BIT_6;
486 * We could BUG() on default here if one of the four cases isn't
487 * met, but then again if we receive something like that from the
488 * SCSI layer we have more serious problems. This shuts up GCC.
497 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
498 static void __qla1280_dump_buffer(char *, int);
503 * insmod needs to find the variable and make it point to something
506 static char *qla1280;
508 /* insmod qla1280 options=verbose */
509 module_param(qla1280, charp, 0);
511 __setup("qla1280=", qla1280_setup);
516 * We use the scsi_pointer structure that's included with each scsi_command
517 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
518 * bigger than a scsi_pointer.
521 #define CMD_SP(Cmnd) &Cmnd->SCp
522 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
523 #define CMD_CDBP(Cmnd) Cmnd->cmnd
524 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
525 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
526 #define CMD_RESULT(Cmnd) Cmnd->result
527 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
528 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
530 #define CMD_HOST(Cmnd) Cmnd->device->host
531 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
532 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
533 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
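/*
 * Added note: typical usage of the accessors above, e.g. in
 * qla1280_queuecommand_lck() and qla1280_error_action():
 *
 *	struct srb *sp = (struct srb *)CMD_SP(cmd);
 *	bus = SCSI_BUS_32(cmd);
 *	target = SCSI_TCN_32(cmd);
 *	lun = SCSI_LUN_32(cmd);
 *
 * i.e. the srb is overlaid on the scsi_pointer area and bus/target/lun
 * are always read straight from cmd->device.
 */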
536 /*****************************************/
537 /* ISP Boards supported by this driver */
538 /*****************************************/
541 char *name; /* Board ID String */
542 int numPorts; /* Number of SCSI ports */
543 int fw_index; /* index into qla1280_fw_tbl for firmware */
546 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
547 static struct pci_device_id qla1280_pci_tbl[] = {
548 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
549 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
550 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
551 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
552 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
553 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
554 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
555 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
556 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
557 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
558 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
559 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
562 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
564 DEFINE_MUTEX(qla1280_firmware_mutex);
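/*
 * Added note: this mutex serializes request_firmware() and guards the
 * qla1280_fw_tbl[] cache below; once loaded, an image is retained in
 * the table (see qla1280_request_firmware()) so that error recovery can
 * reload the RISC code without fetching the file again (Rev 3.27.1).
 */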
568 const struct firmware *fw;
571 #define QL_NUM_FW_IMAGES 3
573 struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
574 {"qlogic/1040.bin", NULL}, /* image 0 */
575 {"qlogic/1280.bin", NULL}, /* image 1 */
576 {"qlogic/12160.bin", NULL}, /* image 2 */
579 /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
580 static struct qla_boards ql1280_board_tbl[] = {
581 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
582 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
583 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
584 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
585 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
586 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
587 {.name = " ", .numPorts = 0, .fw_index = -1},
590 static int qla1280_verbose = 1;
593 static int ql_debug_level = 1;
594 #define dprintk(level, format, a...) \
595 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
596 #define qla1280_dump_buffer(level, buf, size) \
597 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
598 #define qla1280_print_scsi_cmd(level, cmd) \
599 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
601 #define ql_debug_level 0
602 #define dprintk(level, format, a...) do{}while(0)
603 #define qla1280_dump_buffer(a, b, c) do{}while(0)
604 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
607 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
608 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
609 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
610 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
613 static int qla1280_read_nvram(struct scsi_qla_host *ha)
620 ENTER("qla1280_read_nvram");
622 if (driver_setup.no_nvram)
625 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
627 wptr = (uint16_t *)&ha->nvram;
630 for (cnt = 0; cnt < 3; cnt++) {
631 *wptr = qla1280_get_nvram_word(ha, cnt);
632 chksum += *wptr & 0xff;
633 chksum += (*wptr >> 8) & 0xff;
637 if (nv->id0 != 'I' || nv->id1 != 'S' ||
638 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
639 dprintk(2, "Invalid nvram ID or version!\n");
642 for (; cnt < sizeof(struct nvram); cnt++) {
643 *wptr = qla1280_get_nvram_word(ha, cnt);
644 chksum += *wptr & 0xff;
645 chksum += (*wptr >> 8) & 0xff;
650 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
651 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
656 if (!driver_setup.no_nvram)
657 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
658 "validate NVRAM checksum, using default "
659 "settings\n", ha->host_no);
664 /* The firmware interface is, um, interesting, in that the
665 * actual firmware image on the chip is little endian, thus,
666 * the process of taking that image to the CPU would end up
667 * little endian. However, the firmware interface requires it
668 * to be read a word (two bytes) at a time.
670 * The net result of this would be that the word (and
671 * doubleword) quantities in the firmware would be correct, but
672 * the bytes would be pairwise reversed. Since most of the
673 * firmware quantities are, in fact, bytes, we do an extra
674 * le16_to_cpu() in the firmware read routine.
676 * The upshot of all this is that the bytes in the firmware
677 * are in the correct places, but the 16 and 32 bit quantities
678 * are still in little endian format. We fix that up below by
679 * doing extra reverses on them */
680 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
681 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
682 for(i = 0; i < MAX_BUSES; i++) {
683 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
684 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
686 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
687 LEAVE("qla1280_read_nvram");
692 /**************************************************************************
694 * Return a string describing the driver.
695 **************************************************************************/
697 qla1280_info(struct Scsi_Host *host)
699 static char qla1280_scsi_name_buffer[125];
701 struct scsi_qla_host *ha;
702 struct qla_boards *bdp;
704 bp = &qla1280_scsi_name_buffer[0];
705 ha = (struct scsi_qla_host *)host->hostdata;
706 bdp = &ql1280_board_tbl[ha->devnum];
707 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
710 "QLogic %s PCI to SCSI Host Adapter\n"
711 " Firmware version: %2d.%02d.%02d, Driver version %s",
712 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
717 /**************************************************************************
718 * qla1280_queuecommand
719 * Queue a command to the controller.
722 * The mid-level driver tries to ensure that queuecommand never gets invoked
723 * concurrently with itself or the interrupt handler (although the
724 * interrupt handler may call this routine as part of request-completion
725 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
726 * context, which is a big no-no.
727 **************************************************************************/
729 qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
731 struct Scsi_Host *host = cmd->device->host;
732 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
733 struct srb *sp = (struct srb *)CMD_SP(cmd);
740 CMD_HANDLE(cmd) = (unsigned char *)NULL;
742 qla1280_print_scsi_cmd(5, cmd);
746 * Using 64 bit commands if the PCI bridge doesn't support it is a
747 * bit wasteful, however this should really only happen if one's
748 * PCI controller is completely broken, like the BCM1250. For
749 * sane hardware this is not an issue.
751 status = qla1280_64bit_start_scsi(ha, sp);
753 status = qla1280_32bit_start_scsi(ha, sp);
758 static DEF_SCSI_QCMD(qla1280_queuecommand)
768 static void qla1280_mailbox_timeout(unsigned long __data)
770 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
771 struct device_reg __iomem *reg;
774 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
775 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
776 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
777 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
778 complete(ha->mailbox_wait);
782 _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
783 struct completion *wait)
786 struct scsi_cmnd *cmd = sp->cmd;
788 spin_unlock_irq(ha->host->host_lock);
789 wait_for_completion_timeout(wait, 4*HZ);
790 spin_lock_irq(ha->host->host_lock);
792 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
794 (*cmd->scsi_done)(cmd);
800 qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
802 DECLARE_COMPLETION_ONSTACK(wait);
805 return _qla1280_wait_for_single_command(ha, sp, &wait);
809 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
814 struct scsi_cmnd *cmd;
819 * Wait for all commands with the designated bus/target
820 * to be completed by the firmware
822 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
823 sp = ha->outstanding_cmds[cnt];
827 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
829 if (target >= 0 && SCSI_TCN_32(cmd) != target)
832 status = qla1280_wait_for_single_command(ha, sp);
833 if (status == FAILED)
840 /**************************************************************************
841 * qla1280_error_action
842 * The function will attempt to perform a specified error action and
843 * wait for the results (or time out).
846 * cmd = Linux SCSI command packet of the command that caused the
848 * action = error action to take (see action_t)
853 **************************************************************************/
855 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
857 struct scsi_qla_host *ha;
858 int bus, target, lun;
863 int wait_for_target = -1;
864 DECLARE_COMPLETION_ONSTACK(wait);
866 ENTER("qla1280_error_action");
868 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
869 sp = (struct srb *)CMD_SP(cmd);
870 bus = SCSI_BUS_32(cmd);
871 target = SCSI_TCN_32(cmd);
872 lun = SCSI_LUN_32(cmd);
874 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
875 RD_REG_WORD(&ha->iobase->istatus));
877 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
878 RD_REG_WORD(&ha->iobase->host_cmd),
879 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
882 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
883 "Handle=0x%p, action=0x%x\n",
884 ha->host_no, cmd, CMD_HANDLE(cmd), action);
887 * Check to see if we have the command in the outstanding_cmds[]
888 * array. If not then it must have completed before this error
889 * action was initiated. If the error_action isn't ABORT_COMMAND
890 * then the driver must proceed with the requested action.
893 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
894 if (sp == ha->outstanding_cmds[i]) {
896 sp->wait = &wait; /* we'll wait for it to complete */
901 if (found < 0) { /* driver doesn't have command */
903 if (qla1280_verbose) {
905 "scsi(%ld:%d:%d:%d): specified command has "
906 "already completed.\n", ha->host_no, bus,
914 dprintk(1, "qla1280: RISC aborting command\n");
916 * The abort might fail due to a race when the host_lock
917 * is released to issue the abort. As such, we
918 * don't bother to check the return status.
921 qla1280_abort_command(ha, sp, found);
927 "scsi(%ld:%d:%d:%d): Queueing device reset "
928 "command.\n", ha->host_no, bus, target, lun);
929 if (qla1280_device_reset(ha, bus, target) == 0) {
930 /* issued device reset, set wait conditions */
932 wait_for_target = target;
938 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
939 "reset.\n", ha->host_no, bus);
940 if (qla1280_bus_reset(ha, bus) == 0) {
941 /* issued bus reset, set wait conditions */
948 if (qla1280_verbose) {
950 "scsi(%ld): Issued ADAPTER RESET\n",
952 printk(KERN_INFO "scsi(%ld): I/O processing will "
953 "continue automatically\n", ha->host_no);
955 ha->flags.reset_active = 1;
957 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
961 ha->flags.reset_active = 0;
965 * At this point, the host_lock has been released and retaken
966 * by the issuance of the mailbox command.
967 * Wait for the command passed in by the mid-layer if it
968 * was found by the driver. It might have been returned
969 * between eh recovery steps, hence the check of the "found"
974 result = _qla1280_wait_for_single_command(ha, sp, &wait);
976 if (action == ABORT_COMMAND && result != SUCCESS) {
978 "scsi(%li:%i:%i:%i): "
979 "Unable to abort command!\n",
980 ha->host_no, bus, target, lun);
984 * If the command passed in by the mid-layer has been
985 * returned by the board, then wait for any additional
986 * commands which are supposed to complete based upon
989 * All commands are unconditionally returned during a
990 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
993 if (result == SUCCESS && wait_for_bus >= 0) {
994 result = qla1280_wait_for_pending_commands(ha,
995 wait_for_bus, wait_for_target);
998 dprintk(1, "RESET returning %d\n", result);
1000 LEAVE("qla1280_error_action");
1004 /**************************************************************************
1006 * Abort the specified SCSI command(s).
1007 **************************************************************************/
1009 qla1280_eh_abort(struct scsi_cmnd * cmd)
1013 spin_lock_irq(cmd->device->host->host_lock);
1014 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1015 spin_unlock_irq(cmd->device->host->host_lock);
1020 /**************************************************************************
1021 * qla1280_device_reset
1022 * Reset the specified SCSI device
1023 **************************************************************************/
1025 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1029 spin_lock_irq(cmd->device->host->host_lock);
1030 rc = qla1280_error_action(cmd, DEVICE_RESET);
1031 spin_unlock_irq(cmd->device->host->host_lock);
1036 /**************************************************************************
1038 * Reset the specified bus.
1039 **************************************************************************/
1041 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1045 spin_lock_irq(cmd->device->host->host_lock);
1046 rc = qla1280_error_action(cmd, BUS_RESET);
1047 spin_unlock_irq(cmd->device->host->host_lock);
1052 /**************************************************************************
1053 * qla1280_adapter_reset
1054 * Reset the specified adapter (both channels)
1055 **************************************************************************/
1057 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1061 spin_lock_irq(cmd->device->host->host_lock);
1062 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1063 spin_unlock_irq(cmd->device->host->host_lock);
1069 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1070 sector_t capacity, int geom[])
1072 int heads, sectors, cylinders;
1074 heads = 64;
1075 sectors = 32;
1076 cylinders = (unsigned long)capacity / (heads * sectors);
1077 if (cylinders > 1024) {
1078 heads = 255;
1079 sectors = 63;
1080 cylinders = (unsigned long)capacity / (heads * sectors);
1081 /* if (cylinders > 1023)
1082 cylinders = 1023; */
1083 }
1085 geom[0] = heads;
1086 geom[1] = sectors;
1087 geom[2] = cylinders;
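/*
 * Added example, assuming the standard 64/32 and 255/63 translations
 * set up above: a 16 GB disk (capacity = 33554432 sectors) gives
 * 33554432 / (64 * 32) = 16384 cylinders, which exceeds 1024, so the
 * 255/63 geometry is used instead: 33554432 / (255 * 63) ~= 2088
 * cylinders.
 */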
1093 /* disable risc and host interrupts */
1095 qla1280_disable_intrs(struct scsi_qla_host *ha)
1097 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1098 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1101 /* enable risc and host interrupts */
1103 qla1280_enable_intrs(struct scsi_qla_host *ha)
1105 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1106 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1109 /**************************************************************************
1110 * qla1280_intr_handler
1111 * Handles the H/W interrupt
1112 **************************************************************************/
1114 qla1280_intr_handler(int irq, void *dev_id)
1116 struct scsi_qla_host *ha;
1117 struct device_reg __iomem *reg;
1121 ENTER_INTR ("qla1280_intr_handler");
1122 ha = (struct scsi_qla_host *)dev_id;
1124 spin_lock(ha->host->host_lock);
1129 qla1280_disable_intrs(ha);
1131 data = qla1280_debounce_register(&reg->istatus);
1132 /* Check for pending interrupts. */
1133 if (data & RISC_INT) {
1134 qla1280_isr(ha, &ha->done_q);
1137 if (!list_empty(&ha->done_q))
1140 spin_unlock(ha->host->host_lock);
1142 qla1280_enable_intrs(ha);
1144 LEAVE_INTR("qla1280_intr_handler");
1145 return IRQ_RETVAL(handled);
1150 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1153 uint16_t mb[MAILBOX_REGISTER_COUNT];
1159 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1161 /* Set Target Parameters. */
1162 mb[0] = MBC_SET_TARGET_PARAMETERS;
1163 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1164 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1165 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1166 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1167 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1168 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1169 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1170 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1171 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1173 if (IS_ISP1x160(ha)) {
1174 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1175 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1176 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1177 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1180 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1182 mb[3] |= nv->bus[bus].target[target].sync_period;
1184 status = qla1280_mailbox_command(ha, mr, mb);
1186 /* Set Device Queue Parameters. */
1187 for (lun = 0; lun < MAX_LUNS; lun++) {
1188 mb[0] = MBC_SET_DEVICE_QUEUE;
1189 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1191 mb[2] = nv->bus[bus].max_queue_depth;
1192 mb[3] = nv->bus[bus].target[target].execution_throttle;
1193 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1197 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1198 "qla1280_set_target_parameters() failed\n",
1199 ha->host_no, bus, target);
1204 /**************************************************************************
1205 * qla1280_slave_configure
1208 * Determines the queue depth for a given device. There are two ways
1209 * a queue depth can be obtained for a tagged queueing device. One
1210 * way is the default queue depth, which is used as the default if
1211 * one has been defined.
1212 * Otherwise, we use either 4 or 8 as the
1213 * default queue depth (dependent on the number of hardware SCBs).
1214 **************************************************************************/
1216 qla1280_slave_configure(struct scsi_device *device)
1218 struct scsi_qla_host *ha;
1219 int default_depth = 3;
1220 int bus = device->channel;
1221 int target = device->id;
1224 unsigned long flags;
1226 ha = (struct scsi_qla_host *)device->host->hostdata;
1229 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1232 if (device->tagged_supported &&
1233 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1234 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1235 ha->bus_settings[bus].hiwat);
1237 scsi_adjust_queue_depth(device, 0, default_depth);
1240 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1241 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1242 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1244 if (driver_setup.no_sync ||
1245 (driver_setup.sync_mask &&
1246 (~driver_setup.sync_mask & (1 << target))))
1247 nv->bus[bus].target[target].parameter.enable_sync = 0;
1248 if (driver_setup.no_wide ||
1249 (driver_setup.wide_mask &&
1250 (~driver_setup.wide_mask & (1 << target))))
1251 nv->bus[bus].target[target].parameter.enable_wide = 0;
1252 if (IS_ISP1x160(ha)) {
1253 if (driver_setup.no_ppr ||
1254 (driver_setup.ppr_mask &&
1255 (~driver_setup.ppr_mask & (1 << target))))
1256 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1259 spin_lock_irqsave(ha->host->host_lock, flags);
1260 if (nv->bus[bus].target[target].parameter.enable_sync)
1261 status = qla1280_set_target_parameters(ha, bus, target);
1262 qla1280_get_target_parameters(ha, device);
1263 spin_unlock_irqrestore(ha->host->host_lock, flags);
1270 * Process completed commands.
1273 * ha = adapter block pointer.
1276 qla1280_done(struct scsi_qla_host *ha)
1279 struct list_head *done_q;
1280 int bus, target, lun;
1281 struct scsi_cmnd *cmd;
1283 ENTER("qla1280_done");
1285 done_q = &ha->done_q;
1287 while (!list_empty(done_q)) {
1288 sp = list_entry(done_q->next, struct srb, list);
1290 list_del(&sp->list);
1293 bus = SCSI_BUS_32(cmd);
1294 target = SCSI_TCN_32(cmd);
1295 lun = SCSI_LUN_32(cmd);
1297 switch ((CMD_RESULT(cmd) >> 16)) {
1299 /* Issue marker command. */
1300 if (!ha->flags.abort_isp_active)
1301 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1304 sp->flags &= ~SRB_ABORT_PENDING;
1305 sp->flags |= SRB_ABORTED;
1311 /* Release memory used for this I/O */
1312 scsi_dma_unmap(cmd);
1314 /* Call the mid-level driver interrupt handler */
1317 if (sp->wait == NULL)
1318 (*(cmd)->scsi_done)(cmd);
1322 LEAVE("qla1280_done");
1326 * Translates an ISP error to a Linux SCSI error
1329 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1331 int host_status = DID_ERROR;
1332 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1333 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1334 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1335 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1336 #if DEBUG_QLA1280_INTR
1337 static char *reason[] = {
1349 #endif /* DEBUG_QLA1280_INTR */
1351 ENTER("qla1280_return_status");
1353 #if DEBUG_QLA1280_INTR
1355 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1360 switch (comp_status) {
1362 host_status = DID_OK;
1366 if (!(state_flags & SF_GOT_BUS))
1367 host_status = DID_NO_CONNECT;
1368 else if (!(state_flags & SF_GOT_TARGET))
1369 host_status = DID_BAD_TARGET;
1370 else if (!(state_flags & SF_SENT_CDB))
1371 host_status = DID_ERROR;
1372 else if (!(state_flags & SF_TRANSFERRED_DATA))
1373 host_status = DID_ERROR;
1374 else if (!(state_flags & SF_GOT_STATUS))
1375 host_status = DID_ERROR;
1376 else if (!(state_flags & SF_GOT_SENSE))
1377 host_status = DID_ERROR;
1381 host_status = DID_RESET;
1385 host_status = DID_ABORT;
1389 host_status = DID_TIME_OUT;
1392 case CS_DATA_OVERRUN:
1393 dprintk(2, "Data overrun 0x%x\n", residual_length);
1394 dprintk(2, "qla1280_return_status: response packet data\n");
1395 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1396 host_status = DID_ERROR;
1399 case CS_DATA_UNDERRUN:
1400 if ((scsi_bufflen(cp) - residual_length) <
1403 "scsi: Underflow detected - retrying "
1405 host_status = DID_ERROR;
1407 scsi_set_resid(cp, residual_length);
1408 host_status = DID_OK;
1413 host_status = DID_ERROR;
1417 #if DEBUG_QLA1280_INTR
1418 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1419 reason[host_status], scsi_status);
1422 LEAVE("qla1280_return_status");
1424 return (scsi_status & 0xff) | (host_status << 16);
1427 /****************************************************************************/
1428 /* QLogic ISP1280 Hardware Support Functions. */
1429 /****************************************************************************/
1432 * qla1280_initialize_adapter
1436 * ha = adapter block pointer.
1441 static int __devinit
1442 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1444 struct device_reg __iomem *reg;
1447 unsigned long flags;
1449 ENTER("qla1280_initialize_adapter");
1451 /* Clear adapter flags. */
1452 ha->flags.online = 0;
1453 ha->flags.disable_host_adapter = 0;
1454 ha->flags.reset_active = 0;
1455 ha->flags.abort_isp_active = 0;
1457 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1458 if (ia64_platform_is("sn2")) {
1459 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1460 "dual channel lockup workaround\n", ha->host_no);
1461 ha->flags.use_pci_vchannel = 1;
1462 driver_setup.no_nvram = 1;
1466 /* TODO: implement support for the 1040 nvram format */
1468 driver_setup.no_nvram = 1;
1470 dprintk(1, "Configure PCI space for adapter...\n");
1474 /* Ensure mailbox registers are free. */
1475 WRT_REG_WORD(&reg->semaphore, 0);
1476 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1477 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1478 RD_REG_WORD(&reg->host_cmd);
1480 if (qla1280_read_nvram(ha)) {
1481 dprintk(2, "qla1280_initialize_adapter: failed to read "
1486 * It's necessary to grab the spinlock here as qla1280_mailbox_command
1487 * needs to be able to drop the lock unconditionally to wait
1490 spin_lock_irqsave(ha->host->host_lock, flags);
1492 status = qla1280_load_firmware(ha);
1494 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1499 /* Setup adapter based on NVRAM parameters. */
1500 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1501 qla1280_nvram_config(ha);
1503 if (ha->flags.disable_host_adapter) {
1508 status = qla1280_init_rings(ha);
1512 /* Issue SCSI reset; if we can't reset twice then the bus is dead */
1513 for (bus = 0; bus < ha->ports; bus++) {
1514 if (!ha->bus_settings[bus].disable_scsi_reset &&
1515 qla1280_bus_reset(ha, bus) &&
1516 qla1280_bus_reset(ha, bus))
1517 ha->bus_settings[bus].scsi_bus_dead = 1;
1520 ha->flags.online = 1;
1522 spin_unlock_irqrestore(ha->host->host_lock, flags);
1525 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1527 LEAVE("qla1280_initialize_adapter");
1532 * qla1280_request_firmware
1533 * Acquire firmware for chip. Retain in memory
1534 * for error recovery.
1537 * ha = adapter block pointer.
1540 * Pointer to firmware image or an error code
1541 * cast to pointer via ERR_PTR().
1543 static const struct firmware *
1544 qla1280_request_firmware(struct scsi_qla_host *ha)
1546 const struct firmware *fw;
1551 spin_unlock_irq(ha->host->host_lock);
1552 mutex_lock(&qla1280_firmware_mutex);
1554 index = ql1280_board_tbl[ha->devnum].fw_index;
1555 fw = qla1280_fw_tbl[index].fw;
1559 fwname = qla1280_fw_tbl[index].fwname;
1560 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1563 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1568 if ((fw->size % 2) || (fw->size < 6)) {
1569 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1571 release_firmware(fw);
1572 fw = ERR_PTR(-EINVAL);
1576 qla1280_fw_tbl[index].fw = fw;
1579 ha->fwver1 = fw->data[0];
1580 ha->fwver2 = fw->data[1];
1581 ha->fwver3 = fw->data[2];
1583 mutex_unlock(&qla1280_firmware_mutex);
1584 spin_lock_irq(ha->host->host_lock);
1590 * Test chip for proper operation.
1593 * ha = adapter block pointer.
1599 qla1280_chip_diag(struct scsi_qla_host *ha)
1601 uint16_t mb[MAILBOX_REGISTER_COUNT];
1602 struct device_reg __iomem *reg = ha->iobase;
1606 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1608 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1610 /* Soft reset chip and wait for it to finish. */
1611 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1614 * We can't do a traditional PCI write flush here by reading
1615 * back the register. The card will not respond once the reset
1616 * is in action and we end up with a machine check exception
1617 * instead. Nothing to do but wait and hope for the best.
1618 * A portable pci_write_flush(pdev) call would be very useful here.
1621 data = qla1280_debounce_register(&reg->ictrl);
1623 * Yet another QLogic gem ;-(
1625 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1627 data = RD_REG_WORD(&reg->ictrl);
1633 /* Reset register cleared by chip reset. */
1634 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1636 WRT_REG_WORD(&reg->cfg_1, 0);
1638 /* Reset RISC and disable BIOS which
1639 allows RISC to execute out of RAM. */
1640 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1641 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1643 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1644 data = qla1280_debounce_register(&reg->mailbox0);
1647 * I *LOVE* this code!
1649 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1651 data = RD_REG_WORD(&reg->mailbox0);
1657 /* Check product ID of chip */
1658 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1660 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1661 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1662 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1663 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1664 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1665 printk(KERN_INFO "qla1280: Wrong product ID = "
1666 "0x%x,0x%x,0x%x,0x%x\n",
1667 RD_REG_WORD(&reg->mailbox1),
1668 RD_REG_WORD(&reg->mailbox2),
1669 RD_REG_WORD(&reg->mailbox3),
1670 RD_REG_WORD(&reg->mailbox4));
1675 * Enable ints early!!!
1677 qla1280_enable_intrs(ha);
1679 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1680 /* Wrap Incoming Mailboxes Test. */
1681 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1690 status = qla1280_mailbox_command(ha, 0xff, mb);
1694 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1695 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1697 printk(KERN_INFO "qla1280: Failed mbox check\n");
1701 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1704 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1709 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1711 /* enter with host_lock acquired */
1713 const struct firmware *fw;
1714 const __le16 *fw_data;
1715 uint16_t risc_address, risc_code_size;
1716 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1719 fw = qla1280_request_firmware(ha);
1723 fw_data = (const __le16 *)&fw->data[0];
1724 ha->fwstart = __le16_to_cpu(fw_data[2]);
1726 /* Load RISC code. */
1727 risc_address = ha->fwstart;
1728 fw_data = (const __le16 *)&fw->data[6];
1729 risc_code_size = (fw->size - 6) / 2;
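/*
 * Added note: as the parsing above implies, the image begins with a
 * 6-byte header of three little-endian 16-bit words, of which word 2
 * is the RISC load address (saved in ha->fwstart); the remaining
 * (fw->size - 6) / 2 words are the RISC code itself.
 * qla1280_request_firmware() rejects images shorter than 6 bytes or of
 * odd length.
 */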
1731 for (i = 0; i < risc_code_size; i++) {
1732 mb[0] = MBC_WRITE_RAM_WORD;
1733 mb[1] = risc_address + i;
1734 mb[2] = __le16_to_cpu(fw_data[i]);
1736 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1738 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1747 #define DUMP_IT_BACK 0 /* for debug of RISC loading */
1749 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1751 /* enter with host_lock acquired */
1752 const struct firmware *fw;
1753 const __le16 *fw_data;
1754 uint16_t risc_address, risc_code_size;
1755 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1756 int err = 0, num, i;
1761 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1766 fw = qla1280_request_firmware(ha);
1770 fw_data = (const __le16 *)&fw->data[0];
1771 ha->fwstart = __le16_to_cpu(fw_data[2]);
1773 /* Load RISC code. */
1774 risc_address = ha->fwstart;
1775 fw_data = (const __le16 *)&fw->data[6];
1776 risc_code_size = (fw->size - 6) / 2;
1778 dprintk(1, "%s: DMA RISC code (%i) words\n",
1779 __func__, risc_code_size);
1782 while (risc_code_size > 0) {
1783 int warn __attribute__((unused)) = 0;
1787 if (cnt > risc_code_size)
1788 cnt = risc_code_size;
1790 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1792 fw_data, cnt, num, risc_address);
1793 for(i = 0; i < cnt; i++)
1794 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1796 mb[0] = MBC_LOAD_RAM;
1797 mb[1] = risc_address;
1799 mb[3] = ha->request_dma & 0xffff;
1800 mb[2] = (ha->request_dma >> 16) & 0xffff;
1801 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1802 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1803 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1805 (void *)(long)ha->request_dma,
1806 mb[6], mb[7], mb[2], mb[3]);
1807 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1810 printk(KERN_ERR "scsi(%li): Failed to load partial "
1811 "segment of f/w\n", ha->host_no);
1816 mb[0] = MBC_DUMP_RAM;
1817 mb[1] = risc_address;
1819 mb[3] = p_tbuf & 0xffff;
1820 mb[2] = (p_tbuf >> 16) & 0xffff;
1821 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1822 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1824 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1828 "Failed to dump partial segment of f/w\n");
1831 sp = (uint8_t *)ha->request_ring;
1832 for (i = 0; i < (cnt << 1); i++) {
1833 if (tbuf[i] != sp[i] && warn++ < 10) {
1834 printk(KERN_ERR "%s: FW compare error @ "
1835 "byte(0x%x) loop#=%x\n",
1837 printk(KERN_ERR "%s: FWbyte=%x "
1839 __func__, sp[i], tbuf[i]);
1844 risc_address += cnt;
1845 risc_code_size = risc_code_size - cnt;
1846 fw_data = fw_data + cnt;
1852 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1858 qla1280_start_firmware(struct scsi_qla_host *ha)
1860 uint16_t mb[MAILBOX_REGISTER_COUNT];
1863 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1866 /* Verify checksum of loaded RISC code. */
1867 mb[0] = MBC_VERIFY_CHECKSUM;
1868 /* mb[1] = ql12_risc_code_addr01; */
1869 mb[1] = ha->fwstart;
1870 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1872 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1876 /* Start firmware execution. */
1877 dprintk(1, "%s: start firmware running.\n", __func__);
1878 mb[0] = MBC_EXECUTE_FIRMWARE;
1879 mb[1] = ha->fwstart;
1880 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1882 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1890 qla1280_load_firmware(struct scsi_qla_host *ha)
1892 /* enter with host_lock taken */
1895 err = qla1280_chip_diag(ha);
1899 err = qla1280_load_firmware_pio(ha);
1901 err = qla1280_load_firmware_dma(ha);
1904 err = qla1280_start_firmware(ha);
1913 * ha = adapter block pointer.
1914 * ha->request_ring = request ring virtual address
1915 * ha->response_ring = response ring virtual address
1916 * ha->request_dma = request ring physical address
1917 * ha->response_dma = response ring physical address
1923 qla1280_init_rings(struct scsi_qla_host *ha)
1925 uint16_t mb[MAILBOX_REGISTER_COUNT];
1928 ENTER("qla1280_init_rings");
1930 /* Clear outstanding commands array. */
1931 memset(ha->outstanding_cmds, 0,
1932 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1934 /* Initialize request queue. */
1935 ha->request_ring_ptr = ha->request_ring;
1936 ha->req_ring_index = 0;
1937 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1938 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1939 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1940 mb[1] = REQUEST_ENTRY_CNT;
1941 mb[3] = ha->request_dma & 0xffff;
1942 mb[2] = (ha->request_dma >> 16) & 0xffff;
1944 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1945 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1946 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1947 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1949 /* Initialize response queue. */
1950 ha->response_ring_ptr = ha->response_ring;
1951 ha->rsp_ring_index = 0;
1952 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1953 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1954 mb[1] = RESPONSE_ENTRY_CNT;
1955 mb[3] = ha->response_dma & 0xffff;
1956 mb[2] = (ha->response_dma >> 16) & 0xffff;
1958 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1959 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1960 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1961 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1966 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1968 LEAVE("qla1280_init_rings");
1973 qla1280_print_settings(struct nvram *nv)
1975 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1976 nv->bus[0].config_1.initiator_id);
1977 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1978 nv->bus[1].config_1.initiator_id);
1980 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1981 nv->bus[0].bus_reset_delay);
1982 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1983 nv->bus[1].bus_reset_delay);
1985 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1986 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1987 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1988 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1990 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1991 nv->bus[0].config_2.async_data_setup_time);
1992 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1993 nv->bus[1].config_2.async_data_setup_time);
1995 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1996 nv->bus[0].config_2.req_ack_active_negation);
1997 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1998 nv->bus[1].config_2.req_ack_active_negation);
2000 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
2001 nv->bus[0].config_2.data_line_active_negation);
2002 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
2003 nv->bus[1].config_2.data_line_active_negation);
2005 dprintk(1, "qla1280 : disable loading risc code=%d\n",
2006 nv->cntr_flags_1.disable_loading_risc_code);
2008 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2009 nv->cntr_flags_1.enable_64bit_addressing);
2011 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2012 nv->bus[0].selection_timeout);
2013 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2014 nv->bus[1].selection_timeout);
2016 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2017 nv->bus[0].max_queue_depth);
2018 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2019 nv->bus[1].max_queue_depth);
2023 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2025 struct nvram *nv = &ha->nvram;
2027 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2028 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2029 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2030 nv->bus[bus].target[target].parameter.enable_sync = 1;
2031 #if 1 /* Some SCSI Processors do not seem to like this */
2032 nv->bus[bus].target[target].parameter.enable_wide = 1;
2034 nv->bus[bus].target[target].execution_throttle =
2035 nv->bus[bus].max_queue_depth - 1;
2036 nv->bus[bus].target[target].parameter.parity_checking = 1;
2037 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
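/*
 * The per-chip settings below use SCSI transfer period factors rather
 * than nanoseconds; assuming the standard period-factor encoding, 9
 * corresponds to 12.5ns (Ultra3/Ultra160 rates on the 1x160 chips) and
 * 10 to 25ns (Ultra2 on the 1x80).
 */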
2039 if (IS_ISP1x160(ha)) {
2040 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2041 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2042 nv->bus[bus].target[target].sync_period = 9;
2043 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2044 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2045 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2047 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2048 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2049 nv->bus[bus].target[target].sync_period = 10;
2054 qla1280_set_defaults(struct scsi_qla_host *ha)
2056 struct nvram *nv = &ha->nvram;
2059 dprintk(1, "Using defaults for NVRAM: \n");
2060 memset(nv, 0, sizeof(struct nvram));
2062 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2063 nv->firmware_feature.f.enable_fast_posting = 1;
2064 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2065 nv->termination.scsi_bus_0_control = 3;
2066 nv->termination.scsi_bus_1_control = 3;
2067 nv->termination.auto_term_support = 1;
2070 * Set default FIFO magic - What appropriate values would be here
2071 * is unknown. This is what I have found testing with 12160s.
2073 * Now, I would love the magic decoder ring for this one, the
2074 * header file provided by QLogic seems to be bogus or incomplete
2077 nv->isp_config.burst_enable = 1;
2079 nv->isp_config.fifo_threshold |= 3;
2081 nv->isp_config.fifo_threshold |= 4;
2083 if (IS_ISP1x160(ha))
2084 nv->isp_parameter = 0x01; /* fast memory enable */
2086 for (bus = 0; bus < MAX_BUSES; bus++) {
2087 nv->bus[bus].config_1.initiator_id = 7;
2088 nv->bus[bus].config_2.req_ack_active_negation = 1;
2089 nv->bus[bus].config_2.data_line_active_negation = 1;
2090 nv->bus[bus].selection_timeout = 250;
2091 nv->bus[bus].max_queue_depth = 32;
2093 if (IS_ISP1040(ha)) {
2094 nv->bus[bus].bus_reset_delay = 3;
2095 nv->bus[bus].config_2.async_data_setup_time = 6;
2096 nv->bus[bus].retry_delay = 1;
2098 nv->bus[bus].bus_reset_delay = 5;
2099 nv->bus[bus].config_2.async_data_setup_time = 8;
2102 for (target = 0; target < MAX_TARGETS; target++)
2103 qla1280_set_target_defaults(ha, bus, target);
2108 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2110 struct nvram *nv = &ha->nvram;
2111 uint16_t mb[MAILBOX_REGISTER_COUNT];
2115 /* Set Target Parameters. */
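/*
 * mb[1] encodes the SCSI address for target-directed mailbox commands:
 * the target id sits in the upper byte, BIT_7 of that byte selects bus 1,
 * and LUN-specific commands (e.g. qla1280_abort_command()) add the LUN
 * in the low byte.
 */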
2116 mb[0] = MBC_SET_TARGET_PARAMETERS;
2117 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2120 * Do not enable sync and ppr for the initial INQUIRY run. We
2121 * enable this later if we determine the target actually supports it.
2124 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2125 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2127 if (IS_ISP1x160(ha))
2128 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2130 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2131 mb[3] |= nv->bus[bus].target[target].sync_period;
2132 status = qla1280_mailbox_command(ha, 0x0f, mb);
2134 /* Save Tag queuing enable flag. */
2135 flag = (BIT_0 << target);
2136 if (nv->bus[bus].target[target].parameter.tag_queuing)
2137 ha->bus_settings[bus].qtag_enables |= flag;
2139 /* Save Device enable flag. */
2140 if (IS_ISP1x160(ha)) {
2141 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2142 ha->bus_settings[bus].device_enables |= flag;
2143 ha->bus_settings[bus].lun_disables |= 0;
2145 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2146 ha->bus_settings[bus].device_enables |= flag;
2147 /* Save LUN disable flag. */
2148 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2149 ha->bus_settings[bus].lun_disables |= flag;
2152 /* Set Device Queue Parameters. */
2153 for (lun = 0; lun < MAX_LUNS; lun++) {
2154 mb[0] = MBC_SET_DEVICE_QUEUE;
2155 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2157 mb[2] = nv->bus[bus].max_queue_depth;
2158 mb[3] = nv->bus[bus].target[target].execution_throttle;
2159 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2166 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2168 struct nvram *nv = &ha->nvram;
2169 uint16_t mb[MAILBOX_REGISTER_COUNT];
2172 /* SCSI Reset Disable. */
2173 ha->bus_settings[bus].disable_scsi_reset =
2174 nv->bus[bus].config_1.scsi_reset_disable;
2177 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2178 mb[0] = MBC_SET_INITIATOR_ID;
2179 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2180 ha->bus_settings[bus].id;
2181 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2184 ha->bus_settings[bus].bus_reset_delay =
2185 nv->bus[bus].bus_reset_delay;
2187 /* Command queue depth per device. */
2188 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2190 /* Set target parameters. */
2191 for (target = 0; target < MAX_TARGETS; target++)
2192 status |= qla1280_config_target(ha, bus, target);
2198 qla1280_nvram_config(struct scsi_qla_host *ha)
2200 struct device_reg __iomem *reg = ha->iobase;
2201 struct nvram *nv = &ha->nvram;
2202 int bus, target, status = 0;
2203 uint16_t mb[MAILBOX_REGISTER_COUNT];
2205 ENTER("qla1280_nvram_config");
2207 if (ha->nvram_valid) {
2208 /* Always force AUTO sense for LINUX SCSI */
2209 for (bus = 0; bus < MAX_BUSES; bus++)
2210 for (target = 0; target < MAX_TARGETS; target++) {
2211 nv->bus[bus].target[target].parameter.
2212 auto_request_sense = 1;
2215 qla1280_set_defaults(ha);
2218 qla1280_print_settings(nv);
2220 /* Disable RISC load of firmware. */
2221 ha->flags.disable_risc_code_load =
2222 nv->cntr_flags_1.disable_loading_risc_code;
2224 if (IS_ISP1040(ha)) {
2225 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2227 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2229 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2230 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2231 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2233 /* Busted fifo, says mjacob. */
2234 if (hwrev != ISP_CFG0_1040A)
2235 cfg1 |= nv->isp_config.fifo_threshold << 4;
2237 cfg1 |= nv->isp_config.burst_enable << 2;
2238 WRT_REG_WORD(&reg->cfg_1, cfg1);
2240 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2241 WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2243 uint16_t cfg1, term;
2245 /* Set ISP hardware DMA burst */
2246 cfg1 = nv->isp_config.fifo_threshold << 4;
2247 cfg1 |= nv->isp_config.burst_enable << 2;
2248 /* Enable DMA arbitration on dual channel controllers */
2251 WRT_REG_WORD(&reg->cfg_1, cfg1);
2253 /* Set SCSI termination. */
2254 WRT_REG_WORD(&reg->gpio_enable,
2255 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2256 term = nv->termination.scsi_bus_1_control;
2257 term |= nv->termination.scsi_bus_0_control << 2;
2258 term |= nv->termination.auto_term_support << 7;
2259 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2260 WRT_REG_WORD(&reg->gpio_data, term);
2262 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2264 /* ISP parameter word. */
2265 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2266 mb[1] = nv->isp_parameter;
2267 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2269 if (IS_ISP1x40(ha)) {
2270 /* clock rate - for qla1240 and older, only */
2271 mb[0] = MBC_SET_CLOCK_RATE;
2273 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2276 /* Firmware feature word. */
2277 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2278 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2279 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2280 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2281 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2282 if (ia64_platform_is("sn2")) {
2283 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2284 "workaround\n", ha->host_no);
2285 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2288 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2290 /* Retry count and delay. */
2291 mb[0] = MBC_SET_RETRY_COUNT;
2292 mb[1] = nv->bus[0].retry_count;
2293 mb[2] = nv->bus[0].retry_delay;
2294 mb[6] = nv->bus[1].retry_count;
2295 mb[7] = nv->bus[1].retry_delay;
2296 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2297 BIT_1 | BIT_0, &mb[0]);
2299 /* ASYNC data setup time. */
2300 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2301 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2302 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2303 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2305 /* Active negation states. */
2306 mb[0] = MBC_SET_ACTIVE_NEGATION;
2308 if (nv->bus[0].config_2.req_ack_active_negation)
2310 if (nv->bus[0].config_2.data_line_active_negation)
2313 if (nv->bus[1].config_2.req_ack_active_negation)
2315 if (nv->bus[1].config_2.data_line_active_negation)
2317 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2319 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2320 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2321 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2324 mb[0] = MBC_SET_PCI_CONTROL;
2325 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2326 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2327 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2329 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2331 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2333 /* Selection timeout. */
2334 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2335 mb[1] = nv->bus[0].selection_timeout;
2336 mb[2] = nv->bus[1].selection_timeout;
2337 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2339 for (bus = 0; bus < ha->ports; bus++)
2340 status |= qla1280_config_bus(ha, bus);
2343 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2345 LEAVE("qla1280_nvram_config");
2350 * Get NVRAM data word
2351 * Calculates word position in NVRAM and calls request routine to
2352 * get the word from NVRAM.
2355 * ha = adapter block pointer.
2356 * address = NVRAM word address.
2362 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2367 nv_cmd = address << 16;
2368 nv_cmd |= NV_READ_OP;
2370 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2372 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2380 * Sends read command to NVRAM and gets data from NVRAM.
2383 * ha = adapter block pointer.
2384 * nv_cmd = Bit 26 = start bit
2385 * Bit 25, 24 = opcode
2386 * Bit 23-16 = address
2387 * Bit 15-0 = write data
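 *
 * For a read, the command word is simply the NVRAM word address shifted
 * into bits 23-16 with NV_READ_OP or'ed in, exactly as assembled in
 * qla1280_get_nvram_word() above.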
2393 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2395 struct device_reg __iomem *reg = ha->iobase;
2400 /* Send command to NVRAM. */
2403 for (cnt = 0; cnt < 11; cnt++) {
2404 if (nv_cmd & BIT_31)
2405 qla1280_nv_write(ha, NV_DATA_OUT);
2407 qla1280_nv_write(ha, 0);
2411 /* Read data from NVRAM. */
2413 for (cnt = 0; cnt < 16; cnt++) {
2414 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2415 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2418 reg_data = RD_REG_WORD(&reg->nvram);
2419 if (reg_data & NV_DATA_IN)
2421 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2422 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2426 /* Deselect chip. */
2428 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2429 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2436 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2438 struct device_reg __iomem *reg = ha->iobase;
2440 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2441 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2443 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2444 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2446 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2447 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2453 * Issue mailbox command and waits for completion.
2456 * ha = adapter block pointer.
2457 * mr = mailbox registers to load.
2458 * mb = data pointer for mailbox registers.
2461 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2467 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2469 struct device_reg __iomem *reg = ha->iobase;
2472 uint16_t *optr, *iptr;
2473 uint16_t __iomem *mptr;
2475 DECLARE_COMPLETION_ONSTACK(wait);
2476 struct timer_list timer;
2478 ENTER("qla1280_mailbox_command");
2480 if (ha->mailbox_wait) {
2481 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2483 ha->mailbox_wait = &wait;
2486 * We really should start out by verifying that the mailbox is
2487 * available before we start sending the command data
2489 /* Load mailbox registers. */
2490 mptr = (uint16_t __iomem *) &reg->mailbox0;
2492 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2494 WRT_REG_WORD(mptr, (*iptr));
2502 /* Issue set host interrupt command. */
2504 /* set up a timer just in case we're really jammed */
2506 timer.expires = jiffies + 20*HZ;
2507 timer.data = (unsigned long)ha;
2508 timer.function = qla1280_mailbox_timeout;
2511 spin_unlock_irq(ha->host->host_lock);
2512 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2513 data = qla1280_debounce_register(&reg->istatus);
2515 wait_for_completion(&wait);
2516 del_timer_sync(&timer);
2518 spin_lock_irq(ha->host->host_lock);
2520 ha->mailbox_wait = NULL;
2522 /* Check for mailbox command timeout. */
2523 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2524 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2525 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2527 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2528 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2529 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2530 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2531 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2532 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2533 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2537 /* Load return mailbox registers. */
2539 iptr = (uint16_t *) &ha->mailbox_out[0];
2540 mr = MAILBOX_REGISTER_COUNT;
2541 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2543 if (ha->flags.reset_marker)
2544 qla1280_rst_aen(ha);
2547 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2548 "0x%x ****\n", mb[0]);
2550 LEAVE("qla1280_mailbox_command");
2556 * Polls ISP for interrupts.
2559 * ha = adapter block pointer.
2562 qla1280_poll(struct scsi_qla_host *ha)
2564 struct device_reg __iomem *reg = ha->iobase;
2568 /* ENTER("qla1280_poll"); */
2570 /* Check for pending interrupts. */
2571 data = RD_REG_WORD(&reg->istatus);
2572 if (data & RISC_INT)
2573 qla1280_isr(ha, &done_q);
2575 if (!ha->mailbox_wait) {
2576 if (ha->flags.reset_marker)
2577 qla1280_rst_aen(ha);
2580 if (!list_empty(&done_q))
2583 /* LEAVE("qla1280_poll"); */
2588 * Issue SCSI bus reset.
2591 * ha = adapter block pointer.
2592 * bus = SCSI bus number.
2598 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2600 uint16_t mb[MAILBOX_REGISTER_COUNT];
2601 uint16_t reset_delay;
2604 dprintk(3, "qla1280_bus_reset: entered\n");
2606 if (qla1280_verbose)
2607 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2610 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2611 mb[0] = MBC_BUS_RESET;
2612 mb[1] = reset_delay;
2613 mb[2] = (uint16_t) bus;
2614 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2617 if (ha->bus_settings[bus].failed_reset_count > 2)
2618 ha->bus_settings[bus].scsi_bus_dead = 1;
2619 ha->bus_settings[bus].failed_reset_count++;
2621 spin_unlock_irq(ha->host->host_lock);
2622 ssleep(reset_delay);
2623 spin_lock_irq(ha->host->host_lock);
2625 ha->bus_settings[bus].scsi_bus_dead = 0;
2626 ha->bus_settings[bus].failed_reset_count = 0;
2627 ha->bus_settings[bus].reset_marker = 0;
2628 /* Issue marker command. */
2629 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2633 * We should probably call qla1280_set_target_parameters()
2634 * here as well for all devices on the bus.
2638 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2640 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2646 * qla1280_device_reset
2647 * Issue bus device reset message to the target.
2650 * ha = adapter block pointer.
2651 * bus = SCSI BUS number.
2658 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2660 uint16_t mb[MAILBOX_REGISTER_COUNT];
2663 ENTER("qla1280_device_reset");
2665 mb[0] = MBC_ABORT_TARGET;
2666 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2668 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2670 /* Issue marker command. */
2671 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2674 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2676 LEAVE("qla1280_device_reset");
2681 * qla1280_abort_command
2682 * Abort command aborts a specified IOCB.
2685 * ha = adapter block pointer.
2686 * sp = SB structure pointer.
2692 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2694 uint16_t mb[MAILBOX_REGISTER_COUNT];
2695 unsigned int bus, target, lun;
2698 ENTER("qla1280_abort_command");
2700 bus = SCSI_BUS_32(sp->cmd);
2701 target = SCSI_TCN_32(sp->cmd);
2702 lun = SCSI_LUN_32(sp->cmd);
2704 sp->flags |= SRB_ABORT_PENDING;
2706 mb[0] = MBC_ABORT_COMMAND;
2707 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2708 mb[2] = handle >> 16;
2709 mb[3] = handle & 0xffff;
2710 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2713 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2714 sp->flags &= ~SRB_ABORT_PENDING;
2718 LEAVE("qla1280_abort_command");
2723 * qla1280_reset_adapter
2727 * ha = adapter block pointer.
2730 qla1280_reset_adapter(struct scsi_qla_host *ha)
2732 struct device_reg __iomem *reg = ha->iobase;
2734 ENTER("qla1280_reset_adapter");
2736 /* Disable ISP chip */
2737 ha->flags.online = 0;
2738 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2739 WRT_REG_WORD(&reg->host_cmd,
2740 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2741 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2743 LEAVE("qla1280_reset_adapter");
2747 * Issue marker command.
2748 * Function issues marker IOCB.
2751 * ha = adapter block pointer.
2752 * bus = SCSI BUS number
2755 * type = marker modifier
2758 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2760 struct mrk_entry *pkt;
2762 ENTER("qla1280_marker");
2764 /* Get request packet. */
2765 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2766 pkt->entry_type = MARKER_TYPE;
2767 pkt->lun = (uint8_t) lun;
2768 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2769 pkt->modifier = type;
2770 pkt->entry_status = 0;
2772 /* Issue command to ISP */
2773 qla1280_isp_cmd(ha);
2776 LEAVE("qla1280_marker");
2781 * qla1280_64bit_start_scsi
2782 * The start SCSI is responsible for building request packets on
2783 * request ring and modifying ISP input pointer.
2786 * ha = adapter block pointer.
2787 * sp = SB structure pointer.
2790 * 0 = success, was able to issue command.
2792 #ifdef QLA_64BIT_PTR
2794 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2796 struct device_reg __iomem *reg = ha->iobase;
2797 struct scsi_cmnd *cmd = sp->cmd;
2798 cmd_a64_entry_t *pkt;
2800 dma_addr_t dma_handle;
2807 ENTER("qla1280_64bit_start_scsi:");
2809 /* Calculate number of entries and segments required. */
2811 seg_cnt = scsi_dma_map(cmd);
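/*
 * A 64-bit command IOCB carries two S/G descriptors and each
 * continuation IOCB carries another five, hence the arithmetic below.
 */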
2814 req_cnt += (seg_cnt - 2) / 5;
2815 if ((seg_cnt - 2) % 5)
2818 } else if (seg_cnt < 0) {
2823 if ((req_cnt + 2) >= ha->req_q_cnt) {
2824 /* Calculate number of free request entries. */
2825 cnt = RD_REG_WORD(&reg->mailbox4);
2826 if (ha->req_ring_index < cnt)
2827 ha->req_q_cnt = cnt - ha->req_ring_index;
2830 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2833 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2834 ha->req_q_cnt, seg_cnt);
2836 /* If room for request in request ring. */
2837 if ((req_cnt + 2) >= ha->req_q_cnt) {
2838 status = SCSI_MLQUEUE_HOST_BUSY;
2839 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2840 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2845 /* Check for room in outstanding command list. */
2846 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2847 ha->outstanding_cmds[cnt] != NULL; cnt++);
2849 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2850 status = SCSI_MLQUEUE_HOST_BUSY;
2851 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2852 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2856 ha->outstanding_cmds[cnt] = sp;
2857 ha->req_q_cnt -= req_cnt;
2858 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2860 dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2861 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2862 dprintk(2, " bus %i, target %i, lun %i\n",
2863 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2864 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2867 * Build command packet.
2869 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2871 pkt->entry_type = COMMAND_A64_TYPE;
2872 pkt->entry_count = (uint8_t) req_cnt;
2873 pkt->sys_define = (uint8_t) ha->req_ring_index;
2874 pkt->entry_status = 0;
2875 pkt->handle = cpu_to_le32(cnt);
2877 /* Zero out remaining portion of packet. */
2878 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2880 /* Set ISP command timeout. */
2881 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2883 /* Set device target ID and LUN */
2884 pkt->lun = SCSI_LUN_32(cmd);
2885 pkt->target = SCSI_BUS_32(cmd) ?
2886 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2888 /* Enable simple tag queuing if device supports it. */
2889 if (cmd->device->simple_tags)
2890 pkt->control_flags |= cpu_to_le16(BIT_3);
2892 /* Load SCSI command packet. */
2893 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2894 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2895 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2897 /* Set transfer direction. */
2898 dir = qla1280_data_direction(cmd);
2899 pkt->control_flags |= cpu_to_le16(dir);
2901 /* Set total data segment count. */
2902 pkt->dseg_count = cpu_to_le16(seg_cnt);
2905 * Load data segments.
2907 if (seg_cnt) { /* If data transfer. */
2908 struct scatterlist *sg, *s;
2909 int remseg = seg_cnt;
2911 sg = scsi_sglist(cmd);
2913 /* Setup packet address segment pointer. */
2914 dword_ptr = (u32 *)&pkt->dseg_0_address;
2916 /* Load command entry data segments. */
2917 for_each_sg(sg, s, seg_cnt, cnt) {
2921 dma_handle = sg_dma_address(s);
2922 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2923 if (ha->flags.use_pci_vchannel)
2924 sn_pci_set_vchan(ha->pdev,
2925 (unsigned long *)&dma_handle,
2929 cpu_to_le32(pci_dma_lo32(dma_handle));
2931 cpu_to_le32(pci_dma_hi32(dma_handle));
2932 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2933 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2934 cpu_to_le32(pci_dma_hi32(dma_handle)),
2935 cpu_to_le32(pci_dma_lo32(dma_handle)),
2936 cpu_to_le32(sg_dma_len(s)));
2939 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2940 "command packet data - b %i, t %i, l %i \n",
2941 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2943 qla1280_dump_buffer(5, (char *)pkt,
2944 REQUEST_ENTRY_SIZE);
2947 * Build continuation packets.
2949 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2950 "remains\n", seg_cnt);
2952 while (remseg > 0) {
2953 /* Update sg start */
2955 /* Adjust ring index. */
2956 ha->req_ring_index++;
2957 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2958 ha->req_ring_index = 0;
2959 ha->request_ring_ptr =
2962 ha->request_ring_ptr++;
2964 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2966 /* Zero out packet. */
2967 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2969 /* Load packet defaults. */
2970 ((struct cont_a64_entry *) pkt)->entry_type =
2972 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2973 ((struct cont_a64_entry *) pkt)->sys_define =
2974 (uint8_t)ha->req_ring_index;
2975 /* Setup packet address segment pointer. */
2977 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2979 /* Load continuation entry data segments. */
2980 for_each_sg(sg, s, remseg, cnt) {
2983 dma_handle = sg_dma_address(s);
2984 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2985 if (ha->flags.use_pci_vchannel)
2986 sn_pci_set_vchan(ha->pdev,
2987 (unsigned long *)&dma_handle,
2991 cpu_to_le32(pci_dma_lo32(dma_handle));
2993 cpu_to_le32(pci_dma_hi32(dma_handle));
2995 cpu_to_le32(sg_dma_len(s));
2996 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2997 cpu_to_le32(pci_dma_hi32(dma_handle)),
2998 cpu_to_le32(pci_dma_lo32(dma_handle)),
2999 cpu_to_le32(sg_dma_len(s)));
3002 dprintk(5, "qla1280_64bit_start_scsi: "
3003 "continuation packet data - b %i, t "
3004 "%i, l %i \n", SCSI_BUS_32(cmd),
3005 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3006 qla1280_dump_buffer(5, (char *)pkt,
3007 REQUEST_ENTRY_SIZE);
3009 } else { /* No data transfer */
3010 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3011 "packet data - b %i, t %i, l %i \n",
3012 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3013 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3015 /* Adjust ring index. */
3016 ha->req_ring_index++;
3017 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3018 ha->req_ring_index = 0;
3019 ha->request_ring_ptr = ha->request_ring;
3021 ha->request_ring_ptr++;
3023 /* Set chip new ring index. */
3025 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3026 sp->flags |= SRB_SENT;
3028 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3029 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3034 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3036 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3040 #else /* !QLA_64BIT_PTR */
3043 * qla1280_32bit_start_scsi
3044 * The start SCSI is responsible for building request packets on
3045 * request ring and modifying ISP input pointer.
3047 * The Qlogic firmware interface allows every queue slot to have a SCSI
3048 * command and up to 4 scatter/gather (SG) entries. If we need more
3049 * than 4 SG entries, then continuation entries are used that can
3050 * hold another 7 entries each. The start routine determines if there
3051 * are enough empty slots and then builds the combination of requests
3052 * needed to fulfill the OS request.
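 * For example, a 20-segment transfer needs the command entry for the
 * first 4 segments plus ceil(16/7) = 3 continuation entries for the
 * rest, i.e. 4 request queue slots in all.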
3055 * ha = adapter block pointer.
3056 * sp = SCSI Request Block structure pointer.
3059 * 0 = success, was able to issue command.
3062 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3064 struct device_reg __iomem *reg = ha->iobase;
3065 struct scsi_cmnd *cmd = sp->cmd;
3066 struct cmd_entry *pkt;
3074 ENTER("qla1280_32bit_start_scsi");
3076 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3079 /* Calculate number of entries and segments required. */
3081 seg_cnt = scsi_dma_map(cmd);
3084 * if greater than four sg entries then we need to allocate
3085 * continuation entries
3088 req_cnt += (seg_cnt - 4) / 7;
3089 if ((seg_cnt - 4) % 7)
3092 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3093 cmd, seg_cnt, req_cnt);
3094 } else if (seg_cnt < 0) {
3099 if ((req_cnt + 2) >= ha->req_q_cnt) {
3100 /* Calculate number of free request entries. */
3101 cnt = RD_REG_WORD(&reg->mailbox4);
3102 if (ha->req_ring_index < cnt)
3103 ha->req_q_cnt = cnt - ha->req_ring_index;
3106 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3109 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3110 ha->req_q_cnt, seg_cnt);
3111 /* If room for request in request ring. */
3112 if ((req_cnt + 2) >= ha->req_q_cnt) {
3113 status = SCSI_MLQUEUE_HOST_BUSY;
3114 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3115 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3116 ha->req_q_cnt, req_cnt);
3120 /* Check for empty slot in outstanding command list. */
3121 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3122 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3124 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3125 status = SCSI_MLQUEUE_HOST_BUSY;
3126 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3127 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3131 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3132 ha->outstanding_cmds[cnt] = sp;
3133 ha->req_q_cnt -= req_cnt;
3136 * Build command packet.
3138 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3140 pkt->entry_type = COMMAND_TYPE;
3141 pkt->entry_count = (uint8_t) req_cnt;
3142 pkt->sys_define = (uint8_t) ha->req_ring_index;
3143 pkt->entry_status = 0;
3144 pkt->handle = cpu_to_le32(cnt);
3146 /* Zero out remaining portion of packet. */
3147 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3149 /* Set ISP command timeout. */
3150 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3152 /* Set device target ID and LUN */
3153 pkt->lun = SCSI_LUN_32(cmd);
3154 pkt->target = SCSI_BUS_32(cmd) ?
3155 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3157 /* Enable simple tag queuing if device supports it. */
3158 if (cmd->device->simple_tags)
3159 pkt->control_flags |= cpu_to_le16(BIT_3);
3161 /* Load SCSI command packet. */
3162 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3163 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3165 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3166 /* Set transfer direction. */
3167 dir = qla1280_data_direction(cmd);
3168 pkt->control_flags |= cpu_to_le16(dir);
3170 /* Set total data segment count. */
3171 pkt->dseg_count = cpu_to_le16(seg_cnt);
3174 * Load data segments.
3177 struct scatterlist *sg, *s;
3178 int remseg = seg_cnt;
3180 sg = scsi_sglist(cmd);
3182 /* Setup packet address segment pointer. */
3183 dword_ptr = &pkt->dseg_0_address;
3185 dprintk(3, "Building S/G data segments..\n");
3186 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3188 /* Load command entry data segments. */
3189 for_each_sg(sg, s, seg_cnt, cnt) {
3193 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3194 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3195 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3196 (pci_dma_lo32(sg_dma_address(s))),
3201 * Build continuation packets.
3203 dprintk(3, "S/G Building Continuation"
3204 "...seg_cnt=0x%x remains\n", seg_cnt);
3205 while (remseg > 0) {
3206 /* Continue from end point */
3208 /* Adjust ring index. */
3209 ha->req_ring_index++;
3210 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3211 ha->req_ring_index = 0;
3212 ha->request_ring_ptr =
3215 ha->request_ring_ptr++;
3217 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3219 /* Zero out packet. */
3220 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3222 /* Load packet defaults. */
3223 ((struct cont_entry *) pkt)->
3224 entry_type = CONTINUE_TYPE;
3225 ((struct cont_entry *) pkt)->entry_count = 1;
3227 ((struct cont_entry *) pkt)->sys_define =
3228 (uint8_t) ha->req_ring_index;
3230 /* Setup packet address segment pointer. */
3232 &((struct cont_entry *) pkt)->dseg_0_address;
3234 /* Load continuation entry data segments. */
3235 for_each_sg(sg, s, remseg, cnt) {
3239 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3241 cpu_to_le32(sg_dma_len(s));
3243 "S/G Segment Cont. phys_addr=0x%x, "
3245 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3246 cpu_to_le32(sg_dma_len(s)));
3249 dprintk(5, "qla1280_32bit_start_scsi: "
3250 "continuation packet data - "
3251 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3252 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3253 qla1280_dump_buffer(5, (char *)pkt,
3254 REQUEST_ENTRY_SIZE);
3256 } else { /* No data transfer at all */
3257 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3258 "packet data - \n");
3259 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3261 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3262 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3263 REQUEST_ENTRY_SIZE);
3265 /* Adjust ring index. */
3266 ha->req_ring_index++;
3267 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3268 ha->req_ring_index = 0;
3269 ha->request_ring_ptr = ha->request_ring;
3271 ha->request_ring_ptr++;
3273 /* Set chip new ring index. */
3274 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3275 "for pending command\n");
3276 sp->flags |= SRB_SENT;
3278 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3279 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3284 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3286 LEAVE("qla1280_32bit_start_scsi");
3294 * Function is responsible for locking ring and
3295 * getting a zeroed out request packet.
3298 * ha = adapter block pointer.
3301 * 0 = failed to get slot.
3304 qla1280_req_pkt(struct scsi_qla_host *ha)
3306 struct device_reg __iomem *reg = ha->iobase;
3307 request_t *pkt = NULL;
3311 ENTER("qla1280_req_pkt");
3314 * This can be called from interrupt context, damn it!!!
3316 /* Wait for 30 seconds for slot. */
3317 for (timer = 15000000; timer; timer--) {
3318 if (ha->req_q_cnt > 0) {
3319 /* Calculate number of free request entries. */
3320 cnt = RD_REG_WORD(&reg->mailbox4);
3321 if (ha->req_ring_index < cnt)
3322 ha->req_q_cnt = cnt - ha->req_ring_index;
3325 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3328 /* Found empty request ring slot? */
3329 if (ha->req_q_cnt > 0) {
3331 pkt = ha->request_ring_ptr;
3333 /* Zero out packet. */
3334 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3337 * How can this be right when we have a ring
3340 /* Set system defined field. */
3341 pkt->sys_define = (uint8_t) ha->req_ring_index;
3343 /* Set entry count. */
3344 pkt->entry_count = 1;
3351 /* Check for pending interrupts. */
3356 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3358 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3365 * Function is responsible for modifying ISP input pointer.
3366 * Releases ring lock.
3369 * ha = adapter block pointer.
3372 qla1280_isp_cmd(struct scsi_qla_host *ha)
3374 struct device_reg __iomem *reg = ha->iobase;
3376 ENTER("qla1280_isp_cmd");
3378 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3379 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3380 REQUEST_ENTRY_SIZE);
3382 /* Adjust ring index. */
3383 ha->req_ring_index++;
3384 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3385 ha->req_ring_index = 0;
3386 ha->request_ring_ptr = ha->request_ring;
3388 ha->request_ring_ptr++;
3391 * Update request index to mailbox4 (Request Queue In).
3392 * The mmiowb() ensures that this write is ordered with writes by other
3393 * CPUs. Without the mmiowb(), it is possible for the following:
3394 * CPUA posts write of index 5 to mailbox4
3395 * CPUA releases host lock
3396 * CPUB acquires host lock
3397 * CPUB posts write of index 6 to mailbox4
3398 * On PCI bus, order reverses and write of 6 posts, then index 5,
3399 * causing chip to issue full queue of stale commands
3400 * The mmiowb() prevents future writes from crossing the barrier.
3401 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3403 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3406 LEAVE("qla1280_isp_cmd");
3409 /****************************************************************************/
3410 /* Interrupt Service Routine. */
3411 /****************************************************************************/
3413 /****************************************************************************
3415 * Calls I/O done on command completion.
3418 * ha = adapter block pointer.
3419 * done_q = done queue.
3420 ****************************************************************************/
3422 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3424 struct device_reg __iomem *reg = ha->iobase;
3425 struct response *pkt;
3426 struct srb *sp = NULL;
3427 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3432 ENTER("qla1280_isr");
3434 istatus = RD_REG_WORD(&reg->istatus);
3435 if (!(istatus & (RISC_INT | PCI_INT)))
3438 /* Save mailbox register 5 */
3439 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3441 /* Check for mailbox interrupt. */
3443 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3445 if (mailbox[0] & BIT_0) {
3446 /* Get mailbox data. */
3447 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3450 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3451 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3452 *wptr = RD_REG_WORD(&reg->mailbox2);
3453 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3455 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3456 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3458 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3459 *wptr = RD_REG_WORD(&reg->mailbox7);
3462 /* Release mailbox registers. */
3464 WRT_REG_WORD(&reg->semaphore, 0);
3465 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3467 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3470 /* Handle asynchronous event */
3471 switch (mailbox[0]) {
3472 case MBA_SCSI_COMPLETION: /* Response completion */
3473 dprintk(5, "qla1280_isr: mailbox SCSI response "
3476 if (ha->flags.online) {
3477 /* Get outstanding command index. */
3478 index = mailbox[2] << 16 | mailbox[1];
3480 /* Validate handle. */
3481 if (index < MAX_OUTSTANDING_COMMANDS)
3482 sp = ha->outstanding_cmds[index];
3487 /* Free outstanding command slot. */
3488 ha->outstanding_cmds[index] = NULL;
3490 /* Save ISP completion status */
3491 CMD_RESULT(sp->cmd) = 0;
3492 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3494 /* Place block on done queue */
3495 list_add_tail(&sp->list, done_q);
3498 * If we get here we have a real problem!
3501 "qla1280: ISP invalid handle\n");
3506 case MBA_BUS_RESET: /* SCSI Bus Reset */
3507 ha->flags.reset_marker = 1;
3508 index = mailbox[6] & BIT_0;
3509 ha->bus_settings[index].reset_marker = 1;
3511 printk(KERN_DEBUG "qla1280_isr(): index %i "
3512 "asynchronous BUS_RESET\n", index);
3515 case MBA_SYSTEM_ERR: /* System Error */
3517 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3518 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3522 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3524 "qla1280: ISP Request Transfer Error\n");
3527 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3529 "qla1280: ISP Response Transfer Error\n");
3532 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3533 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3536 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3538 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3541 case MBA_DEVICE_RESET: /* Bus Device Reset */
3542 printk(KERN_INFO "qla1280_isr(): asynchronous "
3543 "BUS_DEVICE_RESET\n");
3545 ha->flags.reset_marker = 1;
3546 index = mailbox[6] & BIT_0;
3547 ha->bus_settings[index].reset_marker = 1;
3550 case MBA_BUS_MODE_CHANGE:
3552 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3556 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3557 if (mailbox[0] < MBA_ASYNC_EVENT) {
3559 memcpy((uint16_t *) ha->mailbox_out, wptr,
3560 MAILBOX_REGISTER_COUNT *
3563 if(ha->mailbox_wait != NULL)
3564 complete(ha->mailbox_wait);
3569 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3573 * We will receive interrupts during mailbox testing prior to
3574 * the card being marked online, hence the double check.
3576 if (!(ha->flags.online && !ha->mailbox_wait)) {
3577 dprintk(2, "qla1280_isr: Response pointer Error\n");
3581 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3584 while (ha->rsp_ring_index != mailbox[5]) {
3585 pkt = ha->response_ring_ptr;
3587 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3588 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3589 dprintk(5,"qla1280_isr: response packet data\n");
3590 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3592 if (pkt->entry_type == STATUS_TYPE) {
3593 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3594 || pkt->comp_status || pkt->entry_status) {
3595 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3596 "0x%x mailbox[5] = 0x%x, comp_status "
3597 "= 0x%x, scsi_status = 0x%x\n",
3598 ha->rsp_ring_index, mailbox[5],
3599 le16_to_cpu(pkt->comp_status),
3600 le16_to_cpu(pkt->scsi_status));
3603 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3604 "0x%x, mailbox[5] = 0x%x\n",
3605 ha->rsp_ring_index, mailbox[5]);
3606 dprintk(2, "qla1280_isr: response packet data\n");
3607 qla1280_dump_buffer(2, (char *)pkt,
3608 RESPONSE_ENTRY_SIZE);
3611 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3612 dprintk(2, "status: Cmd %p, handle %i\n",
3613 ha->outstanding_cmds[pkt->handle]->cmd,
3615 if (pkt->entry_type == STATUS_TYPE)
3616 qla1280_status_entry(ha, pkt, done_q);
3618 qla1280_error_entry(ha, pkt, done_q);
3619 /* Adjust ring index. */
3620 ha->rsp_ring_index++;
3621 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3622 ha->rsp_ring_index = 0;
3623 ha->response_ring_ptr = ha->response_ring;
3625 ha->response_ring_ptr++;
3626 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3631 LEAVE("qla1280_isr");
3636 * Processes asynchronous reset.
3639 * ha = adapter block pointer.
3642 qla1280_rst_aen(struct scsi_qla_host *ha)
3646 ENTER("qla1280_rst_aen");
3648 if (ha->flags.online && !ha->flags.reset_active &&
3649 !ha->flags.abort_isp_active) {
3650 ha->flags.reset_active = 1;
3651 while (ha->flags.reset_marker) {
3652 /* Issue marker command. */
3653 ha->flags.reset_marker = 0;
3654 for (bus = 0; bus < ha->ports &&
3655 !ha->flags.reset_marker; bus++) {
3656 if (ha->bus_settings[bus].reset_marker) {
3657 ha->bus_settings[bus].reset_marker = 0;
3658 qla1280_marker(ha, bus, 0, 0,
3665 LEAVE("qla1280_rst_aen");
3670 * qla1280_status_entry
3671 * Processes received ISP status entry.
3674 * ha = adapter block pointer.
3675 * pkt = entry pointer.
3676 * done_q = done queue.
3679 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3680 struct list_head *done_q)
3682 unsigned int bus, target, lun;
3685 struct scsi_cmnd *cmd;
3686 uint32_t handle = le32_to_cpu(pkt->handle);
3687 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3688 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3690 ENTER("qla1280_status_entry");
3692 /* Validate handle. */
3693 if (handle < MAX_OUTSTANDING_COMMANDS)
3694 sp = ha->outstanding_cmds[handle];
3699 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3703 /* Free outstanding command slot. */
3704 ha->outstanding_cmds[handle] = NULL;
3708 /* Generate LU queue on cntrl, target, LUN */
3709 bus = SCSI_BUS_32(cmd);
3710 target = SCSI_TCN_32(cmd);
3711 lun = SCSI_LUN_32(cmd);
3713 if (comp_status || scsi_status) {
3714 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3715 "0x%x, handle = 0x%x\n", comp_status,
3716 scsi_status, handle);
3719 /* Target busy or queue full */
3720 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3721 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3722 CMD_RESULT(cmd) = scsi_status & 0xff;
3725 /* Save ISP completion status */
3726 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3728 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3729 if (comp_status != CS_ARS_FAILED) {
3730 uint16_t req_sense_length =
3731 le16_to_cpu(pkt->req_sense_length);
3732 if (req_sense_length < CMD_SNSLEN(cmd))
3733 sense_sz = req_sense_length;
3736 * scsi_cmnd->sense_buffer is
3737 * 64 bytes, why only copy 63?
3738 * This looks wrong! /Jes
3740 sense_sz = CMD_SNSLEN(cmd) - 1;
3742 memcpy(cmd->sense_buffer,
3743 &pkt->req_sense_data, sense_sz);
3746 memset(cmd->sense_buffer + sense_sz, 0,
3747 SCSI_SENSE_BUFFERSIZE - sense_sz);
3749 dprintk(2, "qla1280_status_entry: Check "
3750 "condition Sense data, b %i, t %i, "
3751 "l %i\n", bus, target, lun);
3753 qla1280_dump_buffer(2,
3754 (char *)cmd->sense_buffer,
3759 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3761 /* Place command on done queue. */
3762 list_add_tail(&sp->list, done_q);
3764 LEAVE("qla1280_status_entry");
3768 * qla1280_error_entry
3769 * Processes error entry.
3772 * ha = adapter block pointer.
3773 * pkt = entry pointer.
3774 * done_q = done queue.
3777 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3778 struct list_head *done_q)
3781 uint32_t handle = le32_to_cpu(pkt->handle);
3783 ENTER("qla1280_error_entry");
3785 if (pkt->entry_status & BIT_3)
3786 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3787 else if (pkt->entry_status & BIT_2)
3788 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3789 else if (pkt->entry_status & BIT_1)
3790 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3792 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3794 /* Validate handle. */
3795 if (handle < MAX_OUTSTANDING_COMMANDS)
3796 sp = ha->outstanding_cmds[handle];
3801 /* Free outstanding command slot. */
3802 ha->outstanding_cmds[handle] = NULL;
3804 /* Bad payload or header */
3805 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3806 /* Bad payload or header, set error status. */
3807 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3810 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3812 /* Set error status. */
3813 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3816 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3818 /* Place command on done queue. */
3819 list_add_tail(&sp->list, done_q);
3821 #ifdef QLA_64BIT_PTR
3822 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3823 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3827 LEAVE("qla1280_error_entry");
3832 * Resets ISP and aborts all outstanding commands.
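 *
 * Recovery sequence: disable interrupts and pause the RISC, complete
 * every outstanding command with DID_RESET, then reload the firmware,
 * re-apply the NVRAM configuration, re-initialize the rings and reset
 * each SCSI bus.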
3835 * ha = adapter block pointer.
3841 qla1280_abort_isp(struct scsi_qla_host *ha)
3843 struct device_reg __iomem *reg = ha->iobase;
3849 ENTER("qla1280_abort_isp");
3851 if (ha->flags.abort_isp_active || !ha->flags.online)
3854 ha->flags.abort_isp_active = 1;
3856 /* Disable ISP interrupts. */
3857 qla1280_disable_intrs(ha);
3858 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3859 RD_REG_WORD(&reg->id_l);
3861 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3863 /* Dequeue all commands in outstanding command list. */
3864 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3865 struct scsi_cmnd *cmd;
3866 sp = ha->outstanding_cmds[cnt];
3869 CMD_RESULT(cmd) = DID_RESET << 16;
3870 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3871 ha->outstanding_cmds[cnt] = NULL;
3872 list_add_tail(&sp->list, &ha->done_q);
3878 status = qla1280_load_firmware(ha);
3882 /* Setup adapter based on NVRAM parameters. */
3883 qla1280_nvram_config (ha);
3885 status = qla1280_init_rings(ha);
3889 /* Issue SCSI reset. */
3890 for (bus = 0; bus < ha->ports; bus++)
3891 qla1280_bus_reset(ha, bus);
3893 ha->flags.abort_isp_active = 0;
3897 "qla1280: ISP error recovery failed, board disabled");
3898 qla1280_reset_adapter(ha);
3899 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3902 LEAVE("qla1280_abort_isp");
3908 * qla1280_debounce_register
3909 * Debounce register.
3912 * port = register address.
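 *
 * A register may be read while the RISC is still updating it, so it is
 * read repeatedly until two consecutive reads return the same value.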
3918 qla1280_debounce_register(volatile u16 __iomem * addr)
3923 ret = RD_REG_WORD(addr);
3924 ret2 = RD_REG_WORD(addr);
3931 ret = RD_REG_WORD(addr);
3932 ret2 = RD_REG_WORD(addr);
3933 } while (ret != ret2);
3939 /************************************************************************
3940 * qla1280_check_for_dead_scsi_bus *
3942 * This routine checks for a dead SCSI bus *
3943 ************************************************************************/
3944 #define SET_SXP_BANK 0x0100
3945 #define SCSI_PHASE_INVALID 0x87FF
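/*
 * A bus previously marked dead is re-probed by pausing the RISC, banking
 * the configuration register over to the SXP register set, sampling the
 * SCSI control pins and then restoring the original state; a value of
 * SCSI_PHASE_INVALID means the bus is still dead.
 */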
3947 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3949 uint16_t config_reg, scsi_control;
3950 struct device_reg __iomem *reg = ha->iobase;
3952 if (ha->bus_settings[bus].scsi_bus_dead) {
3953 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3954 config_reg = RD_REG_WORD(&reg->cfg_1);
3955 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3956 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3957 WRT_REG_WORD(&reg->cfg_1, config_reg);
3958 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3960 if (scsi_control == SCSI_PHASE_INVALID) {
3961 ha->bus_settings[bus].scsi_bus_dead = 1;
3962 return 1; /* bus is dead */
3964 ha->bus_settings[bus].scsi_bus_dead = 0;
3965 ha->bus_settings[bus].failed_reset_count = 0;
3968 return 0; /* bus is not dead */
3972 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3973 struct scsi_device *device)
3975 uint16_t mb[MAILBOX_REGISTER_COUNT];
3976 int bus, target, lun;
3978 bus = device->channel;
3979 target = device->id;
3983 mb[0] = MBC_GET_TARGET_PARAMETERS;
3984 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3986 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3989 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3992 printk(" Sync: period %d, offset %d",
3993 (mb[3] & 0xff), (mb[3] >> 8));
3996 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
4001 if (device->simple_tags)
4002 printk(", Tagged queuing: depth %d", device->queue_depth);
4009 __qla1280_dump_buffer(char *b, int size)
4014 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4015 "Bh Ch Dh Eh Fh\n");
4016 printk(KERN_DEBUG "---------------------------------------------"
4017 "------------------\n");
4019 for (cnt = 0; cnt < size;) {
4022 printk("0x%02x", c);
4033 /**************************************************************************
4034 * ql1280_print_scsi_cmd
4036 **************************************************************************/
4038 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4040 struct scsi_qla_host *ha;
4041 struct Scsi_Host *host = CMD_HOST(cmd);
4043 /* struct scatterlist *sg; */
4046 ha = (struct scsi_qla_host *)host->hostdata;
4048 sp = (struct srb *)CMD_SP(cmd);
4049 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4050 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4051 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4054 for (i = 0; i < cmd->cmd_len; i++) {
4055 printk("0x%02x ", cmd->cmnd[i]);
4057 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4058 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4059 scsi_sglist(cmd), scsi_bufflen(cmd));
4062 sg = (struct scatterlist *) cmd->request_buffer;
4063 printk(" SG buffer: \n");
4064 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4066 printk(" tag=%d, transfersize=0x%x \n",
4067 cmd->tag, cmd->transfersize);
4068 printk(" SP=0x%p\n", CMD_SP(cmd));
4069 printk(" underflow size = 0x%x, direction=0x%x\n",
4070 cmd->underflow, cmd->sc_data_direction);
4073 /**************************************************************************
4074 * ql1280_dump_device
4076 **************************************************************************/
4078 ql1280_dump_device(struct scsi_qla_host *ha)
4081 struct scsi_cmnd *cp;
4085 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4087 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4088 if ((sp = ha->outstanding_cmds[i]) == NULL)
4090 if ((cp = sp->cmd) == NULL)
4092 qla1280_print_scsi_cmd(1, cp);
4107 struct setup_tokens {
4112 static struct setup_tokens setup_token[] __initdata =
4114 { "nvram", TOKEN_NVRAM },
4115 { "sync", TOKEN_SYNC },
4116 { "wide", TOKEN_WIDE },
4117 { "ppr", TOKEN_PPR },
4118 { "verbose", TOKEN_VERBOSE },
4119 { "debug", TOKEN_DEBUG },
4123 /**************************************************************************
4126 * Handle boot parameters. This really needs to be changed so one
4127 * can specify per adapter parameters.
4128 **************************************************************************/
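/*
 * Options are 'name:value' pairs separated by ';' (see the token table
 * above). Purely as an illustrative example, booting with
 * "qla1280=verbose:yes;nvram:no" would enable verbose output and ignore
 * the NVRAM settings.
 */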
4130 qla1280_setup(char *s)
4138 while (cp && (ptr = strchr(cp, ':'))) {
4140 if (!strcmp(ptr, "yes")) {
4143 } else if (!strcmp(ptr, "no")) {
4147 val = simple_strtoul(ptr, &ptr, 0);
4149 switch ((toke = qla1280_get_token(cp))) {
4152 driver_setup.no_nvram = 1;
4156 driver_setup.no_sync = 1;
4157 else if (val != 0x10000)
4158 driver_setup.sync_mask = val;
4162 driver_setup.no_wide = 1;
4163 else if (val != 0x10000)
4164 driver_setup.wide_mask = val;
4168 driver_setup.no_ppr = 1;
4169 else if (val != 0x10000)
4170 driver_setup.ppr_mask = val;
4173 qla1280_verbose = val;
4176 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4180 cp = strchr(ptr, ';');
4192 qla1280_get_token(char *str)
4198 sep = strchr(str, ':');
4201 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4202 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4203 ret = setup_token[i].val;
4213 static struct scsi_host_template qla1280_driver_template = {
4214 .module = THIS_MODULE,
4215 .proc_name = "qla1280",
4216 .name = "Qlogic ISP 1280/12160",
4217 .info = qla1280_info,
4218 .slave_configure = qla1280_slave_configure,
4219 .queuecommand = qla1280_queuecommand,
4220 .eh_abort_handler = qla1280_eh_abort,
4221 .eh_device_reset_handler= qla1280_eh_device_reset,
4222 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4223 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4224 .bios_param = qla1280_biosparam,
4225 .can_queue = 0xfffff,
4227 .sg_tablesize = SG_ALL,
4229 .use_clustering = ENABLE_CLUSTERING,
4233 static int __devinit
4234 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4236 int devnum = id->driver_data;
4237 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4238 struct Scsi_Host *host;
4239 struct scsi_qla_host *ha;
4240 int error = -ENODEV;
4242 /* Bypass all AMI SUBSYS VENDOR IDs */
4243 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4245 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4249 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4250 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4252 if (pci_enable_device(pdev)) {
4254 "qla1280: Failed to enabled pci device, aborting.\n");
4258 pci_set_master(pdev);
4261 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4264 "qla1280: Failed to register host, aborting.\n");
4265 goto error_disable_device;
4268 ha = (struct scsi_qla_host *)host->hostdata;
4269 memset(ha, 0, sizeof(struct scsi_qla_host));
4272 ha->devnum = devnum; /* specifies microcode load address */
4274 #ifdef QLA_64BIT_PTR
4275 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4276 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4277 printk(KERN_WARNING "scsi(%li): Unable to set a "
4278 "suitable DMA mask - aborting\n", ha->host_no);
4280 goto error_put_host;
4283 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4286 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4287 printk(KERN_WARNING "scsi(%li): Unable to set a "
4288 "suitable DMA mask - aborting\n", ha->host_no);
4290 goto error_put_host;
4294 ha->request_ring = pci_alloc_consistent(ha->pdev,
4295 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4297 if (!ha->request_ring) {
4298 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4299 goto error_put_host;
4302 ha->response_ring = pci_alloc_consistent(ha->pdev,
4303 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4305 if (!ha->response_ring) {
4306 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4307 goto error_free_request_ring;
4310 ha->ports = bdp->numPorts;
4313 ha->host_no = host->host_no;
4315 host->irq = pdev->irq;
4316 host->max_channel = bdp->numPorts - 1;
4317 host->max_lun = MAX_LUNS - 1;
4318 host->max_id = MAX_TARGETS;
4319 host->max_sectors = 1024;
4320 host->unique_id = host->host_no;
#if MEMORY_MAPPED_IO
	ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
				 "0x%04lx-0x%04lx - already in use\n",
		       host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif

	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);

	if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
				"qla1280", ha)) {
		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, "
		       "already in use\n", pdev->irq);
		goto error_release_region;
	}

	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}

	/* set our host ID (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;
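
	/* Adapter initialised; publish the host to the SCSI midlayer. */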
	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);

	return 0;
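
	/* Error unwind: release resources in the reverse order they were acquired. */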
 error_disable_adapter:
	qla1280_disable_intrs(ha);
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}
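
/*
 * PCI remove: detach the host from the midlayer, quiesce the ISP and free
 * everything acquired in qla1280_probe_one().
 */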
static void __devexit
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	scsi_remove_host(host);

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}
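
/* Glue binding the probe/remove entry points to the qla1280 PCI ID table. */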
static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= __devexit_p(qla1280_remove_one),
};
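
/*
 * Module init: sanity-check that struct srb fits in the midlayer's
 * scsi_pointer area, apply any "qla1280=" option string, then register
 * the PCI driver.
 */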
static int __init
qla1280_init(void)
{
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line.  If not NULL, then process this config string with
	 * qla1280_setup
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	return pci_register_driver(&qla1280_pci_driver);
}
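
/*
 * When built as a module, the same option string can be given at load time,
 * assuming the driver exposes it as a "qla1280" charp module parameter
 * (declared earlier in this file), e.g.:
 *
 *	modprobe qla1280 qla1280="verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
 */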
static void __exit
qla1280_exit(void)
{
	int i;

	pci_unregister_driver(&qla1280_pci_driver);
	/* release any allocated firmware images */
	for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
		release_firmware(qla1280_fw_tbl[i].fw);
		qla1280_fw_tbl[i].fw = NULL;
	}
}
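
/*
 * Module metadata.  The MODULE_FIRMWARE() lines advertise the firmware
 * blobs fetched with request_firmware() so packaging tools can ship them
 * alongside the module.
 */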
module_init(qla1280_init);
module_exit(qla1280_exit);

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);

/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */