scsi: ipr: use sg helper to iterate over scatterlist
drivers/scsi/ipr.c (platform/kernel/linux-rpi.git)
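The diff body is not reproduced in this listing; the full file follows. As a brief, hedged sketch of the technique the commit title names (not the actual change): chained scatterlists cannot safely be walked by indexing scatterlist[i], so iteration should go through the for_each_sg() helper and the sg_dma_* accessors. A minimal illustration, with a hypothetical function name:

	#include <linux/scatterlist.h>

	/*
	 * Hypothetical example (not part of ipr.c): build 64-bit IOADL
	 * entries by walking a possibly chained scatterlist with
	 * for_each_sg() rather than indexing the scatterlist array.
	 */
	static void example_build_ioadl64(struct scatterlist *sgl, int nents,
					  struct ipr_ioadl64_desc *ioadl64)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
			ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
		}
	}
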
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4086: SAS Adapter Hardware Configuration Error"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
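        /* Order the trace entry writes before any subsequent stores */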
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

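        /* Preserve the command's HRRQ assignment across the cmd_pkt reset */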
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

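                        /* Complete through the EH done path so abort waiters are woken */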
                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

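/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * HRRQ 0 is reserved for initialization commands; when more than one
 * queue is configured, commands are spread round-robin across queues
 * 1..hrrq_num-1.
 *
 * Return value:
 *      hrrq index
 **/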
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
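/*
 * Usage sketch (illustrative, not driver code): for a resource path of
 * { 0x00, 0x0a, 0xff } on SCSI host 2, __ipr_format_res_path() yields
 * "00-0A" and ipr_format_res_path() prefixes the host number, yielding
 * "2/00-0A".  A hypothetical caller:
 *
 *	char buf[IPR_MAX_RES_PATH_LENGTH];
 *	u8 path[] = { 0x00, 0x0a, 0xff };
 *
 *	ipr_err("Resource path: %s\n",
 *		ipr_format_res_path(ioa_cfg, path, buf, sizeof(buf)));
 */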
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
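/*
 * The target numbers handed out on the virtual buses come from plain
 * bitmap allocators: ipr_init_res_entry() claims an id with
 * find_first_zero_bit() + set_bit(), and this function releases it with
 * clear_bit().  A minimal sketch of the pairing (illustrative only):
 *
 *	target = find_first_zero_bit(ioa_cfg->vset_ids,
 *				     ioa_cfg->max_devs_supported);
 *	set_bit(target, ioa_cfg->vset_ids);	(resource appears)
 *	...
 *	clear_bit(target, ioa_cfg->vset_ids);	(resource removed)
 *
 * Generic SCSI targets may be shared by several resources with the same
 * dev_id, so the list walk above must find no other user before the bit
 * is cleared.
 */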
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration
1507  * change notification host controlled async from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NULL terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
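/*
 * Worked example (illustrative): with buf = "IBM     " and i = 7, the
 * loop backs up over the trailing blanks to the 'M' at index 2, a single
 * pad blank and a terminator are written, leaving "IBM " and returning
 * 4 -- the offset at which ipr_log_vpd_compact() below copies in the
 * next field.
 */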
1552
1553 /**
1554  * ipr_log_vpd_compact - Log the passed VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
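/*
 * The formatted portion of the resulting log line looks like this
 * (identifiers made up): "Remote IOA VPID/SN: IBM 5702 01234567" --
 * vendor, product and serial number separated by single spaces.
 */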
1579
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
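/*
 * The dump is emitted sixteen bytes per line, offset first (values
 * illustrative):
 *
 *	00000000: 00000001 00000002 00000003 00000004
 *	00000010: ...
 *
 * Note that i counts 32-bit words, so the printed offset is i*4 bytes
 * and the bound len/4 is the total word count.
 */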
1965
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
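/*
 * The length passed to ipr_log_hex_data() above subtracts everything up
 * to the type 17 "data" member from the overall HCAM length, so only
 * the variable-length trailing IOA data is dumped.
 */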
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
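/*
 * Indexed by the low four bits of a config element's link_rate field
 * (IPR_PHY_LINK_RATE_MASK); e.g. a value of 8 decodes to "1.5Gbps" and
 * 9 to "3.0Gbps".
 */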
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
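/*
 * Layout sketch (informational): the type 20 overlay packs a variable
 * number of variable-length fabric descriptors back to back, each
 * trailed by its path elements, with any remainder dumped as raw hex:
 *
 *	| fabric 0 | cfg | cfg | fabric 1 | cfg | ... | raw data |
 *
 * fabric->length covers one descriptor including its elements, which is
 * why the walk advances by that many bytes while add_len counts down to
 * the size of the trailing raw data.
 */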
2319
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                        struct ipr_hostrcb *hostrcb)
2424 {
2425         struct ipr_hostrcb_type_41_error *error;
2426
2427         error = &hostrcb->hcam.u.error64.u.type_41_error;
2428
2429         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2430         ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2431         ipr_log_hex_data(ioa_cfg, error->data,
2432                          be32_to_cpu(hostrcb->hcam.length) -
2433                          (offsetof(struct ipr_hostrcb_error, u) +
2434                           offsetof(struct ipr_hostrcb_type_41_error, data)));
2435 }

2436 /**
2437  * ipr_log_generic_error - Log an adapter error.
2438  * @ioa_cfg:    ioa config struct
2439  * @hostrcb:    hostrcb struct
2440  *
2441  * Return value:
2442  *      none
2443  **/
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2445                                   struct ipr_hostrcb *hostrcb)
2446 {
2447         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2448                          be32_to_cpu(hostrcb->hcam.length));
2449 }
2450
2451 /**
2452  * ipr_log_sis64_device_error - Log a sis64 device error.
2453  * @ioa_cfg:    ioa config struct
2454  * @hostrcb:    hostrcb struct
2455  *
2456  * Return value:
2457  *      none
2458  **/
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2460                                          struct ipr_hostrcb *hostrcb)
2461 {
2462         struct ipr_hostrcb_type_21_error *error;
2463         char buffer[IPR_MAX_RES_PATH_LENGTH];
2464
2465         error = &hostrcb->hcam.u.error64.u.type_21_error;
2466
2467         ipr_err("-----Failing Device Information-----\n");
2468         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2470                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2471         ipr_err("Device Resource Path: %s\n",
2472                 __ipr_format_res_path(error->res_path,
2473                                       buffer, sizeof(buffer)));
2474         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2475         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2476         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2477         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2478         ipr_err("SCSI Sense Data:\n");
2479         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2480         ipr_err("SCSI Command Descriptor Block:\n");
2481         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2482
2483         ipr_err("Additional IOA Data:\n");
2484         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2485 }
2486
2487 /**
2488  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2489  * @ioasc:      IOASC
2490  *
2491  * This function will return the index into the ipr_error_table
2492  * for the specified IOASC. If the IOASC is not in the table,
2493  * 0 will be returned, which points to the entry used for unknown errors.
2494  *
2495  * Return value:
2496  *      index into the ipr_error_table
2497  **/
2498 static u32 ipr_get_error(u32 ioasc)
2499 {
2500         int i;
2501
2502         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2503                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2504                         return i;
2505
2506         return 0;
2507 }
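/*
 * Minimal usage sketch, mirroring ipr_handle_log_data() below -- a miss
 * degrades safely to index 0, the "unknown error" entry:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n",
 *			     ipr_error_table[error_index].error);
 */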
2508
2509 /**
2510  * ipr_handle_log_data - Log an adapter error.
2511  * @ioa_cfg:    ioa config struct
2512  * @hostrcb:    hostrcb struct
2513  *
2514  * This function logs an adapter error to the system.
2515  *
2516  * Return value:
2517  *      none
2518  **/
2519 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2520                                 struct ipr_hostrcb *hostrcb)
2521 {
2522         u32 ioasc;
2523         int error_index;
2524         struct ipr_hostrcb_type_21_error *error;
2525
2526         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2527                 return;
2528
2529         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2530                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2531
2532         if (ioa_cfg->sis64)
2533                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2534         else
2535                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2536
2537         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2538             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2539                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2540                 scsi_report_bus_reset(ioa_cfg->host,
2541                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2542         }
2543
2544         error_index = ipr_get_error(ioasc);
2545
2546         if (!ipr_error_table[error_index].log_hcam)
2547                 return;
2548
2549         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2550             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2551                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2552
2553                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2554                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2555                                 return;
2556         }
2557
2558         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2559
2560         /* Set indication we have logged an error */
2561         ioa_cfg->errors_logged++;
2562
2563         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2564                 return;
2565         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2566                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2567
2568         switch (hostrcb->hcam.overlay_id) {
2569         case IPR_HOST_RCB_OVERLAY_ID_2:
2570                 ipr_log_cache_error(ioa_cfg, hostrcb);
2571                 break;
2572         case IPR_HOST_RCB_OVERLAY_ID_3:
2573                 ipr_log_config_error(ioa_cfg, hostrcb);
2574                 break;
2575         case IPR_HOST_RCB_OVERLAY_ID_4:
2576         case IPR_HOST_RCB_OVERLAY_ID_6:
2577                 ipr_log_array_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_7:
2580                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_12:
2583                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2584                 break;
2585         case IPR_HOST_RCB_OVERLAY_ID_13:
2586                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2587                 break;
2588         case IPR_HOST_RCB_OVERLAY_ID_14:
2589         case IPR_HOST_RCB_OVERLAY_ID_16:
2590                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2591                 break;
2592         case IPR_HOST_RCB_OVERLAY_ID_17:
2593                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2594                 break;
2595         case IPR_HOST_RCB_OVERLAY_ID_20:
2596                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2597                 break;
2598         case IPR_HOST_RCB_OVERLAY_ID_21:
2599                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2600                 break;
2601         case IPR_HOST_RCB_OVERLAY_ID_23:
2602                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2603                 break;
2604         case IPR_HOST_RCB_OVERLAY_ID_24:
2605         case IPR_HOST_RCB_OVERLAY_ID_26:
2606                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2607                 break;
2608         case IPR_HOST_RCB_OVERLAY_ID_30:
2609                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2610                 break;
2611         case IPR_HOST_RCB_OVERLAY_ID_41:
2612                 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2613                 break;
2614         case IPR_HOST_RCB_OVERLAY_ID_1:
2615         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2616         default:
2617                 ipr_log_generic_error(ioa_cfg, hostrcb);
2618                 break;
2619         }
2620 }
2621
2622 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2623 {
2624         struct ipr_hostrcb *hostrcb;
2625
2626         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2627                                         struct ipr_hostrcb, queue);
2628
2629         if (unlikely(!hostrcb)) {
2630                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2631                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2632                                                 struct ipr_hostrcb, queue);
2633         }
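        /*
         * The reclaim path assumes the caller has just queued a hostrcb
         * on hostrcb_report_q (as ipr_process_error() does), so hostrcb
         * is taken to be non-NULL when it is unlinked below.
         */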
2634
2635         list_del_init(&hostrcb->queue);
2636         return hostrcb;
2637 }
2638
2639 /**
2640  * ipr_process_error - Op done function for an adapter error log.
2641  * @ipr_cmd:    ipr command struct
2642  *
2643  * This function is the op done function for an error log host
2644  * controlled async from the adapter. It will log the error and
2645  * send the HCAM back to the adapter.
2646  *
2647  * Return value:
2648  *      none
2649  **/
2650 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2651 {
2652         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2653         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2654         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2655         u32 fd_ioasc;
2656
2657         if (ioa_cfg->sis64)
2658                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2659         else
2660                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2661
2662         list_del_init(&hostrcb->queue);
2663         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2664
2665         if (!ioasc) {
2666                 ipr_handle_log_data(ioa_cfg, hostrcb);
2667                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2668                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2669         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2670                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2671                 dev_err(&ioa_cfg->pdev->dev,
2672                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2673         }
2674
2675         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2676         schedule_work(&ioa_cfg->work_q);
2677         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2678
2679         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2680 }
2681
2682 /**
2683  * ipr_timeout -  An internally generated op has timed out.
2684  * @t:          Timer context used to fetch the ipr command struct
2685  *
2686  * This function blocks host requests and initiates an
2687  * adapter reset.
2688  *
2689  * Return value:
2690  *      none
2691  **/
2692 static void ipr_timeout(struct timer_list *t)
2693 {
2694         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2695         unsigned long lock_flags = 0;
2696         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2697
2698         ENTER;
2699         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2700
2701         ioa_cfg->errors_logged++;
2702         dev_err(&ioa_cfg->pdev->dev,
2703                 "Adapter being reset due to command timeout.\n");
2704
2705         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2706                 ioa_cfg->sdt_state = GET_DUMP;
2707
2708         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2709                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2710
2711         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712         LEAVE;
2713 }
2714
2715 /**
2716  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2717  * @t:          Timer context used to fetch the ipr command struct
2718  *
2719  * This function blocks host requests and initiates an
2720  * adapter reset.
2721  *
2722  * Return value:
2723  *      none
2724  **/
2725 static void ipr_oper_timeout(struct timer_list *t)
2726 {
2727         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2728         unsigned long lock_flags = 0;
2729         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2730
2731         ENTER;
2732         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2733
2734         ioa_cfg->errors_logged++;
2735         dev_err(&ioa_cfg->pdev->dev,
2736                 "Adapter timed out transitioning to operational.\n");
2737
2738         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2739                 ioa_cfg->sdt_state = GET_DUMP;
2740
2741         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2742                 if (ipr_fastfail)
2743                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2744                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2745         }
2746
2747         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2748         LEAVE;
2749 }
2750
2751 /**
2752  * ipr_find_ses_entry - Find matching SES in SES table
2753  * @res:        resource entry struct of SES
2754  *
2755  * Return value:
2756  *      pointer to SES table entry / NULL on failure
2757  **/
2758 static const struct ipr_ses_table_entry *
2759 ipr_find_ses_entry(struct ipr_resource_entry *res)
2760 {
2761         int i, j, matches;
2762         struct ipr_std_inq_vpids *vpids;
2763         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2764
2765         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2766                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2767                         if (ste->compare_product_id_byte[j] == 'X') {
2768                                 vpids = &res->std_inq_data.vpids;
2769                                 if (vpids->product_id[j] == ste->product_id[j])
2770                                         matches++;
2771                                 else
2772                                         break;
2773                         } else
2774                                 matches++;
2775                 }
2776
2777                 if (matches == IPR_PROD_ID_LEN)
2778                         return ste;
2779         }
2780
2781         return NULL;
2782 }
2783
2784 /**
2785  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2786  * @ioa_cfg:    ioa config struct
2787  * @bus:                SCSI bus
2788  * @bus_width:  bus width
2789  *
2790  * Return value:
2791  *      SCSI bus speed in units of 100KHz; 1600 means 160 MHz.
2792  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2793  *      MB/sec is twice the clock rate (e.g. for a wide enabled bus,
2794  *      a max 160 MHz clock gives max 320 MB/sec).
2795  **/
2796 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2797 {
2798         struct ipr_resource_entry *res;
2799         const struct ipr_ses_table_entry *ste;
2800         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2801
2802         /* Loop through each config table entry in the config table buffer */
2803         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2804                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2805                         continue;
2806
2807                 if (bus != res->bus)
2808                         continue;
2809
2810                 if (!(ste = ipr_find_ses_entry(res)))
2811                         continue;
2812
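                /*
                 * Example (values illustrative): a SES entry capping a
                 * 16-bit bus at 160 MB/sec gives (160 * 10) / (16 / 8)
                 * = 800, i.e. an 80 MHz clock in the 100KHz units
                 * described above.
                 */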
2813                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2814         }
2815
2816         return max_xfer_rate;
2817 }
2818
2819 /**
2820  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2821  * @ioa_cfg:            ioa config struct
2822  * @max_delay:          max delay in microseconds to wait
2823  *
2824  * Busy-waits for an IODEBUG ACK from the IOA.
2825  *
2826  * Return value:
2827  *      0 on success / other on failure
2828  **/
2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2830 {
2831         volatile u32 pcii_reg;
2832         int delay = 1;
2833
2834         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2835         while (delay < max_delay) {
2836                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2837
2838                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2839                         return 0;
2840
2841                 /* udelay cannot be used if delay is more than a few milliseconds */
2842                 if ((delay / 1000) > MAX_UDELAY_MS)
2843                         mdelay(delay / 1000);
2844                 else
2845                         udelay(delay);
2846
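                /* Exponential backoff: wait 1, 2, 4, ... usecs up to max_delay */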
2847                 delay += delay;
2848         }
2849         return -EIO;
2850 }
2851
2852 /**
2853  * ipr_get_sis64_dump_data_section - Dump IOA memory
2854  * @ioa_cfg:                    ioa config struct
2855  * @start_addr:                 adapter address to dump
2856  * @dest:                       destination kernel buffer
2857  * @length_in_words:            length to dump in 4 byte words
2858  *
2859  * Return value:
2860  *      0 on success
2861  **/
2862 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2863                                            u32 start_addr,
2864                                            __be32 *dest, u32 length_in_words)
2865 {
2866         int i;
2867
2868         for (i = 0; i < length_in_words; i++) {
2869                 writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
2870                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2871                 dest++;
2872         }
2873
2874         return 0;
2875 }
2876
2877 /**
2878  * ipr_get_ldump_data_section - Dump IOA memory
2879  * @ioa_cfg:                    ioa config struct
2880  * @start_addr:                 adapter address to dump
2881  * @dest:                       destination kernel buffer
2882  * @length_in_words:            length to dump in 4 byte words
2883  *
2884  * Return value:
2885  *      0 on success / -EIO on failure
2886  **/
2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2888                                       u32 start_addr,
2889                                       __be32 *dest, u32 length_in_words)
2890 {
2891         volatile u32 temp_pcii_reg;
2892         int i, delay = 0;
2893
2894         if (ioa_cfg->sis64)
2895                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2896                                                        dest, length_in_words);
2897
2898         /* Write IOA interrupt reg starting LDUMP state  */
2899         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2900                ioa_cfg->regs.set_uproc_interrupt_reg32);
2901
2902         /* Wait for IO debug acknowledge */
2903         if (ipr_wait_iodbg_ack(ioa_cfg,
2904                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2905                 dev_err(&ioa_cfg->pdev->dev,
2906                         "IOA dump long data transfer timeout\n");
2907                 return -EIO;
2908         }
2909
2910         /* Signal LDUMP interlocked - clear IO debug ack */
2911         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912                ioa_cfg->regs.clr_interrupt_reg);
2913
2914         /* Write Mailbox with starting address */
2915         writel(start_addr, ioa_cfg->ioa_mailbox);
2916
2917         /* Signal address valid - clear IOA Reset alert */
2918         writel(IPR_UPROCI_RESET_ALERT,
2919                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2920
2921         for (i = 0; i < length_in_words; i++) {
2922                 /* Wait for IO debug acknowledge */
2923                 if (ipr_wait_iodbg_ack(ioa_cfg,
2924                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2925                         dev_err(&ioa_cfg->pdev->dev,
2926                                 "IOA dump short data transfer timeout\n");
2927                         return -EIO;
2928                 }
2929
2930                 /* Read data from mailbox and increment destination pointer */
2931                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2932                 dest++;
2933
2934                 /* For all but the last word of data, signal data received */
2935                 if (i < (length_in_words - 1)) {
2936                         /* Signal dump data received - Clear IO debug Ack */
2937                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2938                                ioa_cfg->regs.clr_interrupt_reg);
2939                 }
2940         }
2941
2942         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2943         writel(IPR_UPROCI_RESET_ALERT,
2944                ioa_cfg->regs.set_uproc_interrupt_reg32);
2945
2946         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2947                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2948
2949         /* Signal dump data received - Clear IO debug Ack */
2950         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2951                ioa_cfg->regs.clr_interrupt_reg);
2952
2953         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2954         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2955                 temp_pcii_reg =
2956                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2957
2958                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2959                         return 0;
2960
2961                 udelay(10);
2962                 delay += 10;
2963         }
2964
2965         return 0;
2966 }
2967
2968 #ifdef CONFIG_SCSI_IPR_DUMP
2969 /**
2970  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2971  * @ioa_cfg:            ioa config struct
2972  * @pci_address:        adapter address
2973  * @length:                     length of data to copy
2974  *
2975  * Copy data from PCI adapter to kernel buffer.
2976  * Note: length MUST be a 4 byte multiple
2977  * Return value:
2978  *      0 on success / other on failure
2979  **/
2980 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2981                         unsigned long pci_address, u32 length)
2982 {
2983         int bytes_copied = 0;
2984         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2985         __be32 *page;
2986         unsigned long lock_flags = 0;
2987         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2988
2989         if (ioa_cfg->sis64)
2990                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2991         else
2992                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2993
2994         while (bytes_copied < length &&
2995                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2996                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2997                     ioa_dump->page_offset == 0) {
2998                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2999
3000                         if (!page) {
3001                                 ipr_trace;
3002                                 return bytes_copied;
3003                         }
3004
3005                         ioa_dump->page_offset = 0;
3006                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
3007                         ioa_dump->next_page_index++;
3008                 } else
3009                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
3010
3011                 rem_len = length - bytes_copied;
3012                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3013                 cur_len = min(rem_len, rem_page_len);
3014
3015                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3016                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3017                         rc = -EIO;
3018                 } else {
3019                         rc = ipr_get_ldump_data_section(ioa_cfg,
3020                                                         pci_address + bytes_copied,
3021                                                         &page[ioa_dump->page_offset / 4],
3022                                                         (cur_len / sizeof(u32)));
3023                 }
3024                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3025
3026                 if (!rc) {
3027                         ioa_dump->page_offset += cur_len;
3028                         bytes_copied += cur_len;
3029                 } else {
3030                         ipr_trace;
3031                         break;
3032                 }
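                /*
                 * Yield between sections: the mailbox reads above busy-wait
                 * with the host lock held, so let other tasks run.
                 */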
3033                 schedule();
3034         }
3035
3036         return bytes_copied;
3037 }
3038
3039 /**
3040  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041  * @hdr:        dump entry header struct
3042  *
3043  * Return value:
3044  *      nothing
3045  **/
3046 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3047 {
3048         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3049         hdr->num_elems = 1;
3050         hdr->offset = sizeof(*hdr);
3051         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3052 }
3053
3054 /**
3055  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3056  * @ioa_cfg:    ioa config struct
3057  * @driver_dump:        driver dump struct
3058  *
3059  * Return value:
3060  *      nothing
3061  **/
3062 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3063                                    struct ipr_driver_dump *driver_dump)
3064 {
3065         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3066
3067         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3068         driver_dump->ioa_type_entry.hdr.len =
3069                 sizeof(struct ipr_dump_ioa_type_entry) -
3070                 sizeof(struct ipr_dump_entry_header);
3071         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3072         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3073         driver_dump->ioa_type_entry.type = ioa_cfg->type;
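        /*
         * The four microcode VPD bytes pack big-endian into one word;
         * e.g. (values illustrative) major 0x02, card type 0x58 and
         * minor bytes 0x01 0x0A yield fw_version 0x0258010A.
         */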
3074         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3075                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3076                 ucode_vpd->minor_release[1];
3077         driver_dump->hdr.num_entries++;
3078 }
3079
3080 /**
3081  * ipr_dump_version_data - Fill in the driver version in the dump.
3082  * @ioa_cfg:    ioa config struct
3083  * @driver_dump:        driver dump struct
3084  *
3085  * Return value:
3086  *      nothing
3087  **/
3088 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3089                                   struct ipr_driver_dump *driver_dump)
3090 {
3091         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3092         driver_dump->version_entry.hdr.len =
3093                 sizeof(struct ipr_dump_version_entry) -
3094                 sizeof(struct ipr_dump_entry_header);
3095         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3096         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3097         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3098         driver_dump->hdr.num_entries++;
3099 }
3100
3101 /**
3102  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103  * @ioa_cfg:    ioa config struct
3104  * @driver_dump:        driver dump struct
3105  *
3106  * Return value:
3107  *      nothing
3108  **/
3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3110                                    struct ipr_driver_dump *driver_dump)
3111 {
3112         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3113         driver_dump->trace_entry.hdr.len =
3114                 sizeof(struct ipr_dump_trace_entry) -
3115                 sizeof(struct ipr_dump_entry_header);
3116         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3117         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3118         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3119         driver_dump->hdr.num_entries++;
3120 }
3121
3122 /**
3123  * ipr_dump_location_data - Fill in the IOA location in the dump.
3124  * @ioa_cfg:    ioa config struct
3125  * @driver_dump:        driver dump struct
3126  *
3127  * Return value:
3128  *      nothing
3129  **/
3130 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3131                                    struct ipr_driver_dump *driver_dump)
3132 {
3133         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3134         driver_dump->location_entry.hdr.len =
3135                 sizeof(struct ipr_dump_location_entry) -
3136                 sizeof(struct ipr_dump_entry_header);
3137         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3138         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3139         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3140         driver_dump->hdr.num_entries++;
3141 }
3142
3143 /**
3144  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3145  * @ioa_cfg:    ioa config struct
3146  * @dump:               dump struct
3147  *
3148  * Return value:
3149  *      nothing
3150  **/
3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3152 {
3153         unsigned long start_addr, sdt_word;
3154         unsigned long lock_flags = 0;
3155         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3156         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3157         u32 num_entries, max_num_entries, start_off, end_off;
3158         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3159         struct ipr_sdt *sdt;
3160         int valid = 1;
3161         int i;
3162
3163         ENTER;
3164
3165         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166
3167         if (ioa_cfg->sdt_state != READ_DUMP) {
3168                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3169                 return;
3170         }
3171
3172         if (ioa_cfg->sis64) {
3173                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174                 ssleep(IPR_DUMP_DELAY_SECONDS);
3175                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176         }
3177
3178         start_addr = readl(ioa_cfg->ioa_mailbox);
3179
3180         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3181                 dev_err(&ioa_cfg->pdev->dev,
3182                         "Invalid dump table format: %lx\n", start_addr);
3183                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184                 return;
3185         }
3186
3187         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3188
3189         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3190
3191         /* Initialize the overall dump header */
3192         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3193         driver_dump->hdr.num_entries = 1;
3194         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3195         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3196         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3197         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3198
3199         ipr_dump_version_data(ioa_cfg, driver_dump);
3200         ipr_dump_location_data(ioa_cfg, driver_dump);
3201         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3202         ipr_dump_trace_data(ioa_cfg, driver_dump);
3203
3204         /* Update dump_header */
3205         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3206
3207         /* IOA Dump entry */
3208         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3209         ioa_dump->hdr.len = 0;
3210         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3211         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3212
3213         /* First entries in sdt are actually a list of dump addresses and
3214          * lengths to gather the real dump data.  sdt represents the pointer
3215          * to the ioa generated dump table.  Dump data will be extracted based
3216          * on entries in this table */
3217         sdt = &ioa_dump->sdt;
3218
3219         if (ioa_cfg->sis64) {
3220                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3221                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3222         } else {
3223                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3224                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3225         }
3226
3227         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3228                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3229         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3230                                         bytes_to_copy / sizeof(__be32));
3231
3232         /* Smart Dump table is ready to use and the first entry is valid */
3233         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3234             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3235                 dev_err(&ioa_cfg->pdev->dev,
3236                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3237                         rc, be32_to_cpu(sdt->hdr.state));
3238                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3239                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3240                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241                 return;
3242         }
3243
3244         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3245
3246         if (num_entries > max_num_entries)
3247                 num_entries = max_num_entries;
3248
3249         /* Update dump length to the actual data to be copied */
3250         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3251         if (ioa_cfg->sis64)
3252                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3253         else
3254                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3255
3256         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257
3258         for (i = 0; i < num_entries; i++) {
3259                 if (ioa_dump->hdr.len > max_dump_size) {
3260                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3261                         break;
3262                 }
3263
3264                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3265                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3266                         if (ioa_cfg->sis64)
3267                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3268                         else {
3269                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3270                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3271
3272                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3273                                         bytes_to_copy = end_off - start_off;
3274                                 else
3275                                         valid = 0;
3276                         }
3277                         if (valid) {
3278                                 if (bytes_to_copy > max_dump_size) {
3279                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3280                                         continue;
3281                                 }
3282
3283                                 /* Copy data from adapter to driver buffers */
3284                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3285                                                             bytes_to_copy);
3286
3287                                 ioa_dump->hdr.len += bytes_copied;
3288
3289                                 if (bytes_copied != bytes_to_copy) {
3290                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3291                                         break;
3292                                 }
3293                         }
3294                 }
3295         }
3296
3297         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3298
3299         /* Update dump_header */
3300         driver_dump->hdr.len += ioa_dump->hdr.len;
3301         wmb();
3302         ioa_cfg->sdt_state = DUMP_OBTAINED;
3303         LEAVE;
3304 }
3305
3306 #else
3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3308 #endif
3309
3310 /**
3311  * ipr_release_dump - Free adapter dump memory
3312  * @kref:       kref struct
3313  *
3314  * Return value:
3315  *      nothing
3316  **/
3317 static void ipr_release_dump(struct kref *kref)
3318 {
3319         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3320         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3321         unsigned long lock_flags = 0;
3322         int i;
3323
3324         ENTER;
3325         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326         ioa_cfg->dump = NULL;
3327         ioa_cfg->sdt_state = INACTIVE;
3328         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329
3330         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3331                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3332
3333         vfree(dump->ioa_dump.ioa_data);
3334         kfree(dump);
3335         LEAVE;
3336 }
3337
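/**
 * ipr_add_remove_thread - Add/remove devices on the mid-layer
 * @work:       work struct
 *
 * Called at task level from a work thread. Removes devices flagged
 * del_from_ml, adds devices flagged add_to_ml, then signals scan
 * completion with a change uevent.
 *
 * Return value:
 *      nothing
 **/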
3338 static void ipr_add_remove_thread(struct work_struct *work)
3339 {
3340         unsigned long lock_flags;
3341         struct ipr_resource_entry *res;
3342         struct scsi_device *sdev;
3343         struct ipr_ioa_cfg *ioa_cfg =
3344                 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3345         u8 bus, target, lun;
3346         int did_work;
3347
3348         ENTER;
3349         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350
3351 restart:
3352         do {
3353                 did_work = 0;
3354                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3355                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356                         return;
3357                 }
3358
3359                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3360                         if (res->del_from_ml && res->sdev) {
3361                                 did_work = 1;
3362                                 sdev = res->sdev;
3363                                 if (!scsi_device_get(sdev)) {
3364                                         if (!res->add_to_ml)
3365                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3366                                         else
3367                                                 res->del_from_ml = 0;
3368                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369                                         scsi_remove_device(sdev);
3370                                         scsi_device_put(sdev);
3371                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372                                 }
3373                                 break;
3374                         }
3375                 }
3376         } while (did_work);
3377
3378         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379                 if (res->add_to_ml) {
3380                         bus = res->bus;
3381                         target = res->target;
3382                         lun = res->lun;
3383                         res->add_to_ml = 0;
3384                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3386                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387                         goto restart;
3388                 }
3389         }
3390
3391         ioa_cfg->scan_done = 1;
3392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3394         LEAVE;
3395 }
3396
3397 /**
3398  * ipr_worker_thread - Worker thread
3399  * @work:               work struct
3400  *
3401  * Called at task level from a work thread. This function handles
3402  * adapter dumps and schedules the device add/remove work as
3403  * configuration changes are detected by the adapter.
3404  *
3405  * Return value:
3406  *      nothing
3407  **/
3408 static void ipr_worker_thread(struct work_struct *work)
3409 {
3410         unsigned long lock_flags;
3411         struct ipr_dump *dump;
3412         struct ipr_ioa_cfg *ioa_cfg =
3413                 container_of(work, struct ipr_ioa_cfg, work_q);
3414
3415         ENTER;
3416         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417
3418         if (ioa_cfg->sdt_state == READ_DUMP) {
3419                 dump = ioa_cfg->dump;
3420                 if (!dump) {
3421                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422                         return;
3423                 }
3424                 kref_get(&dump->kref);
3425                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426                 ipr_get_ioa_dump(ioa_cfg, dump);
3427                 kref_put(&dump->kref, ipr_release_dump);
3428
3429                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3430                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3431                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3432                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433                 return;
3434         }
3435
3436         if (ioa_cfg->scsi_unblock) {
3437                 ioa_cfg->scsi_unblock = 0;
3438                 ioa_cfg->scsi_blocked = 0;
3439                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440                 scsi_unblock_requests(ioa_cfg->host);
3441                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442                 if (ioa_cfg->scsi_blocked)
3443                         scsi_block_requests(ioa_cfg->host);
3444         }
3445
3446         if (!ioa_cfg->scan_enabled) {
3447                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3448                 return;
3449         }
3450
3451         schedule_work(&ioa_cfg->scsi_add_work_q);
3452
3453         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454         LEAVE;
3455 }
3456
3457 #ifdef CONFIG_SCSI_IPR_TRACE
3458 /**
3459  * ipr_read_trace - Dump the adapter trace
3460  * @filp:               open sysfs file
3461  * @kobj:               kobject struct
3462  * @bin_attr:           bin_attribute struct
3463  * @buf:                buffer
3464  * @off:                offset
3465  * @count:              buffer size
3466  *
3467  * Return value:
3468  *      number of bytes printed to buffer
3469  **/
3470 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3471                               struct bin_attribute *bin_attr,
3472                               char *buf, loff_t off, size_t count)
3473 {
3474         struct device *dev = container_of(kobj, struct device, kobj);
3475         struct Scsi_Host *shost = class_to_shost(dev);
3476         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3477         unsigned long lock_flags = 0;
3478         ssize_t ret;
3479
3480         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3482                                 IPR_TRACE_SIZE);
3483         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484
3485         return ret;
3486 }
3487
3488 static struct bin_attribute ipr_trace_attr = {
3489         .attr = {
3490                 .name = "trace",
3491                 .mode = S_IRUGO,
3492         },
3493         .size = 0,
3494         .read = ipr_read_trace,
3495 };
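/*
 * The trace buffer is read-only from user space; it can be captured
 * with, e.g. (host number varies by system):
 *
 *      dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin
 */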
3496 #endif
3497
3498 /**
3499  * ipr_show_fw_version - Show the firmware version
3500  * @dev:        class device struct
3501  * @buf:        buffer
3502  *
3503  * Return value:
3504  *      number of bytes printed to buffer
3505  **/
3506 static ssize_t ipr_show_fw_version(struct device *dev,
3507                                    struct device_attribute *attr, char *buf)
3508 {
3509         struct Scsi_Host *shost = class_to_shost(dev);
3510         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3512         unsigned long lock_flags = 0;
3513         int len;
3514
3515         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3516         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3517                        ucode_vpd->major_release, ucode_vpd->card_type,
3518                        ucode_vpd->minor_release[0],
3519                        ucode_vpd->minor_release[1]);
3520         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521         return len;
3522 }
3523
3524 static struct device_attribute ipr_fw_version_attr = {
3525         .attr = {
3526                 .name =         "fw_version",
3527                 .mode =         S_IRUGO,
3528         },
3529         .show = ipr_show_fw_version,
3530 };
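/*
 * Example (host number varies):
 *
 *      cat /sys/class/scsi_host/host0/fw_version
 *
 * prints the four microcode VPD bytes as a single hex word.
 */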
3531
3532 /**
3533  * ipr_show_log_level - Show the adapter's error logging level
3534  * @dev:        class device struct
3535  * @buf:        buffer
3536  *
3537  * Return value:
3538  *      number of bytes printed to buffer
3539  **/
3540 static ssize_t ipr_show_log_level(struct device *dev,
3541                                    struct device_attribute *attr, char *buf)
3542 {
3543         struct Scsi_Host *shost = class_to_shost(dev);
3544         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3545         unsigned long lock_flags = 0;
3546         int len;
3547
3548         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3549         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3550         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3551         return len;
3552 }
3553
3554 /**
3555  * ipr_store_log_level - Change the adapter's error logging level
3556  * @dev:        class device struct
3557  * @buf:        buffer
3558  *
3559  * Return value:
3560  *      number of bytes printed to buffer
3561  *      number of bytes consumed from the buffer
3562 static ssize_t ipr_store_log_level(struct device *dev,
3563                                    struct device_attribute *attr,
3564                                    const char *buf, size_t count)
3565 {
3566         struct Scsi_Host *shost = class_to_shost(dev);
3567         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3568         unsigned long lock_flags = 0;
3569
3570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3572         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573         return strlen(buf);
3574 }
3575
3576 static struct device_attribute ipr_log_level_attr = {
3577         .attr = {
3578                 .name =         "log_level",
3579                 .mode =         S_IRUGO | S_IWUSR,
3580         },
3581         .show = ipr_show_log_level,
3582         .store = ipr_store_log_level
3583 };
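/*
 * Example (host number varies):
 *
 *      echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * sets the error logging level for this adapter only, unlike the
 * ipr_log_level module parameter which sets the default for all.
 */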
3584
3585 /**
3586  * ipr_store_diagnostics - IOA Diagnostics interface
3587  * @dev:        device struct
3588  * @buf:        buffer
3589  * @count:      buffer size
3590  *
3591  * This function will reset the adapter and wait a reasonable
3592  * amount of time for any errors that the adapter might log.
3593  *
3594  * Return value:
3595  *      count on success / other on failure
3596  **/
3597 static ssize_t ipr_store_diagnostics(struct device *dev,
3598                                      struct device_attribute *attr,
3599                                      const char *buf, size_t count)
3600 {
3601         struct Scsi_Host *shost = class_to_shost(dev);
3602         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3603         unsigned long lock_flags = 0;
3604         int rc = count;
3605
3606         if (!capable(CAP_SYS_ADMIN))
3607                 return -EACCES;
3608
3609         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3610         while (ioa_cfg->in_reset_reload) {
3611                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3613                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614         }
3615
3616         ioa_cfg->errors_logged = 0;
3617         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3618
3619         if (ioa_cfg->in_reset_reload) {
3620                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3622
3623                 /* Wait for a second for any errors to be logged */
3624                 msleep(1000);
3625         } else {
3626                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627                 return -EIO;
3628         }
3629
3630         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3631         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3632                 rc = -EIO;
3633         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3634
3635         return rc;
3636 }
3637
3638 static struct device_attribute ipr_diagnostics_attr = {
3639         .attr = {
3640                 .name =         "run_diagnostics",
3641                 .mode =         S_IWUSR,
3642         },
3643         .store = ipr_store_diagnostics
3644 };
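/*
 * Example (requires CAP_SYS_ADMIN; host number varies):
 *
 *      echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * Any write triggers the reset; the write fails with EIO if the reset
 * could not start or if errors were logged afterwards.
 */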
3645
3646 /**
3647  * ipr_show_adapter_state - Show the adapter's state
3648  * @dev:        device struct
3649  * @buf:        buffer
3650  *
3651  * Return value:
3652  *      number of bytes printed to buffer
3653  **/
3654 static ssize_t ipr_show_adapter_state(struct device *dev,
3655                                       struct device_attribute *attr, char *buf)
3656 {
3657         struct Scsi_Host *shost = class_to_shost(dev);
3658         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3659         unsigned long lock_flags = 0;
3660         int len;
3661
3662         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3663         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3664                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3665         else
3666                 len = snprintf(buf, PAGE_SIZE, "online\n");
3667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668         return len;
3669 }
3670
3671 /**
3672  * ipr_store_adapter_state - Change adapter state
3673  * @dev:        device struct
3674  * @buf:        buffer
3675  * @count:      buffer size
3676  *
3677  * This function will change the adapter's state.
3678  *
3679  * Return value:
3680  *      count on success / other on failure
3681  **/
3682 static ssize_t ipr_store_adapter_state(struct device *dev,
3683                                        struct device_attribute *attr,
3684                                        const char *buf, size_t count)
3685 {
3686         struct Scsi_Host *shost = class_to_shost(dev);
3687         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3688         unsigned long lock_flags;
3689         int result = count, i;
3690
3691         if (!capable(CAP_SYS_ADMIN))
3692                 return -EACCES;
3693
3694         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3695         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3696             !strncmp(buf, "online", 6)) {
3697                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3698                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3699                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3700                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3701                 }
3702                 wmb();
3703                 ioa_cfg->reset_retries = 0;
3704                 ioa_cfg->in_ioa_bringdown = 0;
3705                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3706         }
3707         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710         return result;
3711 }
3712
3713 static struct device_attribute ipr_ioa_state_attr = {
3714         .attr = {
3715                 .name =         "online_state",
3716                 .mode =         S_IRUGO | S_IWUSR,
3717         },
3718         .show = ipr_show_adapter_state,
3719         .store = ipr_store_adapter_state
3720 };
3721
3722 /**
3723  * ipr_store_reset_adapter - Reset the adapter
3724  * @dev:        device struct
3725  * @buf:        buffer
3726  * @count:      buffer size
3727  *
3728  * This function will reset the adapter.
3729  *
3730  * Return value:
3731  *      count on success / other on failure
3732  **/
3733 static ssize_t ipr_store_reset_adapter(struct device *dev,
3734                                        struct device_attribute *attr,
3735                                        const char *buf, size_t count)
3736 {
3737         struct Scsi_Host *shost = class_to_shost(dev);
3738         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3739         unsigned long lock_flags;
3740         int result = count;
3741
3742         if (!capable(CAP_SYS_ADMIN))
3743                 return -EACCES;
3744
3745         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3746         if (!ioa_cfg->in_reset_reload)
3747                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3748         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3749         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3750
3751         return result;
3752 }
3753
3754 static struct device_attribute ipr_ioa_reset_attr = {
3755         .attr = {
3756                 .name =         "reset_host",
3757                 .mode =         S_IWUSR,
3758         },
3759         .store = ipr_store_reset_adapter
3760 };
3761
3762 static int ipr_iopoll(struct irq_poll *iop, int budget);
3763 /**
3764  * ipr_show_iopoll_weight - Show ipr polling mode
3765  * @dev:        class device struct
3766  * @buf:        buffer
3767  *
3768  * Return value:
3769  *      number of bytes printed to buffer
3770  **/
3771 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3772                                    struct device_attribute *attr, char *buf)
3773 {
3774         struct Scsi_Host *shost = class_to_shost(dev);
3775         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3776         unsigned long lock_flags = 0;
3777         int len;
3778
3779         spin_lock_irqsave(shost->host_lock, lock_flags);
3780         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3781         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3782
3783         return len;
3784 }
3785
3786 /**
3787  * ipr_store_iopoll_weight - Change the adapter's polling mode
3788  * @dev:        class device struct
3789  * @buf:        buffer
3790  *
3791  * Return value:
3792  *      number of bytes consumed from the buffer
3793  **/
3794 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3795                                         struct device_attribute *attr,
3796                                         const char *buf, size_t count)
3797 {
3798         struct Scsi_Host *shost = class_to_shost(dev);
3799         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3800         unsigned long user_iopoll_weight;
3801         unsigned long lock_flags = 0;
3802         int i;
3803
3804         if (!ioa_cfg->sis64) {
3805                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3806                 return -EINVAL;
3807         }
3808         if (kstrtoul(buf, 10, &user_iopoll_weight))
3809                 return -EINVAL;
3810
3811         if (user_iopoll_weight > 256) {
3812                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3813                 return -EINVAL;
3814         }
3815
3816         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3817                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged\n");
3818                 return strlen(buf);
3819         }
3820
3821         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3822                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3823                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3824         }
3825
3826         spin_lock_irqsave(shost->host_lock, lock_flags);
3827         ioa_cfg->iopoll_weight = user_iopoll_weight;
3828         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3829                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3830                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3831                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3832                 }
3833         }
3834         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3835
3836         return strlen(buf);
3837 }
3838
3839 static struct device_attribute ipr_iopoll_weight_attr = {
3840         .attr = {
3841                 .name =         "iopoll_weight",
3842                 .mode =         S_IRUGO | S_IWUSR,
3843         },
3844         .show = ipr_show_iopoll_weight,
3845         .store = ipr_store_iopoll_weight
3846 };
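/*
 * Example (sis64 adapters with multiple MSI-X vectors only; host
 * number varies):
 *
 *      echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *
 * A weight of 0 disables irq_poll; values up to 256 are accepted.
 */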
3847
3848 /**
3849  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3850  * @buf_len:            buffer length
3851  *
3852  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3853  * list to use for microcode download
3854  *
3855  * Return value:
3856  *      pointer to sglist / NULL on failure
3857  **/
3858 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3859 {
3860         int sg_size, order;
3861         struct ipr_sglist *sglist;
3862
3863         /* Get the minimum size per scatter/gather element */
3864         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3865
3866         /* Get the actual size per element */
3867         order = get_order(sg_size);
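        /*
         * Illustrative sizing, assuming IPR_MAX_SGLIST is 64 and 4 KB
         * pages: a 504 KB image gives sg_size = 516096 / 63 = 8192, so
         * order 1, i.e. two pages per scatter/gather element.
         */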
3868
3869         /* Allocate a scatter/gather list for the DMA */
3870         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3871         if (sglist == NULL) {
3872                 ipr_trace;
3873                 return NULL;
3874         }
3875         sglist->order = order;
3876         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3877                                               &sglist->num_sg);
3878         if (!sglist->scatterlist) {
3879                 kfree(sglist);
3880                 return NULL;
3881         }
3882
3883         return sglist;
3884 }
3885
3886 /**
3887  * ipr_free_ucode_buffer - Frees a microcode download buffer
3888  * @sglist:             scatter/gather list pointer
3889  *
3890  * Free a DMA'able ucode download buffer previously allocated with
3891  * ipr_alloc_ucode_buffer
3892  *
3893  * Return value:
3894  *      nothing
3895  **/
3896 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3897 {
3898         sgl_free_order(sglist->scatterlist, sglist->order);
3899         kfree(sglist);
3900 }
3901
3902 /**
3903  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3904  * @sglist:             scatter/gather list pointer
3905  * @buffer:             buffer pointer
3906  * @len:                buffer length
3907  *
3908  * Copy a microcode image from a user buffer into a buffer allocated by
3909  * ipr_alloc_ucode_buffer
3910  *
3911  * Return value:
3912  *      0 on success / other on failure
3913  **/
3914 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3915                                  u8 *buffer, u32 len)
3916 {
3917         int bsize_elem, i, result = 0;
3918         struct scatterlist *sg;
3919         void *kaddr;
3920
3921         /* Determine the actual number of bytes per element */
3922         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3923
3924         sg = sglist->scatterlist;
3925
3926         for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3927                         buffer += bsize_elem) {
3928                 struct page *page = sg_page(sg);
3929
3930                 kaddr = kmap(page);
3931                 memcpy(kaddr, buffer, bsize_elem);
3932                 kunmap(page);
3933
3934                 sg->length = bsize_elem;
3940         }
3941
3942         if (len % bsize_elem) {
3943                 struct page *page = sg_page(sg);
3944
3945                 kaddr = kmap(page);
3946                 memcpy(kaddr, buffer, len % bsize_elem);
3947                 kunmap(page);
3948
3949                 sg->length = len % bsize_elem;
3950         }
3951
3952         sglist->buffer_len = len;
3953         return result;
3954 }
3955
3956 /**
3957  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3958  * @ipr_cmd:            ipr command struct
3959  * @sglist:             scatter/gather list
3960  *
3961  * Builds a microcode download IOA data list (IOADL).
3962  *
3963  **/
3964 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3965                                     struct ipr_sglist *sglist)
3966 {
3967         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3968         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3969         struct scatterlist *scatterlist = sglist->scatterlist;
3970         struct scatterlist *sg;
3971         int i;
3972
3973         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3974         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3975         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3976
3977         ioarcb->ioadl_len =
3978                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3979         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3980                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3981                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3982                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3983         }
3984
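        /* Flag the final descriptor so the IOA knows where the list ends */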
3985         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3986 }
3987
3988 /**
3989  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3990  * @ipr_cmd:    ipr command struct
3991  * @sglist:             scatter/gather list
3992  *
3993  * Builds a microcode download IOA data list (IOADL).
3994  *
3995  **/
3996 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3997                                   struct ipr_sglist *sglist)
3998 {
3999         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4000         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4001         struct scatterlist *scatterlist = sglist->scatterlist;
4002         struct scatterlist *sg;
4003         int i;
4004
4005         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4006         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4007         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4008
4009         ioarcb->ioadl_len =
4010                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4011
4012         for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
4013                 ioadl[i].flags_and_data_len =
4014                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4015                 ioadl[i].address =
4016                         cpu_to_be32(sg_dma_address(sg));
4017         }
4018
4019         ioadl[i-1].flags_and_data_len |=
4020                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4021 }
4022
4023 /**
4024  * ipr_update_ioa_ucode - Update IOA's microcode
4025  * @ioa_cfg:    ioa config struct
4026  * @sglist:             scatter/gather list
4027  *
4028  * Initiate an adapter reset to update the IOA's microcode
4029  *
4030  * Return value:
4031  *      0 on success / -EIO on failure
4032  **/
4033 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4034                                 struct ipr_sglist *sglist)
4035 {
4036         unsigned long lock_flags;
4037
4038         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4039         while (ioa_cfg->in_reset_reload) {
4040                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4041                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4042                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4043         }
4044
4045         if (ioa_cfg->ucode_sglist) {
4046                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4047                 dev_err(&ioa_cfg->pdev->dev,
4048                         "Microcode download already in progress\n");
4049                 return -EIO;
4050         }
4051
4052         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4053                                         sglist->scatterlist, sglist->num_sg,
4054                                         DMA_TO_DEVICE);
4055
4056         if (!sglist->num_dma_sg) {
4057                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4058                 dev_err(&ioa_cfg->pdev->dev,
4059                         "Failed to map microcode download buffer!\n");
4060                 return -EIO;
4061         }
4062
4063         ioa_cfg->ucode_sglist = sglist;
4064         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4065         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4066         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4067
4068         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4069         ioa_cfg->ucode_sglist = NULL;
4070         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4071         return 0;
4072 }
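
/*
 * Hedged sketch of the dma_map_sg() lifecycle that the function above
 * begins (illustrative helper only; in the driver the ucode buffer is
 * unmapped later, from the reset job, once the download completes):
 */
static int __maybe_unused ipr_example_dma_map_cycle(struct device *dev,
                                                    struct scatterlist *sgl,
                                                    int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)            /* zero mapped entries means failure */
                return -EIO;

        /* ... point the hardware at the "mapped" entries and wait ... */

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* original nents */
        return 0;
}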
4073
4074 /**
4075  * ipr_store_update_fw - Update the firmware on the adapter
4076  * @dev:        device struct
 * @attr:       device attribute (unused)
4077  * @buf:        buffer
4078  * @count:      buffer size
4079  *
4080  * This function will update the firmware on the adapter.
4081  *
4082  * Return value:
4083  *      count on success / other on failure
4084  **/
4085 static ssize_t ipr_store_update_fw(struct device *dev,
4086                                    struct device_attribute *attr,
4087                                    const char *buf, size_t count)
4088 {
4089         struct Scsi_Host *shost = class_to_shost(dev);
4090         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4091         struct ipr_ucode_image_header *image_hdr;
4092         const struct firmware *fw_entry;
4093         struct ipr_sglist *sglist;
4094         char fname[100];
4095         char *src;
4096         char *endline;
4097         int result, dnld_size;
4098
4099         if (!capable(CAP_SYS_ADMIN))
4100                 return -EACCES;
4101
4102         snprintf(fname, sizeof(fname), "%s", buf);
4103
4104         endline = strchr(fname, '\n');
4105         if (endline)
4106                 *endline = '\0';
4107
4108         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4109                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4110                 return -EIO;
4111         }
4112
4113         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4114
4115         src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4116         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4117         sglist = ipr_alloc_ucode_buffer(dnld_size);
4118
4119         if (!sglist) {
4120                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4121                 release_firmware(fw_entry);
4122                 return -ENOMEM;
4123         }
4124
4125         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4126
4127         if (result) {
4128                 dev_err(&ioa_cfg->pdev->dev,
4129                         "Microcode buffer copy to DMA buffer failed\n");
4130                 goto out;
4131         }
4132
4133         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4134
4135         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4136
4137         if (!result)
4138                 result = count;
4139 out:
4140         ipr_free_ucode_buffer(sglist);
4141         release_firmware(fw_entry);
4142         return result;
4143 }
4144
4145 static struct device_attribute ipr_update_fw_attr = {
4146         .attr = {
4147                 .name =         "update_fw",
4148                 .mode =         S_IWUSR,
4149         },
4150         .store = ipr_store_update_fw
4151 };
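
/*
 * Usage sketch: with the microcode image visible to the firmware loader
 * (typically under /lib/firmware), writing its file name to this attribute
 * starts the download and the adapter reset it requires. The file name
 * below is purely illustrative; request_firmware() resolves it:
 *
 *      echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */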
4152
4153 /**
4154  * ipr_show_fw_type - Show the adapter's firmware type.
4155  * @dev:        class device struct
 * @attr:       device attribute (unused)
4156  * @buf:        buffer
4157  *
4158  * Return value:
4159  *      number of bytes printed to buffer
4160  **/
4161 static ssize_t ipr_show_fw_type(struct device *dev,
4162                                 struct device_attribute *attr, char *buf)
4163 {
4164         struct Scsi_Host *shost = class_to_shost(dev);
4165         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4166         unsigned long lock_flags = 0;
4167         int len;
4168
4169         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4170         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4171         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4172         return len;
4173 }
4174
4175 static struct device_attribute ipr_ioa_fw_type_attr = {
4176         .attr = {
4177                 .name =         "fw_type",
4178                 .mode =         S_IRUGO,
4179         },
4180         .show = ipr_show_fw_type
4181 };
4182
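/**
 * ipr_read_async_err_log - Read the oldest pending asynchronous error log entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no entry is pending
 **/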
4183 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4184                                 struct bin_attribute *bin_attr, char *buf,
4185                                 loff_t off, size_t count)
4186 {
4187         struct device *cdev = container_of(kobj, struct device, kobj);
4188         struct Scsi_Host *shost = class_to_shost(cdev);
4189         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4190         struct ipr_hostrcb *hostrcb;
4191         unsigned long lock_flags = 0;
4192         int ret;
4193
4194         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4195         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4196                                         struct ipr_hostrcb, queue);
4197         if (!hostrcb) {
4198                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4199                 return 0;
4200         }
4201         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4202                                 sizeof(hostrcb->hcam));
4203         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4204         return ret;
4205 }
4206
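/**
 * ipr_next_async_err_log - Reclaim the current error log entry and advance
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer (unused)
 * @off:        offset (unused)
 * @count:      buffer size
 *
 * Writing the attribute returns the entry currently exposed through
 * async_err_log to the free queue, so the next entry, if any, becomes
 * readable.
 *
 * Return value:
 *      count
 **/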
4207 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4208                                 struct bin_attribute *bin_attr, char *buf,
4209                                 loff_t off, size_t count)
4210 {
4211         struct device *cdev = container_of(kobj, struct device, kobj);
4212         struct Scsi_Host *shost = class_to_shost(cdev);
4213         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4214         struct ipr_hostrcb *hostrcb;
4215         unsigned long lock_flags = 0;
4216
4217         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4218         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4219                                         struct ipr_hostrcb, queue);
4220         if (!hostrcb) {
4221                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4222                 return count;
4223         }
4224
4225         /* Reclaim hostrcb before exit */
4226         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4227         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4228         return count;
4229 }
4230
4231 static struct bin_attribute ipr_ioa_async_err_log = {
4232         .attr = {
4233                 .name =         "async_err_log",
4234                 .mode =         S_IRUGO | S_IWUSR,
4235         },
4236         .size = 0,
4237         .read = ipr_read_async_err_log,
4238         .write = ipr_next_async_err_log
4239 };
4240
4241 static struct device_attribute *ipr_ioa_attrs[] = {
4242         &ipr_fw_version_attr,
4243         &ipr_log_level_attr,
4244         &ipr_diagnostics_attr,
4245         &ipr_ioa_state_attr,
4246         &ipr_ioa_reset_attr,
4247         &ipr_update_fw_attr,
4248         &ipr_ioa_fw_type_attr,
4249         &ipr_iopoll_weight_attr,
4250         NULL,
4251 };
4252
4253 #ifdef CONFIG_SCSI_IPR_DUMP
4254 /**
4255  * ipr_read_dump - Dump the adapter
4256  * @filp:               open sysfs file
4257  * @kobj:               kobject struct
4258  * @bin_attr:           bin_attribute struct
4259  * @buf:                buffer
4260  * @off:                offset
4261  * @count:              buffer size
4262  *
4263  * Return value:
4264  *      number of bytes read / other on failure
4265  **/
4266 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4267                              struct bin_attribute *bin_attr,
4268                              char *buf, loff_t off, size_t count)
4269 {
4270         struct device *cdev = container_of(kobj, struct device, kobj);
4271         struct Scsi_Host *shost = class_to_shost(cdev);
4272         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4273         struct ipr_dump *dump;
4274         unsigned long lock_flags = 0;
4275         char *src;
4276         int len, sdt_end;
4277         size_t rc = count;
4278
4279         if (!capable(CAP_SYS_ADMIN))
4280                 return -EACCES;
4281
4282         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283         dump = ioa_cfg->dump;
4284
4285         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4286                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4287                 return 0;
4288         }
4289         kref_get(&dump->kref);
4290         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4291
4292         if (off > dump->driver_dump.hdr.len) {
4293                 kref_put(&dump->kref, ipr_release_dump);
4294                 return 0;
4295         }
4296
4297         if (off + count > dump->driver_dump.hdr.len) {
4298                 count = dump->driver_dump.hdr.len - off;
4299                 rc = count;
4300         }
4301
4302         if (count && off < sizeof(dump->driver_dump)) {
4303                 if (off + count > sizeof(dump->driver_dump))
4304                         len = sizeof(dump->driver_dump) - off;
4305                 else
4306                         len = count;
4307         src = (char *)&dump->driver_dump + off;
4308                 memcpy(buf, src, len);
4309                 buf += len;
4310                 off += len;
4311                 count -= len;
4312         }
4313
4314         off -= sizeof(dump->driver_dump);
4315
4316         if (ioa_cfg->sis64)
4317                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4318                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4319                            sizeof(struct ipr_sdt_entry));
4320         else
4321                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4322                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4323
4324         if (count && off < sdt_end) {
4325                 if (off + count > sdt_end)
4326                         len = sdt_end - off;
4327                 else
4328                         len = count;
4329         src = (char *)&dump->ioa_dump + off;
4330                 memcpy(buf, src, len);
4331                 buf += len;
4332                 off += len;
4333                 count -= len;
4334         }
4335
4336         off -= sdt_end;
4337
4338         while (count) {
4339                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4340                         len = PAGE_ALIGN(off) - off;
4341                 else
4342                         len = count;
4343         src = (char *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4344                 src += off & ~PAGE_MASK;
4345                 memcpy(buf, src, len);
4346                 buf += len;
4347                 off += len;
4348                 count -= len;
4349         }
4350
4351         kref_put(&dump->kref, ipr_release_dump);
4352         return rc;
4353 }
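
/*
 * The read above repeats one windowing step per dump region. A minimal
 * sketch of that step (hypothetical helper, not used by the driver):
 * clamp the request to the region, copy, advance the cursors, then
 * rebase the offset so the next region is addressed from zero.
 */
static size_t __maybe_unused ipr_example_copy_window(char **buf, loff_t *off,
                                                     size_t *count,
                                                     const char *src,
                                                     size_t region_len)
{
        size_t len = 0;

        if (*count && *off < region_len) {
                len = min_t(size_t, *count, region_len - *off);
                memcpy(*buf, src + *off, len);
                *buf += len;
                *off += len;
                *count -= len;
        }
        *off -= region_len;     /* rebase offset against the next region */

        return len;
}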
4354
4355 /**
4356  * ipr_alloc_dump - Prepare for adapter dump
4357  * @ioa_cfg:    ioa config struct
4358  *
4359  * Return value:
4360  *      0 on success / other on failure
4361  **/
4362 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4363 {
4364         struct ipr_dump *dump;
4365         __be32 **ioa_data;
4366         unsigned long lock_flags = 0;
4367
4368         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4369
4370         if (!dump) {
4371                 ipr_err("Dump memory allocation failed\n");
4372                 return -ENOMEM;
4373         }
4374
4375         if (ioa_cfg->sis64)
4376                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4377                                               sizeof(__be32 *)));
4378         else
4379                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4380                                               sizeof(__be32 *)));
4381
4382         if (!ioa_data) {
4383                 ipr_err("Dump memory allocation failed\n");
4384                 kfree(dump);
4385                 return -ENOMEM;
4386         }
4387
4388         dump->ioa_dump.ioa_data = ioa_data;
4389
4390         kref_init(&dump->kref);
4391         dump->ioa_cfg = ioa_cfg;
4392
4393         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4394
4395         if (INACTIVE != ioa_cfg->sdt_state) {
4396                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4397                 vfree(dump->ioa_dump.ioa_data);
4398                 kfree(dump);
4399                 return 0;
4400         }
4401
4402         ioa_cfg->dump = dump;
4403         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4404         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4405                 ioa_cfg->dump_taken = 1;
4406                 schedule_work(&ioa_cfg->work_q);
4407         }
4408         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409
4410         return 0;
4411 }
4412
4413 /**
4414  * ipr_free_dump - Free adapter dump memory
4415  * @ioa_cfg:    ioa config struct
4416  *
4417  * Return value:
4418  *      0 on success / other on failure
4419  **/
4420 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4421 {
4422         struct ipr_dump *dump;
4423         unsigned long lock_flags = 0;
4424
4425         ENTER;
4426
4427         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4428         dump = ioa_cfg->dump;
4429         if (!dump) {
4430                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4431                 return 0;
4432         }
4433
4434         ioa_cfg->dump = NULL;
4435         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436
4437         kref_put(&dump->kref, ipr_release_dump);
4438
4439         LEAVE;
4440         return 0;
4441 }
4442
4443 /**
4444  * ipr_write_dump - Setup dump state of adapter
4445  * @filp:               open sysfs file
4446  * @kobj:               kobject struct
4447  * @bin_attr:           bin_attribute struct
4448  * @buf:                buffer
4449  * @off:                offset
4450  * @count:              buffer size
4451  *
4452  * Return value:
4453  *      count on success / other on failure
4454  **/
4455 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4456                               struct bin_attribute *bin_attr,
4457                               char *buf, loff_t off, size_t count)
4458 {
4459         struct device *cdev = container_of(kobj, struct device, kobj);
4460         struct Scsi_Host *shost = class_to_shost(cdev);
4461         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4462         int rc;
4463
4464         if (!capable(CAP_SYS_ADMIN))
4465                 return -EACCES;
4466
4467         if (buf[0] == '1')
4468                 rc = ipr_alloc_dump(ioa_cfg);
4469         else if (buf[0] == '0')
4470                 rc = ipr_free_dump(ioa_cfg);
4471         else
4472                 return -EINVAL;
4473
4474         if (rc)
4475                 return rc;
4476         else
4477                 return count;
4478 }
4479
4480 static struct bin_attribute ipr_dump_attr = {
4481         .attr = {
4482                 .name = "dump",
4483                 .mode = S_IRUSR | S_IWUSR,
4484         },
4485         .size = 0,
4486         .read = ipr_read_dump,
4487         .write = ipr_write_dump
4488 };
4489 #else
4490 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4491 #endif
4492
4493 /**
4494  * ipr_change_queue_depth - Change the device's queue depth
4495  * @sdev:       scsi device struct
4496  * @qdepth:     depth to set
4498  *
4499  * Return value:
4500  *      actual depth set
4501  **/
4502 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4503 {
4504         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4505         struct ipr_resource_entry *res;
4506         unsigned long lock_flags = 0;
4507
4508         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4509         res = (struct ipr_resource_entry *)sdev->hostdata;
4510
4511         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4512                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4513         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4514
4515         scsi_change_queue_depth(sdev, qdepth);
4516         return sdev->queue_depth;
4517 }
4518
4519 /**
4520  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4521  * @dev:        device struct
4522  * @attr:       device attribute structure
4523  * @buf:        buffer
4524  *
4525  * Return value:
4526  *      number of bytes printed to buffer
4527  **/
4528 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4529 {
4530         struct scsi_device *sdev = to_scsi_device(dev);
4531         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4532         struct ipr_resource_entry *res;
4533         unsigned long lock_flags = 0;
4534         ssize_t len = -ENXIO;
4535
4536         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4537         res = (struct ipr_resource_entry *)sdev->hostdata;
4538         if (res)
4539                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4540         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4541         return len;
4542 }
4543
4544 static struct device_attribute ipr_adapter_handle_attr = {
4545         .attr = {
4546                 .name =         "adapter_handle",
4547                 .mode =         S_IRUSR,
4548         },
4549         .show = ipr_show_adapter_handle
4550 };
4551
4552 /**
4553  * ipr_show_resource_path - Show the resource path or the resource address for
4554  *                          this device.
4555  * @dev:        device struct
4556  * @attr:       device attribute structure
4557  * @buf:        buffer
4558  *
4559  * Return value:
4560  *      number of bytes printed to buffer
4561  **/
4562 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4563 {
4564         struct scsi_device *sdev = to_scsi_device(dev);
4565         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4566         struct ipr_resource_entry *res;
4567         unsigned long lock_flags = 0;
4568         ssize_t len = -ENXIO;
4569         char buffer[IPR_MAX_RES_PATH_LENGTH];
4570
4571         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4572         res = (struct ipr_resource_entry *)sdev->hostdata;
4573         if (res && ioa_cfg->sis64)
4574                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4575                                __ipr_format_res_path(res->res_path, buffer,
4576                                                      sizeof(buffer)));
4577         else if (res)
4578                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4579                                res->bus, res->target, res->lun);
4580
4581         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4582         return len;
4583 }
4584
4585 static struct device_attribute ipr_resource_path_attr = {
4586         .attr = {
4587                 .name =         "resource_path",
4588                 .mode =         S_IRUGO,
4589         },
4590         .show = ipr_show_resource_path
4591 };
4592
4593 /**
4594  * ipr_show_device_id - Show the device_id for this device.
4595  * @dev:        device struct
4596  * @attr:       device attribute structure
4597  * @buf:        buffer
4598  *
4599  * Return value:
4600  *      number of bytes printed to buffer
4601  **/
4602 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4603 {
4604         struct scsi_device *sdev = to_scsi_device(dev);
4605         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4606         struct ipr_resource_entry *res;
4607         unsigned long lock_flags = 0;
4608         ssize_t len = -ENXIO;
4609
4610         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4611         res = (struct ipr_resource_entry *)sdev->hostdata;
4612         if (res && ioa_cfg->sis64)
4613                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4614         else if (res)
4615                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4616
4617         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4618         return len;
4619 }
4620
4621 static struct device_attribute ipr_device_id_attr = {
4622         .attr = {
4623                 .name =         "device_id",
4624                 .mode =         S_IRUGO,
4625         },
4626         .show = ipr_show_device_id
4627 };
4628
4629 /**
4630  * ipr_show_resource_type - Show the resource type for this device.
4631  * @dev:        device struct
4632  * @attr:       device attribute structure
4633  * @buf:        buffer
4634  *
4635  * Return value:
4636  *      number of bytes printed to buffer
4637  **/
4638 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4639 {
4640         struct scsi_device *sdev = to_scsi_device(dev);
4641         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4642         struct ipr_resource_entry *res;
4643         unsigned long lock_flags = 0;
4644         ssize_t len = -ENXIO;
4645
4646         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4647         res = (struct ipr_resource_entry *)sdev->hostdata;
4648
4649         if (res)
4650                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4651
4652         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4653         return len;
4654 }
4655
4656 static struct device_attribute ipr_resource_type_attr = {
4657         .attr = {
4658                 .name =         "resource_type",
4659                 .mode =         S_IRUGO,
4660         },
4661         .show = ipr_show_resource_type
4662 };
4663
4664 /**
4665  * ipr_show_raw_mode - Show the adapter's raw mode
4666  * @dev:        class device struct
 * @attr:       device attribute (unused)
4667  * @buf:        buffer
4668  *
4669  * Return value:
4670  *      number of bytes printed to buffer
4671  **/
4672 static ssize_t ipr_show_raw_mode(struct device *dev,
4673                                  struct device_attribute *attr, char *buf)
4674 {
4675         struct scsi_device *sdev = to_scsi_device(dev);
4676         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4677         struct ipr_resource_entry *res;
4678         unsigned long lock_flags = 0;
4679         ssize_t len;
4680
4681         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4682         res = (struct ipr_resource_entry *)sdev->hostdata;
4683         if (res)
4684                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4685         else
4686                 len = -ENXIO;
4687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688         return len;
4689 }
4690
4691 /**
4692  * ipr_store_raw_mode - Change the adapter's raw mode
4693  * @dev:        class device struct
 * @attr:       device attribute (unused)
4694  * @buf:        buffer
 * @count:      buffer size
4695  *
4696  * Return value:
4697  *      number of bytes consumed on success / other on failure
4698  **/
4699 static ssize_t ipr_store_raw_mode(struct device *dev,
4700                                   struct device_attribute *attr,
4701                                   const char *buf, size_t count)
4702 {
4703         struct scsi_device *sdev = to_scsi_device(dev);
4704         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4705         struct ipr_resource_entry *res;
4706         unsigned long lock_flags = 0;
4707         ssize_t len;
4708
4709         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4710         res = (struct ipr_resource_entry *)sdev->hostdata;
4711         if (res) {
4712                 if (ipr_is_af_dasd_device(res)) {
4713                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4714                         len = strlen(buf);
4715                         if (res->sdev)
4716                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4717                                         res->raw_mode ? "enabled" : "disabled");
4718                 } else
4719                         len = -EINVAL;
4720         } else
4721                 len = -ENXIO;
4722         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723         return len;
4724 }
4725
4726 static struct device_attribute ipr_raw_mode_attr = {
4727         .attr = {
4728                 .name =         "raw_mode",
4729                 .mode =         S_IRUGO | S_IWUSR,
4730         },
4731         .show = ipr_show_raw_mode,
4732         .store = ipr_store_raw_mode
4733 };
4734
4735 static struct device_attribute *ipr_dev_attrs[] = {
4736         &ipr_adapter_handle_attr,
4737         &ipr_resource_path_attr,
4738         &ipr_device_id_attr,
4739         &ipr_resource_type_attr,
4740         &ipr_raw_mode_attr,
4741         NULL,
4742 };
4743
4744 /**
4745  * ipr_biosparam - Return the HSC mapping
4746  * @sdev:                       scsi device struct
4747  * @block_device:       block device pointer
4748  * @capacity:           capacity of the device
4749  * @parm:                       Array containing returned HSC values.
4750  *
4751  * This function generates the HSC parms that fdisk uses.
4752  * We want to make sure we return something that places partitions
4753  * on 4k boundaries for best performance with the IOA.
4754  *
4755  * Return value:
4756  *      0 on success
4757  **/
4758 static int ipr_biosparam(struct scsi_device *sdev,
4759                          struct block_device *block_device,
4760                          sector_t capacity, int *parm)
4761 {
4762         int heads, sectors;
4763         sector_t cylinders;
4764
4765         heads = 128;
4766         sectors = 32;
4767
4768         cylinders = capacity;
4769         sector_div(cylinders, (128 * 32));
4770
4771         /* return result */
4772         parm[0] = heads;
4773         parm[1] = sectors;
4774         parm[2] = cylinders;
4775
4776         return 0;
4777 }
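
/*
 * Worked example of the mapping above: H=128 and S=32 make each cylinder
 * 128 * 32 = 4096 sectors (2 MiB at 512 bytes/sector), so any partition
 * that starts on a cylinder boundary is 4k aligned. A disk of 71,096,640
 * sectors would report 71096640 / 4096 = 17357 cylinders (integer
 * division).
 */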
4778
4779 /**
4780  * ipr_find_starget - Find target based on bus/target.
4781  * @starget:    scsi target struct
4782  *
4783  * Return value:
4784  *      resource entry pointer if found / NULL if not found
4785  **/
4786 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4787 {
4788         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4789         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4790         struct ipr_resource_entry *res;
4791
4792         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4793                 if ((res->bus == starget->channel) &&
4794                     (res->target == starget->id)) {
4795                         return res;
4796                 }
4797         }
4798
4799         return NULL;
4800 }
4801
4802 static struct ata_port_info sata_port_info;
4803
4804 /**
4805  * ipr_target_alloc - Prepare for commands to a SCSI target
4806  * @starget:    scsi target struct
4807  *
4808  * If the device is a SATA device, this function allocates an
4809  * ATA port with libata, else it does nothing.
4810  *
4811  * Return value:
4812  *      0 on success / non-0 on failure
4813  **/
4814 static int ipr_target_alloc(struct scsi_target *starget)
4815 {
4816         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4817         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4818         struct ipr_sata_port *sata_port;
4819         struct ata_port *ap;
4820         struct ipr_resource_entry *res;
4821         unsigned long lock_flags;
4822
4823         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4824         res = ipr_find_starget(starget);
4825         starget->hostdata = NULL;
4826
4827         if (res && ipr_is_gata(res)) {
4828                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4829                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4830                 if (!sata_port)
4831                         return -ENOMEM;
4832
4833                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4834                 if (ap) {
4835                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4836                         sata_port->ioa_cfg = ioa_cfg;
4837                         sata_port->ap = ap;
4838                         sata_port->res = res;
4839
4840                         res->sata_port = sata_port;
4841                         ap->private_data = sata_port;
4842                         starget->hostdata = sata_port;
4843                 } else {
4844                         kfree(sata_port);
4845                         return -ENOMEM;
4846                 }
4847         }
4848         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4849
4850         return 0;
4851 }
4852
4853 /**
4854  * ipr_target_destroy - Destroy a SCSI target
4855  * @starget:    scsi target struct
4856  *
4857  * If the device was a SATA device, this function frees the libata
4858  * ATA port, else it does nothing.
4859  *
4860  **/
4861 static void ipr_target_destroy(struct scsi_target *starget)
4862 {
4863         struct ipr_sata_port *sata_port = starget->hostdata;
4864         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4865         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4866
4867         if (ioa_cfg->sis64) {
4868                 if (!ipr_find_starget(starget)) {
4869                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4870                                 clear_bit(starget->id, ioa_cfg->array_ids);
4871                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4872                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4873                         else if (starget->channel == 0)
4874                                 clear_bit(starget->id, ioa_cfg->target_ids);
4875                 }
4876         }
4877
4878         if (sata_port) {
4879                 starget->hostdata = NULL;
4880                 ata_sas_port_destroy(sata_port->ap);
4881                 kfree(sata_port);
4882         }
4883 }
4884
4885 /**
4886  * ipr_find_sdev - Find device based on bus/target/lun.
4887  * @sdev:       scsi device struct
4888  *
4889  * Return value:
4890  *      resource entry pointer if found / NULL if not found
4891  **/
4892 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4893 {
4894         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4895         struct ipr_resource_entry *res;
4896
4897         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4898                 if ((res->bus == sdev->channel) &&
4899                     (res->target == sdev->id) &&
4900                     (res->lun == sdev->lun))
4901                         return res;
4902         }
4903
4904         return NULL;
4905 }
4906
4907 /**
4908  * ipr_slave_destroy - Unconfigure a SCSI device
4909  * @sdev:       scsi device struct
4910  *
4911  * Return value:
4912  *      nothing
4913  **/
4914 static void ipr_slave_destroy(struct scsi_device *sdev)
4915 {
4916         struct ipr_resource_entry *res;
4917         struct ipr_ioa_cfg *ioa_cfg;
4918         unsigned long lock_flags = 0;
4919
4920         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4921
4922         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4923         res = (struct ipr_resource_entry *) sdev->hostdata;
4924         if (res) {
4925                 if (res->sata_port)
4926                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4927                 sdev->hostdata = NULL;
4928                 res->sdev = NULL;
4929                 res->sata_port = NULL;
4930         }
4931         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4932 }
4933
4934 /**
4935  * ipr_slave_configure - Configure a SCSI device
4936  * @sdev:       scsi device struct
4937  *
4938  * This function configures the specified scsi device.
4939  *
4940  * Return value:
4941  *      0 on success
4942  **/
4943 static int ipr_slave_configure(struct scsi_device *sdev)
4944 {
4945         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4946         struct ipr_resource_entry *res;
4947         struct ata_port *ap = NULL;
4948         unsigned long lock_flags = 0;
4949         char buffer[IPR_MAX_RES_PATH_LENGTH];
4950
4951         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4952         res = sdev->hostdata;
4953         if (res) {
4954                 if (ipr_is_af_dasd_device(res))
4955                         sdev->type = TYPE_RAID;
4956                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4957                         sdev->scsi_level = 4;
4958                         sdev->no_uld_attach = 1;
4959                 }
4960                 if (ipr_is_vset_device(res)) {
4961                         sdev->scsi_level = SCSI_SPC_3;
4962                         sdev->no_report_opcodes = 1;
4963                         blk_queue_rq_timeout(sdev->request_queue,
4964                                              IPR_VSET_RW_TIMEOUT);
4965                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4966                 }
4967                 if (ipr_is_gata(res) && res->sata_port)
4968                         ap = res->sata_port->ap;
4969                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4970
4971                 if (ap) {
4972                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4973                         ata_sas_slave_configure(sdev, ap);
4974                 }
4975
4976                 if (ioa_cfg->sis64)
4977                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4978                                     ipr_format_res_path(ioa_cfg,
4979                                 res->res_path, buffer, sizeof(buffer)));
4980                 return 0;
4981         }
4982         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4983         return 0;
4984 }
4985
4986 /**
4987  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4988  * @sdev:       scsi device struct
4989  *
4990  * This function initializes an ATA port so that future commands
4991  * sent through queuecommand will work.
4992  *
4993  * Return value:
4994  *      0 on success
4995  **/
4996 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4997 {
4998         struct ipr_sata_port *sata_port = NULL;
4999         int rc = -ENXIO;
5000
5001         ENTER;
5002         if (sdev->sdev_target)
5003                 sata_port = sdev->sdev_target->hostdata;
5004         if (sata_port) {
5005                 rc = ata_sas_port_init(sata_port->ap);
5006                 if (rc == 0)
5007                         rc = ata_sas_sync_probe(sata_port->ap);
5008         }
5009
5010         if (rc)
5011                 ipr_slave_destroy(sdev);
5012
5013         LEAVE;
5014         return rc;
5015 }
5016
5017 /**
5018  * ipr_slave_alloc - Prepare for commands to a device.
5019  * @sdev:       scsi device struct
5020  *
5021  * This function saves a pointer to the resource entry
5022  * in the scsi device struct if the device exists. We
5023  * can then use this pointer in ipr_queuecommand when
5024  * handling new commands.
5025  *
5026  * Return value:
5027  *      0 on success / -ENXIO if device does not exist
5028  **/
5029 static int ipr_slave_alloc(struct scsi_device *sdev)
5030 {
5031         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5032         struct ipr_resource_entry *res;
5033         unsigned long lock_flags;
5034         int rc = -ENXIO;
5035
5036         sdev->hostdata = NULL;
5037
5038         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5039
5040         res = ipr_find_sdev(sdev);
5041         if (res) {
5042                 res->sdev = sdev;
5043                 res->add_to_ml = 0;
5044                 res->in_erp = 0;
5045                 sdev->hostdata = res;
5046                 if (!ipr_is_naca_model(res))
5047                         res->needs_sync_complete = 1;
5048                 rc = 0;
5049                 if (ipr_is_gata(res)) {
5050                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5051                         return ipr_ata_slave_alloc(sdev);
5052                 }
5053         }
5054
5055         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5056
5057         return rc;
5058 }
5059
5060 /**
5061  * ipr_match_lun - Match function for specified LUN
5062  * @ipr_cmd:    ipr command struct
5063  * @device:             device to match (sdev)
5064  *
5065  * Returns:
5066  *      1 if command matches sdev / 0 if command does not match sdev
5067  **/
5068 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5069 {
5070         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5071                 return 1;
5072         return 0;
5073 }
5074
5075 /**
5076  * ipr_cmnd_is_free - Check if a command is free or not
5077  * @ipr_cmd:    ipr command struct
5078  *
5079  * Returns:
5080  *      true / false
5081  **/
5082 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5083 {
5084         struct ipr_cmnd *loop_cmd;
5085
5086         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5087                 if (loop_cmd == ipr_cmd)
5088                         return true;
5089         }
5090
5091         return false;
5092 }
5093
5094 /**
5095  * ipr_match_res - Match function for specified resource entry
5096  * @ipr_cmd:    ipr command struct
5097  * @resource:   resource entry to match
5098  *
5099  * Returns:
5100  *      1 if command matches the resource entry / 0 if it does not
5101  **/
5102 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5103 {
5104         struct ipr_resource_entry *res = resource;
5105
5106         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5107                 return 1;
5108         return 0;
5109 }
5110
5111 /**
5112  * ipr_wait_for_ops - Wait for matching commands to complete
5113  * @ioa_cfg:    ioa config struct
5114  * @device:             device to match (sdev)
5115  * @match:              match function to use
5116  *
5117  * Returns:
5118  *      SUCCESS / FAILED
5119  **/
5120 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5121                             int (*match)(struct ipr_cmnd *, void *))
5122 {
5123         struct ipr_cmnd *ipr_cmd;
5124         int wait, i;
5125         unsigned long flags;
5126         struct ipr_hrr_queue *hrrq;
5127         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5128         DECLARE_COMPLETION_ONSTACK(comp);
5129
5130         ENTER;
5131         do {
5132                 wait = 0;
5133
5134                 for_each_hrrq(hrrq, ioa_cfg) {
5135                         spin_lock_irqsave(hrrq->lock, flags);
5136                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5137                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5138                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5139                                         if (match(ipr_cmd, device)) {
5140                                                 ipr_cmd->eh_comp = &comp;
5141                                                 wait++;
5142                                         }
5143                                 }
5144                         }
5145                         spin_unlock_irqrestore(hrrq->lock, flags);
5146                 }
5147
5148                 if (wait) {
5149                         timeout = wait_for_completion_timeout(&comp, timeout);
5150
5151                         if (!timeout) {
5152                                 wait = 0;
5153
5154                                 for_each_hrrq(hrrq, ioa_cfg) {
5155                                         spin_lock_irqsave(hrrq->lock, flags);
5156                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5157                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5158                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5159                                                         if (match(ipr_cmd, device)) {
5160                                                                 ipr_cmd->eh_comp = NULL;
5161                                                                 wait++;
5162                                                         }
5163                                                 }
5164                                         }
5165                                         spin_unlock_irqrestore(hrrq->lock, flags);
5166                                 }
5167
5168                                 if (wait)
5169                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5170                                 LEAVE;
5171                                 return wait ? FAILED : SUCCESS;
5172                         }
5173                 }
5174         } while (wait);
5175
5176         LEAVE;
5177         return SUCCESS;
5178 }
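
/*
 * Illustrative sketch of the on-stack completion pattern used by
 * ipr_wait_for_ops() (hypothetical helper): publish a completion where
 * the done path can see it, then sleep with a timeout until complete()
 * is called.
 */
static int __maybe_unused ipr_example_wait_for_done(struct completion **slot)
{
        DECLARE_COMPLETION_ONSTACK(comp);
        unsigned long timeout = 30 * HZ;        /* illustrative timeout */

        *slot = &comp;  /* the completing context calls complete(*slot) */

        timeout = wait_for_completion_timeout(&comp, timeout);
        if (!timeout) {
                *slot = NULL;   /* timed out; withdraw the completion */
                return -ETIMEDOUT;
        }
        return 0;
}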
5179
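/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/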
5180 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5181 {
5182         struct ipr_ioa_cfg *ioa_cfg;
5183         unsigned long lock_flags = 0;
5184         int rc = SUCCESS;
5185
5186         ENTER;
5187         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5188         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5189
5190         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5191                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5192                 dev_err(&ioa_cfg->pdev->dev,
5193                         "Adapter being reset as a result of error recovery.\n");
5194
5195                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5196                         ioa_cfg->sdt_state = GET_DUMP;
5197         }
5198
5199         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5200         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5201         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5202
5203         /* If we got hit with a host reset while we were already resetting
5204          * the adapter for some reason, and that reset failed (leaving the
          * adapter dead), report failure to the midlayer. */
5205         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5206                 ipr_trace;
5207                 rc = FAILED;
5208         }
5209
5210         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5211         LEAVE;
5212         return rc;
5213 }
5214
5215 /**
5216  * ipr_device_reset - Reset the device
5217  * @ioa_cfg:    ioa config struct
5218  * @res:                resource entry struct
5219  *
5220  * This function issues a device reset to the affected device.
5221  * If the device is a SCSI device, a LUN reset will be sent
5222  * to the device first. If that does not work, a target reset
5223  * will be sent. If the device is a SATA device, a PHY reset will
5224  * be sent.
5225  *
5226  * Return value:
5227  *      0 on success / non-zero on failure
5228  **/
5229 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5230                             struct ipr_resource_entry *res)
5231 {
5232         struct ipr_cmnd *ipr_cmd;
5233         struct ipr_ioarcb *ioarcb;
5234         struct ipr_cmd_pkt *cmd_pkt;
5235         struct ipr_ioarcb_ata_regs *regs;
5236         u32 ioasc;
5237
5238         ENTER;
5239         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5240         ioarcb = &ipr_cmd->ioarcb;
5241         cmd_pkt = &ioarcb->cmd_pkt;
5242
5243         if (ipr_cmd->ioa_cfg->sis64) {
5244                 regs = &ipr_cmd->i.ata_ioadl.regs;
5245                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5246         } else
5247                 regs = &ioarcb->u.add_data.u.regs;
5248
5249         ioarcb->res_handle = res->res_handle;
5250         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5251         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5252         if (ipr_is_gata(res)) {
5253                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5254                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5255                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5256         }
5257
5258         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5259         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5260         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5261         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5262                 if (ipr_cmd->ioa_cfg->sis64)
5263                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5264                                sizeof(struct ipr_ioasa_gata));
5265                 else
5266                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5267                                sizeof(struct ipr_ioasa_gata));
5268         }
5269
5270         LEAVE;
5271         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5272 }
5273
5274 /**
5275  * ipr_sata_reset - Reset the SATA port
5276  * @link:       SATA link to reset
5277  * @classes:    class of the attached device
 * @deadline:   unused
5278  *
5279  * This function issues a SATA phy reset to the affected ATA link.
5280  *
5281  * Return value:
5282  *      0 on success / non-zero on failure
5283  **/
5284 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5285                                 unsigned long deadline)
5286 {
5287         struct ipr_sata_port *sata_port = link->ap->private_data;
5288         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5289         struct ipr_resource_entry *res;
5290         unsigned long lock_flags = 0;
5291         int rc = -ENXIO, ret;
5292
5293         ENTER;
5294         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5295         while (ioa_cfg->in_reset_reload) {
5296                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5297                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5298                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5299         }
5300
5301         res = sata_port->res;
5302         if (res) {
5303                 rc = ipr_device_reset(ioa_cfg, res);
5304                 *classes = res->ata_class;
5305                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5306
5307                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5308                 if (ret != SUCCESS) {
5309                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5310                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5311                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5312
5313                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5314                 }
5315         } else
5316                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5317
5318         LEAVE;
5319         return rc;
5320 }
5321
5322 /**
5323  * __ipr_eh_dev_reset - Reset the device
5324  * @scsi_cmd:   scsi command struct
5325  *
5326  * This function issues a device reset to the affected device.
5327  * A LUN reset will be sent to the device first. If that does
5328  * not work, a target reset will be sent.
5329  *
5330  * Return value:
5331  *      SUCCESS / FAILED
5332  **/
5333 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5334 {
5335         struct ipr_cmnd *ipr_cmd;
5336         struct ipr_ioa_cfg *ioa_cfg;
5337         struct ipr_resource_entry *res;
5338         struct ata_port *ap;
5339         int rc = 0, i;
5340         struct ipr_hrr_queue *hrrq;
5341
5342         ENTER;
5343         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5344         res = scsi_cmd->device->hostdata;
5345
5346         /*
5347          * If we are currently going through reset/reload, return failed. This will force the
5348          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5349          * reset to complete
5350          */
5351         if (ioa_cfg->in_reset_reload)
5352                 return FAILED;
5353         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5354                 return FAILED;
5355
5356         for_each_hrrq(hrrq, ioa_cfg) {
5357                 spin_lock(&hrrq->_lock);
5358                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5359                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5360
5361                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5362                                 if (!ipr_cmd->qc)
5363                                         continue;
5364                                 if (ipr_cmnd_is_free(ipr_cmd))
5365                                         continue;
5366
5367                                 ipr_cmd->done = ipr_sata_eh_done;
5368                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5369                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5370                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5371                                 }
5372                         }
5373                 }
5374                 spin_unlock(&hrrq->_lock);
5375         }
5376         res->resetting_device = 1;
5377         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5378
5379         if (ipr_is_gata(res) && res->sata_port) {
5380                 ap = res->sata_port->ap;
5381                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5382                 ata_std_error_handler(ap);
5383                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5384         } else
5385                 rc = ipr_device_reset(ioa_cfg, res);
5386         res->resetting_device = 0;
5387         res->reset_occurred = 1;
5388
5389         LEAVE;
5390         return rc ? FAILED : SUCCESS;
5391 }
5392
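/**
 * ipr_eh_dev_reset - Reset the device and wait for its outstanding ops
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/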
5393 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5394 {
5395         int rc;
5396         struct ipr_ioa_cfg *ioa_cfg;
5397         struct ipr_resource_entry *res;
5398
5399         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5400         res = cmd->device->hostdata;
5401
5402         if (!res)
5403                 return FAILED;
5404
5405         spin_lock_irq(cmd->device->host->host_lock);
5406         rc = __ipr_eh_dev_reset(cmd);
5407         spin_unlock_irq(cmd->device->host->host_lock);
5408
5409         if (rc == SUCCESS) {
5410                 if (ipr_is_gata(res) && res->sata_port)
5411                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5412                 else
5413                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5414         }
5415
5416         return rc;
5417 }
5418
5419 /**
5420  * ipr_bus_reset_done - Op done function for bus reset.
5421  * @ipr_cmd:    ipr command struct
5422  *
5423  * This function is the op done function for a bus reset
5424  *
5425  * Return value:
5426  *      none
5427  **/
5428 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5429 {
5430         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5431         struct ipr_resource_entry *res;
5432
5433         ENTER;
5434         if (!ioa_cfg->sis64)
5435                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5436                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5437                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5438                                 break;
5439                         }
5440                 }
5441
5442         /*
5443          * If abort has not completed, indicate the reset has, else call the
5444          * abort's done function to wake the sleeping eh thread
5445          */
5446         if (ipr_cmd->sibling->sibling)
5447                 ipr_cmd->sibling->sibling = NULL;
5448         else
5449                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5450
5451         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5452         LEAVE;
5453 }
5454
5455 /**
5456  * ipr_abort_timeout - An abort task has timed out
5457  * @t:          timer context used to fetch the ipr command struct
5458  *
5459  * This function handles when an abort task times out. If this
5460  * happens we issue a bus reset since we have resources tied
5461  * up that must be freed before returning to the midlayer.
5462  *
5463  * Return value:
5464  *      none
5465  **/
5466 static void ipr_abort_timeout(struct timer_list *t)
5467 {
5468         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5469         struct ipr_cmnd *reset_cmd;
5470         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5471         struct ipr_cmd_pkt *cmd_pkt;
5472         unsigned long lock_flags = 0;
5473
5474         ENTER;
5475         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5476         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5477                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5478                 return;
5479         }
5480
5481         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5482         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5483         ipr_cmd->sibling = reset_cmd;
5484         reset_cmd->sibling = ipr_cmd;
5485         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5486         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5487         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5488         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5489         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5490
5491         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5492         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5493         LEAVE;
5494 }
5495
5496 /**
5497  * ipr_cancel_op - Cancel specified op
5498  * @scsi_cmd:   scsi command struct
5499  *
5500  * This function cancels specified op.
5501  *
5502  * Return value:
5503  *      SUCCESS / FAILED
5504  **/
5505 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5506 {
5507         struct ipr_cmnd *ipr_cmd;
5508         struct ipr_ioa_cfg *ioa_cfg;
5509         struct ipr_resource_entry *res;
5510         struct ipr_cmd_pkt *cmd_pkt;
5511         u32 ioasc, int_reg;
5512         int i, op_found = 0;
5513         struct ipr_hrr_queue *hrrq;
5514
5515         ENTER;
5516         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5517         res = scsi_cmd->device->hostdata;
5518
5519         /* If we are currently going through reset/reload, return failed.
5520          * This will force the mid-layer to call ipr_eh_host_reset,
5521          * which will then go to sleep and wait for the reset to complete
5522          */
5523         if (ioa_cfg->in_reset_reload ||
5524             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5525                 return FAILED;
5526         if (!res)
5527                 return FAILED;
5528
5529         /*
5530          * If we are aborting a timed out op, chances are that the timeout was
5531          * caused by an EEH error that has not yet been detected. In such cases,
5532          * reading a register will trigger the EEH recovery infrastructure.
5533          */
5534         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5535
5536         if (!ipr_is_gscsi(res))
5537                 return FAILED;
5538
5539         for_each_hrrq(hrrq, ioa_cfg) {
5540                 spin_lock(&hrrq->_lock);
5541                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5542                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5543                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5544                                         op_found = 1;
5545                                         break;
5546                                 }
5547                         }
5548                 }
5549                 spin_unlock(&hrrq->_lock);
5550         }
5551
5552         if (!op_found)
5553                 return SUCCESS;
5554
5555         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5556         ipr_cmd->ioarcb.res_handle = res->res_handle;
5557         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5558         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5559         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5560         ipr_cmd->u.sdev = scsi_cmd->device;
5561
5562         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5563                     scsi_cmd->cmnd[0]);
5564         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5565         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5566
5567         /*
5568          * If the abort task timed out and we sent a bus reset, we will get
5569          * one of the following responses to the abort
5570          */
5571         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5572                 ioasc = 0;
5573                 ipr_trace;
5574         }
5575
5576         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5577         if (!ipr_is_naca_model(res))
5578                 res->needs_sync_complete = 1;
5579
5580         LEAVE;
5581         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5582 }
5583
5584 /**
5585  * ipr_scan_finished - Report whether the device scan is done
5586  * @shost:      scsi host struct
      * @elapsed_time:       elapsed scan time in jiffies
5587  *
5588  * Return value:
5589  *      0 if scan in progress / 1 if scan is complete
5590  **/
5591 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5592 {
5593         unsigned long lock_flags;
5594         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5595         int rc = 0;
5596
5597         spin_lock_irqsave(shost->host_lock, lock_flags);
5598         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5599                 rc = 1;
5600         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5601                 rc = 1;
5602         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5603         return rc;
5604 }
5605
5606 /**
5607  * ipr_eh_abort - Abort a single op
5608  * @scsi_cmd:   scsi command struct
5609  *
5610  * Return value:
5611  *      SUCCESS / FAILED
5612  **/
5613 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5614 {
5615         unsigned long flags;
5616         int rc;
5617         struct ipr_ioa_cfg *ioa_cfg;
5618
5619         ENTER;
5620
5621         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5622
5623         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5624         rc = ipr_cancel_op(scsi_cmd);
5625         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5626
5627         if (rc == SUCCESS)
5628                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5629         LEAVE;
5630         return rc;
5631 }
5632
5633 /**
5634  * ipr_handle_other_interrupt - Handle "other" interrupts
5635  * @ioa_cfg:    ioa config struct
5636  * @int_reg:    interrupt register
5637  *
5638  * Return value:
5639  *      IRQ_NONE / IRQ_HANDLED
5640  **/
5641 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5642                                               u32 int_reg)
5643 {
5644         irqreturn_t rc = IRQ_HANDLED;
5645         u32 int_mask_reg;
5646
5647         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5648         int_reg &= ~int_mask_reg;
5649
5650         /* If no operational interrupt occurred on the adapter, ignore it,
5651          * except on SIS 64, where it may instead be a stage change interrupt.
5652          */
5653         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5654                 if (ioa_cfg->sis64) {
5655                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5656                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5657                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5658
5659                                 /* clear stage change */
5660                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5661                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5662                                 list_del(&ioa_cfg->reset_cmd->queue);
5663                                 del_timer(&ioa_cfg->reset_cmd->timer);
5664                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5665                                 return IRQ_HANDLED;
5666                         }
5667                 }
5668
5669                 return IRQ_NONE;
5670         }
5671
5672         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5673                 /* Mask the interrupt */
5674                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5675                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5676
5677                 list_del(&ioa_cfg->reset_cmd->queue);
5678                 del_timer(&ioa_cfg->reset_cmd->timer);
5679                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5680         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5681                 if (ioa_cfg->clear_isr) {
5682                         if (ipr_debug && printk_ratelimit())
5683                                 dev_err(&ioa_cfg->pdev->dev,
5684                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5685                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5686                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5687                         return IRQ_NONE;
5688                 }
5689         } else {
5690                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5691                         ioa_cfg->ioa_unit_checked = 1;
5692                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5693                         dev_err(&ioa_cfg->pdev->dev,
5694                                 "No Host RRQ. 0x%08X\n", int_reg);
5695                 else
5696                         dev_err(&ioa_cfg->pdev->dev,
5697                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5698
5699                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5700                         ioa_cfg->sdt_state = GET_DUMP;
5701
5702                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5703                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5704         }
5705
5706         return rc;
5707 }
5708
5709 /**
5710  * ipr_isr_eh - Interrupt service routine error handler
5711  * @ioa_cfg:    ioa config struct
5712  * @msg:        message to log
      * @number:     number to log with the message
5713  *
5714  * Return value:
5715  *      none
5716  **/
5717 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5718 {
5719         ioa_cfg->errors_logged++;
5720         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5721
5722         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5723                 ioa_cfg->sdt_state = GET_DUMP;
5724
5725         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5726 }
5727
5728 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5729                                                 struct list_head *doneq)
5730 {
5731         u32 ioasc;
5732         u16 cmd_index;
5733         struct ipr_cmnd *ipr_cmd;
5734         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5735         int num_hrrq = 0;
5736
5737         /* If interrupts are disabled, ignore the interrupt */
5738         if (!hrr_queue->allow_interrupts)
5739                 return 0;
5740
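             /*
              * The HRRQ is a circular queue of response handles written by
              * the adapter. The toggle bit flips each time the host wraps
              * around the queue, so an entry whose toggle bit matches ours
              * is a new, unconsumed response.
              */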
5741         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5742                hrr_queue->toggle_bit) {
5743
5744                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5745                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5746                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5747
5748                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5749                              cmd_index < hrr_queue->min_cmd_id)) {
5750                         ipr_isr_eh(ioa_cfg,
5751                                 "Invalid response handle from IOA: ",
5752                                 cmd_index);
5753                         break;
5754                 }
5755
5756                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5757                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5758
5759                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5760
5761                 list_move_tail(&ipr_cmd->queue, doneq);
5762
5763                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5764                         hrr_queue->hrrq_curr++;
5765                 } else {
5766                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5767                         hrr_queue->toggle_bit ^= 1u;
5768                 }
5769                 num_hrrq++;
5770                 if (budget > 0 && num_hrrq >= budget)
5771                         break;
5772         }
5773
5774         return num_hrrq;
5775 }
5776
5777 static int ipr_iopoll(struct irq_poll *iop, int budget)
5778 {
5779         struct ipr_ioa_cfg *ioa_cfg;
5780         struct ipr_hrr_queue *hrrq;
5781         struct ipr_cmnd *ipr_cmd, *temp;
5782         unsigned long hrrq_flags;
5783         int completed_ops;
5784         LIST_HEAD(doneq);
5785
5786         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5787         ioa_cfg = hrrq->ioa_cfg;
5788
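             /*
              * irq_poll contract: consume at most "budget" responses per
              * poll; finishing below budget means the queue is drained, and
              * irq_poll_complete() lets completions go back to being
              * interrupt driven.
              */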
5789         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5790         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5791
5792         if (completed_ops < budget)
5793                 irq_poll_complete(iop);
5794         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5795
5796         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5797                 list_del(&ipr_cmd->queue);
5798                 del_timer(&ipr_cmd->timer);
5799                 ipr_cmd->fast_done(ipr_cmd);
5800         }
5801
5802         return completed_ops;
5803 }
5804
5805 /**
5806  * ipr_isr - Interrupt service routine
5807  * @irq:        irq number
5808  * @devp:       pointer to ioa config struct
5809  *
5810  * Return value:
5811  *      IRQ_NONE / IRQ_HANDLED
5812  **/
5813 static irqreturn_t ipr_isr(int irq, void *devp)
5814 {
5815         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5816         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5817         unsigned long hrrq_flags = 0;
5818         u32 int_reg = 0;
5819         int num_hrrq = 0;
5820         int irq_none = 0;
5821         struct ipr_cmnd *ipr_cmd, *temp;
5822         irqreturn_t rc = IRQ_NONE;
5823         LIST_HEAD(doneq);
5824
5825         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5826         /* If interrupts are disabled, ignore the interrupt */
5827         if (!hrrq->allow_interrupts) {
5828                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5829                 return IRQ_NONE;
5830         }
5831
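             /*
              * Drain the HRRQ until no new responses appear. When clear_isr
              * is set, the PCI interrupt must also be cleared and re-read,
              * since the adapter can post further responses while it is
              * being cleared.
              */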
5832         while (1) {
5833                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5834                         rc = IRQ_HANDLED;
5835
5836                         if (!ioa_cfg->clear_isr)
5837                                 break;
5838
5839                         /* Clear the PCI interrupt */
5840                         num_hrrq = 0;
5841                         do {
5842                                 writel(IPR_PCII_HRRQ_UPDATED,
5843                                      ioa_cfg->regs.clr_interrupt_reg32);
5844                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5845                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5846                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5847
5848                 } else if (rc == IRQ_NONE && irq_none == 0) {
5849                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5850                         irq_none++;
5851                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5852                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5853                         ipr_isr_eh(ioa_cfg,
5854                                 "Error clearing HRRQ: ", num_hrrq);
5855                         rc = IRQ_HANDLED;
5856                         break;
5857                 } else
5858                         break;
5859         }
5860
5861         if (unlikely(rc == IRQ_NONE))
5862                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5863
5864         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5865         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5866                 list_del(&ipr_cmd->queue);
5867                 del_timer(&ipr_cmd->timer);
5868                 ipr_cmd->fast_done(ipr_cmd);
5869         }
5870         return rc;
5871 }
5872
5873 /**
5874  * ipr_isr_mhrrq - Interrupt service routine
5875  * @irq:        irq number
5876  * @devp:       pointer to ioa config struct
5877  *
5878  * Return value:
5879  *      IRQ_NONE / IRQ_HANDLED
5880  **/
5881 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5882 {
5883         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5884         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5885         unsigned long hrrq_flags = 0;
5886         struct ipr_cmnd *ipr_cmd, *temp;
5887         irqreturn_t rc = IRQ_NONE;
5888         LIST_HEAD(doneq);
5889
5890         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5891
5892         /* If interrupts are disabled, ignore the interrupt */
5893         if (!hrrq->allow_interrupts) {
5894                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5895                 return IRQ_NONE;
5896         }
5897
5898         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5899                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5900                        hrrq->toggle_bit) {
5901                         irq_poll_sched(&hrrq->iopoll);
5902                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5903                         return IRQ_HANDLED;
5904                 }
5905         } else {
5906                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5907                         hrrq->toggle_bit)
5908
5909                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5910                                 rc = IRQ_HANDLED;
5911         }
5912
5913         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5914
5915         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5916                 list_del(&ipr_cmd->queue);
5917                 del_timer(&ipr_cmd->timer);
5918                 ipr_cmd->fast_done(ipr_cmd);
5919         }
5920         return rc;
5921 }
5922
5923 /**
5924  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5925  * @ioa_cfg:    ioa config struct
5926  * @ipr_cmd:    ipr command struct
5927  *
5928  * Return value:
5929  *      0 on success / -1 on failure
5930  **/
5931 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5932                              struct ipr_cmnd *ipr_cmd)
5933 {
5934         int i, nseg;
5935         struct scatterlist *sg;
5936         u32 length;
5937         u32 ioadl_flags = 0;
5938         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5939         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5940         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5941
5942         length = scsi_bufflen(scsi_cmd);
5943         if (!length)
5944                 return 0;
5945
5946         nseg = scsi_dma_map(scsi_cmd);
5947         if (nseg < 0) {
5948                 if (printk_ratelimit())
5949                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5950                 return -1;
5951         }
5952
5953         ipr_cmd->dma_use_sg = nseg;
5954
5955         ioarcb->data_transfer_length = cpu_to_be32(length);
5956         ioarcb->ioadl_len =
5957                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5958
5959         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5960                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5961                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5962         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5963                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5964
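             /*
              * scsi_for_each_sg() walks the mapped list with the sg helpers
              * (sg_next), which follow chained scatterlists correctly, and
              * builds one 64-bit IOADL descriptor per DMA segment.
              */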
5965         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5966                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5967                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5968                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5969         }
5970
5971         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5972         return 0;
5973 }
5974
5975 /**
5976  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5977  * @ioa_cfg:    ioa config struct
5978  * @ipr_cmd:    ipr command struct
5979  *
5980  * Return value:
5981  *      0 on success / -1 on failure
5982  **/
5983 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5984                            struct ipr_cmnd *ipr_cmd)
5985 {
5986         int i, nseg;
5987         struct scatterlist *sg;
5988         u32 length;
5989         u32 ioadl_flags = 0;
5990         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5991         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5992         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5993
5994         length = scsi_bufflen(scsi_cmd);
5995         if (!length)
5996                 return 0;
5997
5998         nseg = scsi_dma_map(scsi_cmd);
5999         if (nseg < 0) {
6000                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6001                 return -1;
6002         }
6003
6004         ipr_cmd->dma_use_sg = nseg;
6005
6006         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6007                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6008                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6009                 ioarcb->data_transfer_length = cpu_to_be32(length);
6010                 ioarcb->ioadl_len =
6011                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6012         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6013                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6014                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6015                 ioarcb->read_ioadl_len =
6016                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6017         }
6018
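             /*
              * If the S/G list is short enough, embed the IOADL in the
              * IOARCB itself; this lets the adapter pick up the command
              * block and its descriptors together instead of doing a
              * separate descriptor fetch.
              */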
6019         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6020                 ioadl = ioarcb->u.add_data.u.ioadl;
6021                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6022                                     offsetof(struct ipr_ioarcb, u.add_data));
6023                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6024         }
6025
6026         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6027                 ioadl[i].flags_and_data_len =
6028                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6029                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6030         }
6031
6032         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6033         return 0;
6034 }
6035
6036 /**
6037  * __ipr_erp_done - Process completion of ERP for a device
6038  * @ipr_cmd:            ipr command struct
6039  *
6040  * This function copies the sense buffer into the scsi_cmd
6041  * struct and calls the scsi_done function.
6042  *
6043  * Return value:
6044  *      nothing
6045  **/
6046 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6047 {
6048         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6049         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6050         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6051
6052         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6053                 scsi_cmd->result |= (DID_ERROR << 16);
6054                 scmd_printk(KERN_ERR, scsi_cmd,
6055                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6056         } else {
6057                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6058                        SCSI_SENSE_BUFFERSIZE);
6059         }
6060
6061         if (res) {
6062                 if (!ipr_is_naca_model(res))
6063                         res->needs_sync_complete = 1;
6064                 res->in_erp = 0;
6065         }
6066         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6067         scsi_cmd->scsi_done(scsi_cmd);
6068         if (ipr_cmd->eh_comp)
6069                 complete(ipr_cmd->eh_comp);
6070         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6071 }
6072
6073 /**
6074  * ipr_erp_done - Process completion of ERP for a device
6075  * @ipr_cmd:            ipr command struct
6076  *
6077  * This function copies the sense buffer into the scsi_cmd
6078  * struct and calls the scsi_done function.
6079  *
6080  * Return value:
6081  *      nothing
6082  **/
6083 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6084 {
6085         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6086         unsigned long hrrq_flags;
6087
6088         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6089         __ipr_erp_done(ipr_cmd);
6090         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6091 }
6092
6093 /**
6094  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6095  * @ipr_cmd:    ipr command struct
6096  *
6097  * Return value:
6098  *      none
6099  **/
6100 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6101 {
6102         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6103         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6104         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6105
6106         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6107         ioarcb->data_transfer_length = 0;
6108         ioarcb->read_data_transfer_length = 0;
6109         ioarcb->ioadl_len = 0;
6110         ioarcb->read_ioadl_len = 0;
6111         ioasa->hdr.ioasc = 0;
6112         ioasa->hdr.residual_data_len = 0;
6113
6114         if (ipr_cmd->ioa_cfg->sis64)
6115                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6116                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6117         else {
6118                 ioarcb->write_ioadl_addr =
6119                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6120                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6121         }
6122 }
6123
6124 /**
6125  * __ipr_erp_request_sense - Send request sense to a device
6126  * @ipr_cmd:    ipr command struct
6127  *
6128  * This function sends a request sense to a device as a result
6129  * of a check condition.
6130  *
6131  * Return value:
6132  *      nothing
6133  **/
6134 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6135 {
6136         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6137         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6138
6139         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6140                 __ipr_erp_done(ipr_cmd);
6141                 return;
6142         }
6143
6144         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6145
6146         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6147         cmd_pkt->cdb[0] = REQUEST_SENSE;
6148         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6149         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6150         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6151         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6152
6153         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6154                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6155
6156         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6157                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6158 }
6159
6160 /**
6161  * ipr_erp_request_sense - Send request sense to a device
6162  * @ipr_cmd:    ipr command struct
6163  *
6164  * This function sends a request sense to a device as a result
6165  * of a check condition.
6166  *
6167  * Return value:
6168  *      nothing
6169  **/
6170 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6171 {
6172         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6173         unsigned long hrrq_flags;
6174
6175         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6176         __ipr_erp_request_sense(ipr_cmd);
6177         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6178 }
6179
6180 /**
6181  * ipr_erp_cancel_all - Send cancel all to a device
6182  * @ipr_cmd:    ipr command struct
6183  *
6184  * This function sends a cancel all to a device to clear the
6185  * queue. If we are running TCQ on the device, QERR is set to 1,
6186  * which means all outstanding ops have been dropped on the floor.
6187  * Cancel all will return them to us.
6188  *
6189  * Return value:
6190  *      nothing
6191  **/
6192 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6193 {
6194         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6195         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6196         struct ipr_cmd_pkt *cmd_pkt;
6197
6198         res->in_erp = 1;
6199
6200         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6201
6202         if (!scsi_cmd->device->simple_tags) {
6203                 __ipr_erp_request_sense(ipr_cmd);
6204                 return;
6205         }
6206
6207         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6208         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6209         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6210
6211         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6212                    IPR_CANCEL_ALL_TIMEOUT);
6213 }
6214
6215 /**
6216  * ipr_dump_ioasa - Dump contents of IOASA
6217  * @ioa_cfg:    ioa config struct
6218  * @ipr_cmd:    ipr command struct
6219  * @res:                resource entry struct
6220  *
6221  * This function is invoked by the interrupt handler when ops
6222  * fail. It will log the IOASA if appropriate. Only called
6223  * for GPDD ops.
6224  *
6225  * Return value:
6226  *      none
6227  **/
6228 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6229                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6230 {
6231         int i;
6232         u16 data_len;
6233         u32 ioasc, fd_ioasc;
6234         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6235         __be32 *ioasa_data = (__be32 *)ioasa;
6236         int error_index;
6237
6238         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6239         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6240
6241         if (0 == ioasc)
6242                 return;
6243
6244         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6245                 return;
6246
6247         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6248                 error_index = ipr_get_error(fd_ioasc);
6249         else
6250                 error_index = ipr_get_error(ioasc);
6251
6252         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6253                 /* Don't log an error if the IOA already logged one */
6254                 if (ioasa->hdr.ilid != 0)
6255                         return;
6256
6257                 if (!ipr_is_gscsi(res))
6258                         return;
6259
6260                 if (ipr_error_table[error_index].log_ioasa == 0)
6261                         return;
6262         }
6263
6264         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6265
6266         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6267         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6268                 data_len = sizeof(struct ipr_ioasa64);
6269         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6270                 data_len = sizeof(struct ipr_ioasa);
6271
6272         ipr_err("IOASA Dump:\n");
6273
6274         for (i = 0; i < data_len / 4; i += 4) {
6275                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6276                         be32_to_cpu(ioasa_data[i]),
6277                         be32_to_cpu(ioasa_data[i+1]),
6278                         be32_to_cpu(ioasa_data[i+2]),
6279                         be32_to_cpu(ioasa_data[i+3]));
6280         }
6281 }
6282
6283 /**
6284  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6285  * @ioasa:              IOASA
6286  * @sense_buf:  sense data buffer
6287  *
6288  * Return value:
6289  *      none
6290  **/
6291 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6292 {
6293         u32 failing_lba;
6294         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6295         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6296         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6297         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6298
6299         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6300
6301         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6302                 return;
6303
6304         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6305
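             /*
              * A volume set failing LBA can exceed 32 bits, which does not
              * fit in fixed-format (0x70) sense data, so descriptor-format
              * (0x72) sense with an information descriptor carrying the
              * 64-bit LBA is built instead.
              */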
6306         if (ipr_is_vset_device(res) &&
6307             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6308             ioasa->u.vset.failing_lba_hi != 0) {
6309                 sense_buf[0] = 0x72;
6310                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6311                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6312                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6313
6314                 sense_buf[7] = 12;
6315                 sense_buf[8] = 0;
6316                 sense_buf[9] = 0x0A;
6317                 sense_buf[10] = 0x80;
6318
6319                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6320
6321                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6322                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6323                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6324                 sense_buf[15] = failing_lba & 0x000000ff;
6325
6326                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6327
6328                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6329                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6330                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6331                 sense_buf[19] = failing_lba & 0x000000ff;
6332         } else {
6333                 sense_buf[0] = 0x70;
6334                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6335                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6336                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6337
6338                 /* Illegal request */
6339                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6340                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6341                         sense_buf[7] = 10;      /* additional length */
6342
6343                         /* IOARCB was in error */
6344                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6345                                 sense_buf[15] = 0xC0;
6346                         else    /* Parameter data was invalid */
6347                                 sense_buf[15] = 0x80;
6348
6349                         sense_buf[16] =
6350                             ((IPR_FIELD_POINTER_MASK &
6351                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6352                         sense_buf[17] =
6353                             (IPR_FIELD_POINTER_MASK &
6354                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6355                 } else {
6356                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6357                                 if (ipr_is_vset_device(res))
6358                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6359                                 else
6360                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6361
6362                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6363                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6364                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6365                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6366                                 sense_buf[6] = failing_lba & 0x000000ff;
6367                         }
6368
6369                         sense_buf[7] = 6;       /* additional length */
6370                 }
6371         }
6372 }
6373
6374 /**
6375  * ipr_get_autosense - Copy autosense data to sense buffer
6376  * @ipr_cmd:    ipr command struct
6377  *
6378  * This function copies the autosense buffer to the buffer
6379  * in the scsi_cmd, if there is autosense available.
6380  *
6381  * Return value:
6382  *      1 if autosense was available / 0 if not
6383  **/
6384 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6385 {
6386         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6387         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6388
6389         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6390                 return 0;
6391
6392         if (ipr_cmd->ioa_cfg->sis64)
6393                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6394                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6395                            SCSI_SENSE_BUFFERSIZE));
6396         else
6397                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6398                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6399                            SCSI_SENSE_BUFFERSIZE));
6400         return 1;
6401 }
6402
6403 /**
6404  * ipr_erp_start - Process an error response for a SCSI op
6405  * @ioa_cfg:    ioa config struct
6406  * @ipr_cmd:    ipr command struct
6407  *
6408  * This function determines whether or not to initiate ERP
6409  * on the affected device.
6410  *
6411  * Return value:
6412  *      nothing
6413  **/
6414 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6415                               struct ipr_cmnd *ipr_cmd)
6416 {
6417         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6418         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6419         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6420         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6421
6422         if (!res) {
6423                 __ipr_scsi_eh_done(ipr_cmd);
6424                 return;
6425         }
6426
6427         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6428                 ipr_gen_sense(ipr_cmd);
6429
6430         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6431
6432         switch (masked_ioasc) {
6433         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6434                 if (ipr_is_naca_model(res))
6435                         scsi_cmd->result |= (DID_ABORT << 16);
6436                 else
6437                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6438                 break;
6439         case IPR_IOASC_IR_RESOURCE_HANDLE:
6440         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6441                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6442                 break;
6443         case IPR_IOASC_HW_SEL_TIMEOUT:
6444                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6445                 if (!ipr_is_naca_model(res))
6446                         res->needs_sync_complete = 1;
6447                 break;
6448         case IPR_IOASC_SYNC_REQUIRED:
6449                 if (!res->in_erp)
6450                         res->needs_sync_complete = 1;
6451                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6452                 break;
6453         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6454         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6455                 /*
6456                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6457                  * so SCSI mid-layer and upper layers handle it accordingly.
6458                  */
6459                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6460                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6461                 break;
6462         case IPR_IOASC_BUS_WAS_RESET:
6463         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6464                 /*
6465                  * Report the bus reset and ask for a retry. The device
6466                  * will give CC/UA the next command.
6467                  */
6468                 if (!res->resetting_device)
6469                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6470                 scsi_cmd->result |= (DID_ERROR << 16);
6471                 if (!ipr_is_naca_model(res))
6472                         res->needs_sync_complete = 1;
6473                 break;
6474         case IPR_IOASC_HW_DEV_BUS_STATUS:
6475                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6476                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6477                         if (!ipr_get_autosense(ipr_cmd)) {
6478                                 if (!ipr_is_naca_model(res)) {
6479                                         ipr_erp_cancel_all(ipr_cmd);
6480                                         return;
6481                                 }
6482                         }
6483                 }
6484                 if (!ipr_is_naca_model(res))
6485                         res->needs_sync_complete = 1;
6486                 break;
6487         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6488                 break;
6489         case IPR_IOASC_IR_NON_OPTIMIZED:
6490                 if (res->raw_mode) {
6491                         res->raw_mode = 0;
6492                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6493                 } else
6494                         scsi_cmd->result |= (DID_ERROR << 16);
6495                 break;
6496         default:
6497                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6498                         scsi_cmd->result |= (DID_ERROR << 16);
6499                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6500                         res->needs_sync_complete = 1;
6501                 break;
6502         }
6503
6504         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6505         scsi_cmd->scsi_done(scsi_cmd);
6506         if (ipr_cmd->eh_comp)
6507                 complete(ipr_cmd->eh_comp);
6508         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6509 }
6510
6511 /**
6512  * ipr_scsi_done - mid-layer done function
6513  * @ipr_cmd:    ipr command struct
6514  *
6515  * This function is invoked by the interrupt handler for
6516  * ops generated by the SCSI mid-layer
6517  *
6518  * Return value:
6519  *      none
6520  **/
6521 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6522 {
6523         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6524         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6525         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6526         unsigned long lock_flags;
6527
6528         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6529
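             /*
              * Fast path: a successful command completes under only the
              * HRRQ lock; on error the host lock is taken as well so that
              * ERP can be started safely.
              */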
6530         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6531                 scsi_dma_unmap(scsi_cmd);
6532
6533                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6534                 scsi_cmd->scsi_done(scsi_cmd);
6535                 if (ipr_cmd->eh_comp)
6536                         complete(ipr_cmd->eh_comp);
6537                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6538                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6539         } else {
6540                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6541                 spin_lock(&ipr_cmd->hrrq->_lock);
6542                 ipr_erp_start(ioa_cfg, ipr_cmd);
6543                 spin_unlock(&ipr_cmd->hrrq->_lock);
6544                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6545         }
6546 }
6547
6548 /**
6549  * ipr_queuecommand - Queue a mid-layer request
6550  * @shost:              scsi host struct
6551  * @scsi_cmd:   scsi command struct
6552  *
6553  * This function queues a request generated by the mid-layer.
6554  *
6555  * Return value:
6556  *      0 on success
6557  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6558  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6559  **/
6560 static int ipr_queuecommand(struct Scsi_Host *shost,
6561                             struct scsi_cmnd *scsi_cmd)
6562 {
6563         struct ipr_ioa_cfg *ioa_cfg;
6564         struct ipr_resource_entry *res;
6565         struct ipr_ioarcb *ioarcb;
6566         struct ipr_cmnd *ipr_cmd;
6567         unsigned long hrrq_flags, lock_flags;
6568         int rc;
6569         struct ipr_hrr_queue *hrrq;
6570         int hrrq_id;
6571
6572         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6573
6574         scsi_cmd->result = (DID_OK << 16);
6575         res = scsi_cmd->device->hostdata;
6576
6577         if (ipr_is_gata(res) && res->sata_port) {
6578                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6579                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6580                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6581                 return rc;
6582         }
6583
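             /*
              * Spread commands over the HRRQs (one per interrupt vector);
              * ipr_get_hrrq_index() picks the next queue, effectively
              * round-robin.
              */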
6584         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6585         hrrq = &ioa_cfg->hrrq[hrrq_id];
6586
6587         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6588         /*
6589          * We are currently blocking all devices due to a host reset.
6590          * We have told the host to stop giving us new requests, but
6591          * ERP ops don't count. FIXME
6592          */
6593         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6594                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6595                 return SCSI_MLQUEUE_HOST_BUSY;
6596         }
6597
6598         /*
6599          * FIXME - Create scsi_set_host_offline interface
6600          *  and the ioa_is_dead check can be removed
6601          */
6602         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6603                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6604                 goto err_nodev;
6605         }
6606
6607         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6608         if (ipr_cmd == NULL) {
6609                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6610                 return SCSI_MLQUEUE_HOST_BUSY;
6611         }
6612         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6613
6614         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6615         ioarcb = &ipr_cmd->ioarcb;
6616
6617         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6618         ipr_cmd->scsi_cmd = scsi_cmd;
6619         ipr_cmd->done = ipr_scsi_eh_done;
6620
6621         if (ipr_is_gscsi(res)) {
6622                 if (scsi_cmd->underflow == 0)
6623                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6624
6625                 if (res->reset_occurred) {
6626                         res->reset_occurred = 0;
6627                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6628                 }
6629         }
6630
6631         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6632                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6633
6634                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6635                 if (scsi_cmd->flags & SCMD_TAGGED)
6636                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6637                 else
6638                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6639         }
6640
6641         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6642             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6643                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6644         }
6645         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6646                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6647
6648                 if (scsi_cmd->underflow == 0)
6649                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6650         }
6651
6652         if (ioa_cfg->sis64)
6653                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6654         else
6655                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6656
6657         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6658         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6659                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6660                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6661                 if (!rc)
6662                         scsi_dma_unmap(scsi_cmd);
6663                 return SCSI_MLQUEUE_HOST_BUSY;
6664         }
6665
6666         if (unlikely(hrrq->ioa_is_dead)) {
6667                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6668                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6669                 scsi_dma_unmap(scsi_cmd);
6670                 goto err_nodev;
6671         }
6672
6673         ioarcb->res_handle = res->res_handle;
6674         if (res->needs_sync_complete) {
6675                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6676                 res->needs_sync_complete = 0;
6677         }
6678         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6679         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6680         ipr_send_command(ipr_cmd);
6681         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6682         return 0;
6683
6684 err_nodev:
6685         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6686         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6687         scsi_cmd->result = (DID_NO_CONNECT << 16);
6688         scsi_cmd->scsi_done(scsi_cmd);
6689         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6690         return 0;
6691 }
6692
6693 /**
6694  * ipr_ioctl - IOCTL handler
6695  * @sdev:       scsi device struct
6696  * @cmd:        IOCTL cmd
6697  * @arg:        IOCTL arg
6698  *
6699  * Return value:
6700  *      0 on success / other on failure
6701  **/
6702 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6703                      void __user *arg)
6704 {
6705         struct ipr_resource_entry *res;
6706
6707         res = (struct ipr_resource_entry *)sdev->hostdata;
6708         if (res && ipr_is_gata(res)) {
6709                 if (cmd == HDIO_GET_IDENTITY)
6710                         return -ENOTTY;
6711                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6712         }
6713
6714         return -EINVAL;
6715 }
6716
6717 /**
6718  * ipr_info - Get information about the card/driver
6719  * @scsi_host:  scsi host struct
6720  *
6721  * Return value:
6722  *      pointer to buffer with description string
6723  **/
6724 static const char *ipr_ioa_info(struct Scsi_Host *host)
6725 {
6726         static char buffer[512];
6727         struct ipr_ioa_cfg *ioa_cfg;
6728         unsigned long lock_flags = 0;
6729
6730         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6731
6732         spin_lock_irqsave(host->host_lock, lock_flags);
6733         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6734         spin_unlock_irqrestore(host->host_lock, lock_flags);
6735
6736         return buffer;
6737 }
6738
6739 static struct scsi_host_template driver_template = {
6740         .module = THIS_MODULE,
6741         .name = "IPR",
6742         .info = ipr_ioa_info,
6743         .ioctl = ipr_ioctl,
6744         .queuecommand = ipr_queuecommand,
6745         .eh_abort_handler = ipr_eh_abort,
6746         .eh_device_reset_handler = ipr_eh_dev_reset,
6747         .eh_host_reset_handler = ipr_eh_host_reset,
6748         .slave_alloc = ipr_slave_alloc,
6749         .slave_configure = ipr_slave_configure,
6750         .slave_destroy = ipr_slave_destroy,
6751         .scan_finished = ipr_scan_finished,
6752         .target_alloc = ipr_target_alloc,
6753         .target_destroy = ipr_target_destroy,
6754         .change_queue_depth = ipr_change_queue_depth,
6755         .bios_param = ipr_biosparam,
6756         .can_queue = IPR_MAX_COMMANDS,
6757         .this_id = -1,
6758         .sg_tablesize = IPR_MAX_SGLIST,
6759         .max_sectors = IPR_IOA_MAX_SECTORS,
6760         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6761         .shost_attrs = ipr_ioa_attrs,
6762         .sdev_attrs = ipr_dev_attrs,
6763         .proc_name = IPR_NAME,
6764 };
6765
6766 /**
6767  * ipr_ata_phy_reset - libata phy_reset handler
6768  * @ap:         ata port to reset
6769  *
6770  **/
6771 static void ipr_ata_phy_reset(struct ata_port *ap)
6772 {
6773         unsigned long flags;
6774         struct ipr_sata_port *sata_port = ap->private_data;
6775         struct ipr_resource_entry *res = sata_port->res;
6776         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6777         int rc;
6778
6779         ENTER;
6780         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6781         while (ioa_cfg->in_reset_reload) {
6782                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6783                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6784                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6785         }
6786
6787         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6788                 goto out_unlock;
6789
6790         rc = ipr_device_reset(ioa_cfg, res);
6791
6792         if (rc) {
6793                 ap->link.device[0].class = ATA_DEV_NONE;
6794                 goto out_unlock;
6795         }
6796
6797         ap->link.device[0].class = res->ata_class;
6798         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6799                 ap->link.device[0].class = ATA_DEV_NONE;
6800
6801 out_unlock:
6802         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6803         LEAVE;
6804 }
6805
6806 /**
6807  * ipr_ata_post_internal - Cleanup after an internal command
6808  * @qc: ATA queued command
6809  *
6810  * Return value:
6811  *      none
6812  **/
6813 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6814 {
6815         struct ipr_sata_port *sata_port = qc->ap->private_data;
6816         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6817         struct ipr_cmnd *ipr_cmd;
6818         struct ipr_hrr_queue *hrrq;
6819         unsigned long flags;
6820
6821         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6822         while (ioa_cfg->in_reset_reload) {
6823                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6824                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6825                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6826         }
6827
6828         for_each_hrrq(hrrq, ioa_cfg) {
6829                 spin_lock(&hrrq->_lock);
6830                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6831                         if (ipr_cmd->qc == qc) {
6832                                 ipr_device_reset(ioa_cfg, sata_port->res);
6833                                 break;
6834                         }
6835                 }
6836                 spin_unlock(&hrrq->_lock);
6837         }
6838         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6839 }
6840
6841 /**
6842  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6843  * @regs:       destination
6844  * @tf: source ATA taskfile
6845  *
6846  * Return value:
6847  *      none
6848  **/
6849 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6850                              struct ata_taskfile *tf)
6851 {
6852         regs->feature = tf->feature;
6853         regs->nsect = tf->nsect;
6854         regs->lbal = tf->lbal;
6855         regs->lbam = tf->lbam;
6856         regs->lbah = tf->lbah;
6857         regs->device = tf->device;
6858         regs->command = tf->command;
6859         regs->hob_feature = tf->hob_feature;
6860         regs->hob_nsect = tf->hob_nsect;
6861         regs->hob_lbal = tf->hob_lbal;
6862         regs->hob_lbam = tf->hob_lbam;
6863         regs->hob_lbah = tf->hob_lbah;
6864         regs->ctl = tf->ctl;
6865 }
6866
6867 /**
6868  * ipr_sata_done - done function for SATA commands
6869  * @ipr_cmd:    ipr command struct
6870  *
6871  * This function is invoked by the interrupt handler for
6872  * ops generated by the SCSI mid-layer to SATA devices
6873  *
6874  * Return value:
6875  *      none
6876  **/
6877 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6878 {
6879         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6880         struct ata_queued_cmd *qc = ipr_cmd->qc;
6881         struct ipr_sata_port *sata_port = qc->ap->private_data;
6882         struct ipr_resource_entry *res = sata_port->res;
6883         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6884
6885         spin_lock(&ipr_cmd->hrrq->_lock);
6886         if (ipr_cmd->ioa_cfg->sis64)
6887                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6888                        sizeof(struct ipr_ioasa_gata));
6889         else
6890                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6891                        sizeof(struct ipr_ioasa_gata));
6892         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6893
6894         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6895                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6896
6897         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6898                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6899         else
6900                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6901         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6902         spin_unlock(&ipr_cmd->hrrq->_lock);
6903         ata_qc_complete(qc);
6904 }
6905
6906 /**
6907  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6908  * @ipr_cmd:    ipr command struct
6909  * @qc:         ATA queued command
6910  *
6911  **/
6912 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6913                                   struct ata_queued_cmd *qc)
6914 {
6915         u32 ioadl_flags = 0;
6916         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6917         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6918         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6919         int len = qc->nbytes;
6920         struct scatterlist *sg;
6921         unsigned int si;
6922         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6923
6924         if (len == 0)
6925                 return;
6926
6927         if (qc->dma_dir == DMA_TO_DEVICE) {
6928                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6929                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6930         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6931                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6932
6933         ioarcb->data_transfer_length = cpu_to_be32(len);
6934         ioarcb->ioadl_len =
6935                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6936         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6937                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6938
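             /*
              * for_each_sg() iterates the qc's mapped scatterlist; the last
              * descriptor is remembered so that only it receives the LAST
              * flag after the loop.
              */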
6939         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6940                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6941                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6942                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6943
6944                 last_ioadl64 = ioadl64;
6945                 ioadl64++;
6946         }
6947
6948         if (likely(last_ioadl64))
6949                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6950 }
6951
6952 /**
6953  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6954  * @ipr_cmd:    ipr command struct
6955  * @qc:         ATA queued command
6956  *
6957  **/
6958 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6959                                 struct ata_queued_cmd *qc)
6960 {
6961         u32 ioadl_flags = 0;
6962         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6963         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6964         struct ipr_ioadl_desc *last_ioadl = NULL;
6965         int len = qc->nbytes;
6966         struct scatterlist *sg;
6967         unsigned int si;
6968
6969         if (len == 0)
6970                 return;
6971
6972         if (qc->dma_dir == DMA_TO_DEVICE) {
6973                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6974                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6975                 ioarcb->data_transfer_length = cpu_to_be32(len);
6976                 ioarcb->ioadl_len =
6977                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6978         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6979                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6980                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6981                 ioarcb->read_ioadl_len =
6982                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6983         }
6984
6985         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6986                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6987                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6988
6989                 last_ioadl = ioadl;
6990                 ioadl++;
6991         }
6992
6993         if (likely(last_ioadl))
6994                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6995 }
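
/*
 * For reference (a reading aid, not adapter documentation): the 32-bit
 * IOADL descriptor above packs the flag bits and the segment length into
 * one big-endian word, while the 64-bit form keeps them in separate fields:
 *
 *	ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
 *
 *	ioadl64->flags    = cpu_to_be32(ioadl_flags);
 *	ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
 *
 * In both encodings only the final descriptor carries IPR_IOADL_FLAGS_LAST.
 */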
6996
6997 /**
6998  * ipr_qc_defer - Get a free ipr_cmd
6999  * @qc: queued command
7000  *
7001  * Return value:
7002  *      0 if success / ATA_DEFER_LINK if no free command blocks are available
7003  **/
7004 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7005 {
7006         struct ata_port *ap = qc->ap;
7007         struct ipr_sata_port *sata_port = ap->private_data;
7008         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7009         struct ipr_cmnd *ipr_cmd;
7010         struct ipr_hrr_queue *hrrq;
7011         int hrrq_id;
7012
7013         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7014         hrrq = &ioa_cfg->hrrq[hrrq_id];
7015
7016         qc->lldd_task = NULL;
7017         spin_lock(&hrrq->_lock);
7018         if (unlikely(hrrq->ioa_is_dead)) {
7019                 spin_unlock(&hrrq->_lock);
7020                 return 0;
7021         }
7022
7023         if (unlikely(!hrrq->allow_cmds)) {
7024                 spin_unlock(&hrrq->_lock);
7025                 return ATA_DEFER_LINK;
7026         }
7027
7028         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7029         if (ipr_cmd == NULL) {
7030                 spin_unlock(&hrrq->_lock);
7031                 return ATA_DEFER_LINK;
7032         }
7033
7034         qc->lldd_task = ipr_cmd;
7035         spin_unlock(&hrrq->_lock);
7036         return 0;
7037 }
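
/*
 * Usage note: ->qc_defer() reserves a free ipr_cmnd and parks it in
 * qc->lldd_task; ->qc_issue() below consumes that reservation, retrying
 * the reservation itself if libata skipped the defer step:
 *
 *	if (qc->lldd_task == NULL)
 *		ipr_qc_defer(qc);
 *	ipr_cmd = qc->lldd_task;
 */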
7038
7039 /**
7040  * ipr_qc_issue - Issue a SATA qc to a device
7041  * @qc: queued command
7042  *
7043  * Return value:
7044  *      0 if success / AC_ERR_* failure mask on error
7045  **/
7046 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7047 {
7048         struct ata_port *ap = qc->ap;
7049         struct ipr_sata_port *sata_port = ap->private_data;
7050         struct ipr_resource_entry *res = sata_port->res;
7051         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7052         struct ipr_cmnd *ipr_cmd;
7053         struct ipr_ioarcb *ioarcb;
7054         struct ipr_ioarcb_ata_regs *regs;
7055
7056         if (qc->lldd_task == NULL)
7057                 ipr_qc_defer(qc);
7058
7059         ipr_cmd = qc->lldd_task;
7060         if (ipr_cmd == NULL)
7061                 return AC_ERR_SYSTEM;
7062
7063         qc->lldd_task = NULL;
7064         spin_lock(&ipr_cmd->hrrq->_lock);
7065         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7066                         ipr_cmd->hrrq->ioa_is_dead)) {
7067                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7068                 spin_unlock(&ipr_cmd->hrrq->_lock);
7069                 return AC_ERR_SYSTEM;
7070         }
7071
7072         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7073         ioarcb = &ipr_cmd->ioarcb;
7074
7075         if (ioa_cfg->sis64) {
7076                 regs = &ipr_cmd->i.ata_ioadl.regs;
7077                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7078         } else
7079                 regs = &ioarcb->u.add_data.u.regs;
7080
7081         memset(regs, 0, sizeof(*regs));
7082         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7083
7084         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7085         ipr_cmd->qc = qc;
7086         ipr_cmd->done = ipr_sata_done;
7087         ipr_cmd->ioarcb.res_handle = res->res_handle;
7088         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7089         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7090         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7091         ipr_cmd->dma_use_sg = qc->n_elem;
7092
7093         if (ioa_cfg->sis64)
7094                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7095         else
7096                 ipr_build_ata_ioadl(ipr_cmd, qc);
7097
7098         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7099         ipr_copy_sata_tf(regs, &qc->tf);
7100         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7101         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7102
7103         switch (qc->tf.protocol) {
7104         case ATA_PROT_NODATA:
7105         case ATA_PROT_PIO:
7106                 break;
7107
7108         case ATA_PROT_DMA:
7109                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7110                 break;
7111
7112         case ATAPI_PROT_PIO:
7113         case ATAPI_PROT_NODATA:
7114                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7115                 break;
7116
7117         case ATAPI_PROT_DMA:
7118                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7119                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7120                 break;
7121
7122         default:
7123                 WARN_ON(1);
7124                 spin_unlock(&ipr_cmd->hrrq->_lock);
7125                 return AC_ERR_INVALID;
7126         }
7127
7128         ipr_send_command(ipr_cmd);
7129         spin_unlock(&ipr_cmd->hrrq->_lock);
7130
7131         return 0;
7132 }
7133
7134 /**
7135  * ipr_qc_fill_rtf - Read result TF
7136  * @qc: ATA queued command
7137  *
7138  * Return value:
7139  *      true
7140  **/
7141 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7142 {
7143         struct ipr_sata_port *sata_port = qc->ap->private_data;
7144         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7145         struct ata_taskfile *tf = &qc->result_tf;
7146
7147         tf->feature = g->error;
7148         tf->nsect = g->nsect;
7149         tf->lbal = g->lbal;
7150         tf->lbam = g->lbam;
7151         tf->lbah = g->lbah;
7152         tf->device = g->device;
7153         tf->command = g->status;
7154         tf->hob_nsect = g->hob_nsect;
7155         tf->hob_lbal = g->hob_lbal;
7156         tf->hob_lbam = g->hob_lbam;
7157         tf->hob_lbah = g->hob_lbah;
7158
7159         return true;
7160 }
7161
7162 static struct ata_port_operations ipr_sata_ops = {
7163         .phy_reset = ipr_ata_phy_reset,
7164         .hardreset = ipr_sata_reset,
7165         .post_internal_cmd = ipr_ata_post_internal,
7166         .qc_prep = ata_noop_qc_prep,
7167         .qc_defer = ipr_qc_defer,
7168         .qc_issue = ipr_qc_issue,
7169         .qc_fill_rtf = ipr_qc_fill_rtf,
7170         .port_start = ata_sas_port_start,
7171         .port_stop = ata_sas_port_stop
7172 };
7173
7174 static struct ata_port_info sata_port_info = {
7175         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7176                           ATA_FLAG_SAS_HOST,
7177         .pio_mask       = ATA_PIO4_ONLY,
7178         .mwdma_mask     = ATA_MWDMA2,
7179         .udma_mask      = ATA_UDMA6,
7180         .port_ops       = &ipr_sata_ops
7181 };
7182
7183 #ifdef CONFIG_PPC_PSERIES
7184 static const u16 ipr_blocked_processors[] = {
7185         PVR_NORTHSTAR,
7186         PVR_PULSAR,
7187         PVR_POWER4,
7188         PVR_ICESTAR,
7189         PVR_SSTAR,
7190         PVR_POWER4p,
7191         PVR_630,
7192         PVR_630p
7193 };
7194
7195 /**
7196  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7197  * @ioa_cfg:    ioa cfg struct
7198  *
7199  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7200  * certain pSeries hardware. This function determines if the given
7201  * adapter is in one of these configurations or not.
7202  *
7203  * Return value:
7204  *      1 if adapter is not supported / 0 if adapter is supported
7205  **/
7206 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7207 {
7208         int i;
7209
7210         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7211                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7212                         if (pvr_version_is(ipr_blocked_processors[i]))
7213                                 return 1;
7214                 }
7215         }
7216         return 0;
7217 }
7218 #else
7219 #define ipr_invalid_adapter(ioa_cfg) 0
7220 #endif
7221
7222 /**
7223  * ipr_ioa_bringdown_done - IOA bring down completion.
7224  * @ipr_cmd:    ipr command struct
7225  *
7226  * This function processes the completion of an adapter bring down.
7227  * It wakes any reset sleepers.
7228  *
7229  * Return value:
7230  *      IPR_RC_JOB_RETURN
7231  **/
7232 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7233 {
7234         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7235         int i;
7236
7237         ENTER;
7238         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7239                 ipr_trace;
7240                 ioa_cfg->scsi_unblock = 1;
7241                 schedule_work(&ioa_cfg->work_q);
7242         }
7243
7244         ioa_cfg->in_reset_reload = 0;
7245         ioa_cfg->reset_retries = 0;
7246         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7247                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7248                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7249                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7250         }
7251         wmb();
7252
7253         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7254         wake_up_all(&ioa_cfg->reset_wait_q);
7255         LEAVE;
7256
7257         return IPR_RC_JOB_RETURN;
7258 }
7259
7260 /**
7261  * ipr_ioa_reset_done - IOA reset completion.
7262  * @ipr_cmd:    ipr command struct
7263  *
7264  * This function processes the completion of an adapter reset.
7265  * It schedules any necessary mid-layer add/removes and
7266  * wakes any reset sleepers.
7267  *
7268  * Return value:
7269  *      IPR_RC_JOB_RETURN
7270  **/
7271 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7272 {
7273         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7274         struct ipr_resource_entry *res;
7275         int j;
7276
7277         ENTER;
7278         ioa_cfg->in_reset_reload = 0;
7279         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7280                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7281                 ioa_cfg->hrrq[j].allow_cmds = 1;
7282                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7283         }
7284         wmb();
7285         ioa_cfg->reset_cmd = NULL;
7286         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7287
7288         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7289                 if (res->add_to_ml || res->del_from_ml) {
7290                         ipr_trace;
7291                         break;
7292                 }
7293         }
7294         schedule_work(&ioa_cfg->work_q);
7295
7296         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7297                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7298                 if (j < IPR_NUM_LOG_HCAMS)
7299                         ipr_send_hcam(ioa_cfg,
7300                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7301                                 ioa_cfg->hostrcb[j]);
7302                 else
7303                         ipr_send_hcam(ioa_cfg,
7304                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7305                                 ioa_cfg->hostrcb[j]);
7306         }
7307
7308         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7309         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7310
7311         ioa_cfg->reset_retries = 0;
7312         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7313         wake_up_all(&ioa_cfg->reset_wait_q);
7314
7315         ioa_cfg->scsi_unblock = 1;
7316         schedule_work(&ioa_cfg->work_q);
7317         LEAVE;
7318         return IPR_RC_JOB_RETURN;
7319 }
7320
7321 /**
7322  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7323  * @supported_dev:      supported device struct
7324  * @vpids:      vendor product id struct
7325  *
7326  * Return value:
7327  *      none
7328  **/
7329 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7330                                  struct ipr_std_inq_vpids *vpids)
7331 {
7332         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7333         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7334         supported_dev->num_records = 1;
7335         supported_dev->data_length =
7336                 cpu_to_be16(sizeof(struct ipr_supported_device));
7337         supported_dev->reserved = 0;
7338 }
7339
7340 /**
7341  * ipr_set_supported_devs - Send Set Supported Devices for a device
7342  * @ipr_cmd:    ipr command struct
7343  *
7344  * This function sends a Set Supported Devices to the adapter
7345  *
7346  * Return value:
7347  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7348  **/
7349 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7350 {
7351         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7352         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7353         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7354         struct ipr_resource_entry *res = ipr_cmd->u.res;
7355
7356         ipr_cmd->job_step = ipr_ioa_reset_done;
7357
7358         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7359                 if (!ipr_is_scsi_disk(res))
7360                         continue;
7361
7362                 ipr_cmd->u.res = res;
7363                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7364
7365                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7366                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7367                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7368
7369                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7370                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7371                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7372                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7373
7374                 ipr_init_ioadl(ipr_cmd,
7375                                ioa_cfg->vpd_cbs_dma +
7376                                  offsetof(struct ipr_misc_cbs, supp_dev),
7377                                sizeof(struct ipr_supported_device),
7378                                IPR_IOADL_FLAGS_WRITE_LAST);
7379
7380                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7381                            IPR_SET_SUP_DEVICE_TIMEOUT);
7382
7383                 if (!ioa_cfg->sis64)
7384                         ipr_cmd->job_step = ipr_set_supported_devs;
7385                 LEAVE;
7386                 return IPR_RC_JOB_RETURN;
7387         }
7388
7389         LEAVE;
7390         return IPR_RC_JOB_CONTINUE;
7391 }
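
/*
 * Sketch of the reset job convention used by the job_step handlers in this
 * file: a step either finishes inline (IPR_RC_JOB_CONTINUE, the next step
 * runs immediately) or issues an adapter command and returns
 * IPR_RC_JOB_RETURN, having arranged for the completion path to resume at
 * ->job_step:
 *
 *	ipr_cmd->job_step = next_step;
 *	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
 *	return IPR_RC_JOB_RETURN;
 */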
7392
7393 /**
7394  * ipr_get_mode_page - Locate specified mode page
7395  * @mode_pages: mode page buffer
7396  * @page_code:  page code to find
7397  * @len:                minimum required length for mode page
7398  *
7399  * Return value:
7400  *      pointer to mode page / NULL on failure
7401  **/
7402 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7403                                u32 page_code, u32 len)
7404 {
7405         struct ipr_mode_page_hdr *mode_hdr;
7406         u32 page_length;
7407         u32 length;
7408
7409         if (!mode_pages || (mode_pages->hdr.length == 0))
7410                 return NULL;
7411
7412         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7413         mode_hdr = (struct ipr_mode_page_hdr *)
7414                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7415
7416         while (length) {
7417                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7418                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7419                                 return mode_hdr;
7420                         break;
7421                 } else {
7422                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7423                                        mode_hdr->page_length);
7424                         length -= page_length;
7425                         mode_hdr = (struct ipr_mode_page_hdr *)
7426                                 ((unsigned long)mode_hdr + page_length);
7427                 }
7428         }
7429         return NULL;
7430 }
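
/*
 * Typical call pattern (mirrors the callers below): ask for a page code plus
 * the minimum length required, and treat NULL as "page not returned":
 *
 *	struct ipr_mode_page24 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x24,
 *				 sizeof(struct ipr_mode_page24));
 *	if (page)
 *		page->flags |= IPR_ENABLE_DUAL_IOA_AF;
 */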
7431
7432 /**
7433  * ipr_check_term_power - Check for term power errors
7434  * @ioa_cfg:    ioa config struct
7435  * @mode_pages: IOAFP mode pages buffer
7436  *
7437  * Check the IOAFP's mode page 28 for term power errors
7438  *
7439  * Return value:
7440  *      nothing
7441  **/
7442 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7443                                  struct ipr_mode_pages *mode_pages)
7444 {
7445         int i;
7446         int entry_length;
7447         struct ipr_dev_bus_entry *bus;
7448         struct ipr_mode_page28 *mode_page;
7449
7450         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7451                                       sizeof(struct ipr_mode_page28));
7452
        /* bail out if the adapter did not return page 28 */
        if (!mode_page)
                return;

7453         entry_length = mode_page->entry_length;
7454
7455         bus = mode_page->bus;
7456
7457         for (i = 0; i < mode_page->num_entries; i++) {
7458                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7459                         dev_err(&ioa_cfg->pdev->dev,
7460                                 "Term power is absent on scsi bus %d\n",
7461                                 bus->res_addr.bus);
7462                 }
7463
7464                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7465         }
7466 }
7467
7468 /**
7469  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7470  * @ioa_cfg:    ioa config struct
7471  *
7472  * Looks through the config table checking for SES devices. If
7473  * the SES device is in the SES table indicating a maximum SCSI
7474  * bus speed, the speed is limited for the bus.
7475  *
7476  * Return value:
7477  *      none
7478  **/
7479 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7480 {
7481         u32 max_xfer_rate;
7482         int i;
7483
7484         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7485                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7486                                                        ioa_cfg->bus_attr[i].bus_width);
7487
7488                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7489                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7490         }
7491 }
7492
7493 /**
7494  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7495  * @ioa_cfg:    ioa config struct
7496  * @mode_pages: mode page 28 buffer
7497  *
7498  * Updates mode page 28 based on driver configuration
7499  *
7500  * Return value:
7501  *      none
7502  **/
7503 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7504                                           struct ipr_mode_pages *mode_pages)
7505 {
7506         int i, entry_length;
7507         struct ipr_dev_bus_entry *bus;
7508         struct ipr_bus_attributes *bus_attr;
7509         struct ipr_mode_page28 *mode_page;
7510
7511         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7512                                       sizeof(struct ipr_mode_page28));
7513
        /* bail out if the adapter did not return page 28 */
        if (!mode_page)
                return;

7514         entry_length = mode_page->entry_length;
7515
7516         /* Loop for each device bus entry */
7517         for (i = 0, bus = mode_page->bus;
7518              i < mode_page->num_entries;
7519              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7520                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7521                         dev_err(&ioa_cfg->pdev->dev,
7522                                 "Invalid resource address reported: 0x%08X\n",
7523                                 IPR_GET_PHYS_LOC(bus->res_addr));
7524                         continue;
7525                 }
7526
7527                 bus_attr = &ioa_cfg->bus_attr[i];
7528                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7529                 bus->bus_width = bus_attr->bus_width;
7530                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7531                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7532                 if (bus_attr->qas_enabled)
7533                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7534                 else
7535                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7536         }
7537 }
7538
7539 /**
7540  * ipr_build_mode_select - Build a mode select command
7541  * @ipr_cmd:    ipr command struct
7542  * @res_handle: resource handle to send command to
7543  * @parm:       Byte 1 of Mode Select command
7544  * @dma_addr:   DMA buffer address
7545  * @xfer_len:   data transfer length
7546  *
7547  * Return value:
7548  *      none
7549  **/
7550 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7551                                   __be32 res_handle, u8 parm,
7552                                   dma_addr_t dma_addr, u8 xfer_len)
7553 {
7554         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7555
7556         ioarcb->res_handle = res_handle;
7557         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7558         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7559         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7560         ioarcb->cmd_pkt.cdb[1] = parm;
7561         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7562
7563         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7564 }
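
/*
 * For reference, the MODE SELECT(6) CDB assembled above (per SPC; a reading
 * aid, not new behavior):
 *
 *	cdb[0] = MODE_SELECT	opcode 0x15
 *	cdb[1] = parm		PF/SP bits; callers here pass 0x11
 *	cdb[4] = xfer_len	parameter list length in bytes
 */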
7565
7566 /**
7567  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7568  * @ipr_cmd:    ipr command struct
7569  *
7570  * This function sets up the SCSI bus attributes and sends
7571  * a Mode Select for Page 28 to activate them.
7572  *
7573  * Return value:
7574  *      IPR_RC_JOB_RETURN
7575  **/
7576 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7577 {
7578         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7579         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7580         int length;
7581
7582         ENTER;
7583         ipr_scsi_bus_speed_limit(ioa_cfg);
7584         ipr_check_term_power(ioa_cfg, mode_pages);
7585         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7586         length = mode_pages->hdr.length + 1;
7587         mode_pages->hdr.length = 0;
7588
7589         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7590                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7591                               length);
7592
7593         ipr_cmd->job_step = ipr_set_supported_devs;
7594         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7595                                     struct ipr_resource_entry, queue);
7596         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7597
7598         LEAVE;
7599         return IPR_RC_JOB_RETURN;
7600 }
7601
7602 /**
7603  * ipr_build_mode_sense - Builds a mode sense command
7604  * @ipr_cmd:    ipr command struct
7605  * @res_handle: resource handle to send command to
7606  * @parm:               Byte 2 of mode sense command
7607  * @dma_addr:   DMA address of mode sense buffer
7608  * @xfer_len:   Size of DMA buffer
7609  *
7610  * Return value:
7611  *      none
7612  **/
7613 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7614                                  __be32 res_handle,
7615                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7616 {
7617         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7618
7619         ioarcb->res_handle = res_handle;
7620         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7621         ioarcb->cmd_pkt.cdb[2] = parm;
7622         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7623         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7624
7625         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7626 }
7627
7628 /**
7629  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7630  * @ipr_cmd:    ipr command struct
7631  *
7632  * This function handles the failure of an IOA bringup command.
7633  *
7634  * Return value:
7635  *      IPR_RC_JOB_RETURN
7636  **/
7637 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7638 {
7639         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7640         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7641
7642         dev_err(&ioa_cfg->pdev->dev,
7643                 "0x%02X failed with IOASC: 0x%08X\n",
7644                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7645
7646         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7647         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7648         return IPR_RC_JOB_RETURN;
7649 }
7650
7651 /**
7652  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7653  * @ipr_cmd:    ipr command struct
7654  *
7655  * This function handles the failure of a Mode Sense to the IOAFP.
7656  * Some adapters do not handle all mode pages.
7657  *
7658  * Return value:
7659  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7660  **/
7661 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7662 {
7663         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7664         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7665
7666         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7667                 ipr_cmd->job_step = ipr_set_supported_devs;
7668                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7669                                             struct ipr_resource_entry, queue);
7670                 return IPR_RC_JOB_CONTINUE;
7671         }
7672
7673         return ipr_reset_cmd_failed(ipr_cmd);
7674 }
7675
7676 /**
7677  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7678  * @ipr_cmd:    ipr command struct
7679  *
7680  * This function sends a Page 28 mode sense to the IOA to
7681  * retrieve SCSI bus attributes.
7682  *
7683  * Return value:
7684  *      IPR_RC_JOB_RETURN
7685  **/
7686 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7687 {
7688         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7689
7690         ENTER;
7691         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7692                              0x28, ioa_cfg->vpd_cbs_dma +
7693                              offsetof(struct ipr_misc_cbs, mode_pages),
7694                              sizeof(struct ipr_mode_pages));
7695
7696         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7697         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7698
7699         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7700
7701         LEAVE;
7702         return IPR_RC_JOB_RETURN;
7703 }
7704
7705 /**
7706  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7707  * @ipr_cmd:    ipr command struct
7708  *
7709  * This function enables dual IOA RAID support if possible.
7710  *
7711  * Return value:
7712  *      IPR_RC_JOB_RETURN
7713  **/
7714 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7715 {
7716         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7717         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7718         struct ipr_mode_page24 *mode_page;
7719         int length;
7720
7721         ENTER;
7722         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7723                                       sizeof(struct ipr_mode_page24));
7724
7725         if (mode_page)
7726                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7727
7728         length = mode_pages->hdr.length + 1;
7729         mode_pages->hdr.length = 0;
7730
7731         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7732                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7733                               length);
7734
7735         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7736         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7737
7738         LEAVE;
7739         return IPR_RC_JOB_RETURN;
7740 }
7741
7742 /**
7743  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7744  * @ipr_cmd:    ipr command struct
7745  *
7746  * This function handles the failure of a Mode Sense to the IOAFP.
7747  * Some adapters do not handle all mode pages.
7748  *
7749  * Return value:
7750  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7751  **/
7752 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7753 {
7754         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7755
7756         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7757                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7758                 return IPR_RC_JOB_CONTINUE;
7759         }
7760
7761         return ipr_reset_cmd_failed(ipr_cmd);
7762 }
7763
7764 /**
7765  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7766  * @ipr_cmd:    ipr command struct
7767  *
7768  * This function sends a mode sense to the IOA to retrieve
7769  * the IOA Advanced Function Control mode page.
7770  *
7771  * Return value:
7772  *      IPR_RC_JOB_RETURN
7773  **/
7774 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7775 {
7776         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7777
7778         ENTER;
7779         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7780                              0x24, ioa_cfg->vpd_cbs_dma +
7781                              offsetof(struct ipr_misc_cbs, mode_pages),
7782                              sizeof(struct ipr_mode_pages));
7783
7784         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7785         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7786
7787         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7788
7789         LEAVE;
7790         return IPR_RC_JOB_RETURN;
7791 }
7792
7793 /**
7794  * ipr_init_res_table - Initialize the resource table
7795  * @ipr_cmd:    ipr command struct
7796  *
7797  * This function looks through the existing resource table, comparing
7798  * it with the config table. This function will take care of old/new
7799  * devices and schedule adding/removing them from the mid-layer
7800  * as appropriate.
7801  *
7802  * Return value:
7803  *      IPR_RC_JOB_CONTINUE
7804  **/
7805 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7806 {
7807         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7808         struct ipr_resource_entry *res, *temp;
7809         struct ipr_config_table_entry_wrapper cfgtew;
7810         int entries, found, flag, i;
7811         LIST_HEAD(old_res);
7812
7813         ENTER;
7814         if (ioa_cfg->sis64)
7815                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7816         else
7817                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7818
7819         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7820                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7821
7822         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7823                 list_move_tail(&res->queue, &old_res);
7824
7825         if (ioa_cfg->sis64)
7826                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7827         else
7828                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7829
7830         for (i = 0; i < entries; i++) {
7831                 if (ioa_cfg->sis64)
7832                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7833                 else
7834                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7835                 found = 0;
7836
7837                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7838                         if (ipr_is_same_device(res, &cfgtew)) {
7839                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7840                                 found = 1;
7841                                 break;
7842                         }
7843                 }
7844
7845                 if (!found) {
7846                         if (list_empty(&ioa_cfg->free_res_q)) {
7847                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7848                                 break;
7849                         }
7850
7851                         found = 1;
7852                         res = list_entry(ioa_cfg->free_res_q.next,
7853                                          struct ipr_resource_entry, queue);
7854                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7855                         ipr_init_res_entry(res, &cfgtew);
7856                         res->add_to_ml = 1;
7857                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7858                         res->sdev->allow_restart = 1;
7859
7860                 if (found)
7861                         ipr_update_res_entry(res, &cfgtew);
7862         }
7863
7864         list_for_each_entry_safe(res, temp, &old_res, queue) {
7865                 if (res->sdev) {
7866                         res->del_from_ml = 1;
7867                         res->res_handle = IPR_INVALID_RES_HANDLE;
7868                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7869                 }
7870         }
7871
7872         list_for_each_entry_safe(res, temp, &old_res, queue) {
7873                 ipr_clear_res_target(res);
7874                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7875         }
7876
7877         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7878                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7879         else
7880                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7881
7882         LEAVE;
7883         return IPR_RC_JOB_CONTINUE;
7884 }
7885
7886 /**
7887  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7888  * @ipr_cmd:    ipr command struct
7889  *
7890  * This function sends a Query IOA Configuration command
7891  * to the adapter to retrieve the IOA configuration table.
7892  *
7893  * Return value:
7894  *      IPR_RC_JOB_RETURN
7895  **/
7896 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7897 {
7898         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7899         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7900         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7901         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7902
7903         ENTER;
7904         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7905                 ioa_cfg->dual_raid = 1;
7906         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7907                  ucode_vpd->major_release, ucode_vpd->card_type,
7908                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7909         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7910         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7911
7912         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7913         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7914         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7915         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7916
7917         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7918                        IPR_IOADL_FLAGS_READ_LAST);
7919
7920         ipr_cmd->job_step = ipr_init_res_table;
7921
7922         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7923
7924         LEAVE;
7925         return IPR_RC_JOB_RETURN;
7926 }
7927
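/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/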
7928 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7929 {
7930         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7931
7932         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7933                 return IPR_RC_JOB_CONTINUE;
7934
7935         return ipr_reset_cmd_failed(ipr_cmd);
7936 }
7937
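/**
 * ipr_build_ioa_service_action - Build an IOA service action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/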
7938 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7939                                          __be32 res_handle, u8 sa_code)
7940 {
7941         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7942
7943         ioarcb->res_handle = res_handle;
7944         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7945         ioarcb->cmd_pkt.cdb[1] = sa_code;
7946         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7947 }
7948
7949 /**
7950  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7951  * @ipr_cmd:    ipr command struct
7952  *
7953  * Return value:
7954  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7955  **/
7956 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7957 {
7958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7959         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7960         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7961
7962         ENTER;
7963
7964         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7965
7966         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7967                 ipr_build_ioa_service_action(ipr_cmd,
7968                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7969                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7970
7971                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7972
7973                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7974                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7975                            IPR_SET_SUP_DEVICE_TIMEOUT);
7976
7977                 LEAVE;
7978                 return IPR_RC_JOB_RETURN;
7979         }
7980
7981         LEAVE;
7982         return IPR_RC_JOB_CONTINUE;
7983 }
7984
7985 /**
7986  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7987  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry CDB flags (e.g. the EVPD bit)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   size of the inquiry buffer
7988  *
7989  * This utility function sends an inquiry to the adapter.
7990  *
7991  * Return value:
7992  *      none
7993  **/
7994 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7995                               dma_addr_t dma_addr, u8 xfer_len)
7996 {
7997         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7998
7999         ENTER;
8000         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8001         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8002
8003         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8004         ioarcb->cmd_pkt.cdb[1] = flags;
8005         ioarcb->cmd_pkt.cdb[2] = page;
8006         ioarcb->cmd_pkt.cdb[4] = xfer_len;
8007
8008         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8009
8010         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8011         LEAVE;
8012 }
8013
8014 /**
8015  * ipr_inquiry_page_supported - Is the given inquiry page supported
8016  * @page0:              inquiry page 0 buffer
8017  * @page:               page code.
8018  *
8019  * This function determines if the specified inquiry page is supported.
8020  *
8021  * Return value:
8022  *      1 if page is supported / 0 if not
8023  **/
8024 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8025 {
8026         int i;
8027
8028         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8029                 if (page0->page[i] == page)
8030                         return 1;
8031
8032         return 0;
8033 }
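
/*
 * Calling pattern (sketch of the 0xC4/0xD0 probes below, with cap_dma
 * standing in for the capability buffer's DMA address): consult the page 0
 * list first so unsupported pages are never requested:
 *
 *	if (ipr_inquiry_page_supported(page0, 0xD0))
 *		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, cap_dma,
 *				  sizeof(struct ipr_inquiry_cap));
 */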
8034
8035 /**
8036  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8037  * @ipr_cmd:    ipr command struct
8038  *
8039  * This function sends a Page 0xC4 inquiry to the adapter
8040  * to retrieve software VPD information.
8041  *
8042  * Return value:
8043  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8044  **/
8045 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8046 {
8047         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8048         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8049         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8050
8051         ENTER;
8052         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8053         memset(pageC4, 0, sizeof(*pageC4));
8054
8055         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8056                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8057                                   (ioa_cfg->vpd_cbs_dma
8058                                    + offsetof(struct ipr_misc_cbs,
8059                                               pageC4_data)),
8060                                   sizeof(struct ipr_inquiry_pageC4));
8061                 return IPR_RC_JOB_RETURN;
8062         }
8063
8064         LEAVE;
8065         return IPR_RC_JOB_CONTINUE;
8066 }
8067
8068 /**
8069  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8070  * @ipr_cmd:    ipr command struct
8071  *
8072  * This function sends a Page 0xD0 inquiry to the adapter
8073  * to retrieve adapter capabilities.
8074  *
8075  * Return value:
8076  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8077  **/
8078 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8079 {
8080         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8081         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8082         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8083
8084         ENTER;
8085         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8086         memset(cap, 0, sizeof(*cap));
8087
8088         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8089                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8090                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8091                                   sizeof(struct ipr_inquiry_cap));
8092                 return IPR_RC_JOB_RETURN;
8093         }
8094
8095         LEAVE;
8096         return IPR_RC_JOB_CONTINUE;
8097 }
8098
8099 /**
8100  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8101  * @ipr_cmd:    ipr command struct
8102  *
8103  * This function sends a Page 3 inquiry to the adapter
8104  * to retrieve software VPD information.
8105  *
8106  * Return value:
8107  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8108  **/
8109 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8110 {
8111         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8112
8113         ENTER;
8114
8115         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8116
8117         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8118                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8119                           sizeof(struct ipr_inquiry_page3));
8120
8121         LEAVE;
8122         return IPR_RC_JOB_RETURN;
8123 }
8124
8125 /**
8126  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8127  * @ipr_cmd:    ipr command struct
8128  *
8129  * This function sends a Page 0 inquiry to the adapter
8130  * to retrieve supported inquiry pages.
8131  *
8132  * Return value:
8133  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8134  **/
8135 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8136 {
8137         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8138         char type[5];
8139
8140         ENTER;
8141
8142         /* Grab the type out of the VPD and store it away */
8143         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8144         type[4] = '\0';
8145         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8146
8147         if (ipr_invalid_adapter(ioa_cfg)) {
8148                 dev_err(&ioa_cfg->pdev->dev,
8149                         "Adapter not supported in this hardware configuration.\n");
8150
8151                 if (!ipr_testmode) {
8152                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8153                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8154                         list_add_tail(&ipr_cmd->queue,
8155                                         &ioa_cfg->hrrq->hrrq_free_q);
8156                         return IPR_RC_JOB_RETURN;
8157                 }
8158         }
8159
8160         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8161
8162         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8163                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8164                           sizeof(struct ipr_inquiry_page0));
8165
8166         LEAVE;
8167         return IPR_RC_JOB_RETURN;
8168 }
8169
8170 /**
8171  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8172  * @ipr_cmd:    ipr command struct
8173  *
8174  * This function sends a standard inquiry to the adapter.
8175  *
8176  * Return value:
8177  *      IPR_RC_JOB_RETURN
8178  **/
8179 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8180 {
8181         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8182
8183         ENTER;
8184         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8185
8186         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8187                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8188                           sizeof(struct ipr_ioa_vpd));
8189
8190         LEAVE;
8191         return IPR_RC_JOB_RETURN;
8192 }
8193
8194 /**
8195  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8196  * @ipr_cmd:    ipr command struct
8197  *
8198  * This function sends an Identify Host Request Response Queue
8199  * command to establish the HRRQ with the adapter.
8200  *
8201  * Return value:
8202  *      IPR_RC_JOB_RETURN
8203  **/
8204 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8205 {
8206         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8207         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8208         struct ipr_hrr_queue *hrrq;
8209
8210         ENTER;
8211         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8212         if (ioa_cfg->identify_hrrq_index == 0)
8213                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8214
8215         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8216                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8217
8218                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8219                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8220
8221                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8222                 if (ioa_cfg->sis64)
8223                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8224
8225                 if (ioa_cfg->nvectors == 1)
8226                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8227                 else
8228                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8229
8230                 ioarcb->cmd_pkt.cdb[2] =
8231                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8232                 ioarcb->cmd_pkt.cdb[3] =
8233                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8234                 ioarcb->cmd_pkt.cdb[4] =
8235                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8236                 ioarcb->cmd_pkt.cdb[5] =
8237                         ((u64) hrrq->host_rrq_dma) & 0xff;
8238                 ioarcb->cmd_pkt.cdb[7] =
8239                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8240                 ioarcb->cmd_pkt.cdb[8] =
8241                         (sizeof(u32) * hrrq->size) & 0xff;
8242
8243                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8244                         ioarcb->cmd_pkt.cdb[9] =
8245                                         ioa_cfg->identify_hrrq_index;
8246
8247                 if (ioa_cfg->sis64) {
8248                         ioarcb->cmd_pkt.cdb[10] =
8249                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8250                         ioarcb->cmd_pkt.cdb[11] =
8251                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8252                         ioarcb->cmd_pkt.cdb[12] =
8253                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8254                         ioarcb->cmd_pkt.cdb[13] =
8255                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8256                 }
8257
8258                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8259                         ioarcb->cmd_pkt.cdb[14] =
8260                                         ioa_cfg->identify_hrrq_index;
8261
8262                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8263                            IPR_INTERNAL_TIMEOUT);
8264
8265                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8266                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8267
8268                 LEAVE;
8269                 return IPR_RC_JOB_RETURN;
8270         }
8271
8272         LEAVE;
8273         return IPR_RC_JOB_CONTINUE;
8274 }
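
/*
 * Byte-layout recap (a reading aid, not firmware documentation): the
 * Identify Host RRQ CDB built above scatters the 64-bit queue address
 * big-endian across two non-contiguous CDB regions:
 *
 *	u64 a = hrrq->host_rrq_dma;
 *	cdb[2..5]   <- a >> 24, a >> 16, a >> 8,  a;		low 32 bits
 *	cdb[10..13] <- a >> 56, a >> 48, a >> 40, a >> 32;	high 32 bits (SIS-64 only)
 */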
8275
8276 /**
8277  * ipr_reset_timer_done - Adapter reset timer function
8278  * @t:          Timer context used to fetch ipr command struct
8279  *
8280  * Description: This function is used in adapter reset processing
8281  * for timing events. If the reset_cmd pointer in the IOA
8282  * config struct is not this adapter's we are doing nested
8283  * resets and fail_all_ops will take care of freeing the
8284  * command block.
8285  *
8286  * Return value:
8287  *      none
8288  **/
8289 static void ipr_reset_timer_done(struct timer_list *t)
8290 {
8291         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8292         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8293         unsigned long lock_flags = 0;
8294
8295         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8296
8297         if (ioa_cfg->reset_cmd == ipr_cmd) {
8298                 list_del(&ipr_cmd->queue);
8299                 ipr_cmd->done(ipr_cmd);
8300         }
8301
8302         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8303 }
8304
8305 /**
8306  * ipr_reset_start_timer - Start a timer for adapter reset job
8307  * @ipr_cmd:    ipr command struct
8308  * @timeout:    timeout value
8309  *
8310  * Description: This function is used in adapter reset processing
8311  * for timing events. If the reset_cmd pointer in the IOA
8312  * config struct does not point at this command, we are doing nested
8313  * resets and fail_all_ops will take care of freeing the
8314  * command block.
8315  *
8316  * Return value:
8317  *      none
8318  **/
8319 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8320                                   unsigned long timeout)
8321 {
8323         ENTER;
8324         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8325         ipr_cmd->done = ipr_reset_ioa_job;
8326
8327         ipr_cmd->timer.expires = jiffies + timeout;
8328         ipr_cmd->timer.function = ipr_reset_timer_done;
8329         add_timer(&ipr_cmd->timer);
8330 }
8331
8332 /**
8333  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8334  * @ioa_cfg:    ioa cfg struct
8335  *
8336  * Return value:
8337  *      nothing
8338  **/
8339 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8340 {
8341         struct ipr_hrr_queue *hrrq;
8342
8343         for_each_hrrq(hrrq, ioa_cfg) {
8344                 spin_lock(&hrrq->_lock);
8345                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8346
8347                 /* Initialize Host RRQ pointers */
8348                 hrrq->hrrq_start = hrrq->host_rrq;
8349                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8350                 hrrq->hrrq_curr = hrrq->hrrq_start;
8351                 hrrq->toggle_bit = 1;
8352                 spin_unlock(&hrrq->_lock);
8353         }
8354         wmb();
8355
8356         ioa_cfg->identify_hrrq_index = 0;
8357         if (ioa_cfg->hrrq_num == 1)
8358                 atomic_set(&ioa_cfg->hrrq_index, 0);
8359         else
8360                 atomic_set(&ioa_cfg->hrrq_index, 1);
8361
8362         /* Zero out config table */
8363         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8364 }
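
/*
 * HRRQ recap (sketch of the consumer side elsewhere in this driver): the
 * host response queue is a circular array of u32 entries; hrrq_curr walks
 * from hrrq_start to hrrq_end, and the toggle bit flips on each wrap so
 * stale entries from the previous pass are ignored:
 *
 *	if (hrrq->hrrq_curr < hrrq->hrrq_end)
 *		hrrq->hrrq_curr++;
 *	else {
 *		hrrq->hrrq_curr = hrrq->hrrq_start;
 *		hrrq->toggle_bit ^= 1u;
 *	}
 */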
8365
8366 /**
8367  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8368  * @ipr_cmd:    ipr command struct
8369  *
8370  * Return value:
8371  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8372  **/
8373 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8374 {
8375         unsigned long stage, stage_time;
8376         u32 feedback;
8377         volatile u32 int_reg;
8378         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8379         u64 maskval = 0;
8380
8381         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8382         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8383         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8384
8385         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8386
8387         /* sanity check the stage_time value */
8388         if (stage_time == 0)
8389                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8390         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8391                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8392         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8393                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8394
8395         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8396                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8397                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8398                 stage_time = ioa_cfg->transop_timeout;
8399                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8400         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8401                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8402                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8403                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8404                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8405                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8406                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8407                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8408                         return IPR_RC_JOB_CONTINUE;
8409                 }
8410         }
8411
8412         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8413         ipr_cmd->timer.function = ipr_oper_timeout;
8414         ipr_cmd->done = ipr_reset_ioa_job;
8415         add_timer(&ipr_cmd->timer);
8416
8417         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8418
8419         return IPR_RC_JOB_RETURN;
8420 }
8421
8422 /**
8423  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8424  * @ipr_cmd:    ipr command struct
8425  *
8426  * This function reinitializes some control blocks and
8427  * enables destructive diagnostics on the adapter.
8428  *
8429  * Return value:
8430  *      IPR_RC_JOB_RETURN
8431  **/
8432 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8433 {
8434         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8435         volatile u32 int_reg;
8436         volatile u64 maskval;
8437         int i;
8438
8439         ENTER;
8440         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8441         ipr_init_ioa_mem(ioa_cfg);
8442
8443         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8444                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8445                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8446                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8447         }
8448         if (ioa_cfg->sis64) {
8449                 /* Set the adapter to the correct endian mode. */
8450                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8451                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8452         }
8453
8454         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8455
8456         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8457                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8458                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8459                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8460                 return IPR_RC_JOB_CONTINUE;
8461         }
8462
8463         /* Enable destructive diagnostics on IOA */
8464         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8465
8466         if (ioa_cfg->sis64) {
8467                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8468                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8469                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8470         } else
8471                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8472
8473         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8474
8475         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8476
8477         if (ioa_cfg->sis64) {
8478                 ipr_cmd->job_step = ipr_reset_next_stage;
8479                 return IPR_RC_JOB_CONTINUE;
8480         }
8481
8482         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8483         ipr_cmd->timer.function = ipr_oper_timeout;
8484         ipr_cmd->done = ipr_reset_ioa_job;
8485         add_timer(&ipr_cmd->timer);
8486         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8487
8488         LEAVE;
8489         return IPR_RC_JOB_RETURN;
8490 }
8491
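/*
 * Illustrative sketch (not part of the driver): the readl() calls that
 * immediately follow writel()/writeq() above act as read-backs, forcing
 * posted MMIO writes out to the adapter before execution continues:
 */
static inline void ipr_example_posted_write(void __iomem *reg, u32 val)
{
	writel(val, reg);
	readl(reg);	/* flush the posted write to the device */
}
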
8492 /**
8493  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8494  * @ipr_cmd:    ipr command struct
8495  *
8496  * This function is invoked when an adapter dump has run out
8497  * of processing time.
8498  *
8499  * Return value:
8500  *      IPR_RC_JOB_CONTINUE
8501  **/
8502 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8503 {
8504         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8505
8506         if (ioa_cfg->sdt_state == GET_DUMP)
8507                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8508         else if (ioa_cfg->sdt_state == READ_DUMP)
8509                 ioa_cfg->sdt_state = ABORT_DUMP;
8510
8511         ioa_cfg->dump_timeout = 1;
8512         ipr_cmd->job_step = ipr_reset_alert;
8513
8514         return IPR_RC_JOB_CONTINUE;
8515 }
8516
8517 /**
8518  * ipr_unit_check_no_data - Log a unit check/no data error
8519  * @ioa_cfg:            ioa config struct
8520  *
8521  * Logs an error indicating the adapter unit checked, but for some
8522  * reason, we were unable to fetch the unit check buffer.
8523  *
8524  * Return value:
8525  *      nothing
8526  **/
8527 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8528 {
8529         ioa_cfg->errors_logged++;
8530         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8531 }
8532
8533 /**
8534  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8535  * @ioa_cfg:            ioa config struct
8536  *
8537  * Fetches the unit check buffer from the adapter by clocking the data
8538  * through the mailbox register.
8539  *
8540  * Return value:
8541  *      nothing
8542  **/
8543 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8544 {
8545         unsigned long mailbox;
8546         struct ipr_hostrcb *hostrcb;
8547         struct ipr_uc_sdt sdt;
8548         int rc, length;
8549         u32 ioasc;
8550
8551         mailbox = readl(ioa_cfg->ioa_mailbox);
8552
8553         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8554                 ipr_unit_check_no_data(ioa_cfg);
8555                 return;
8556         }
8557
8558         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8559         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8560                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8561
8562         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8563             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8564             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8565                 ipr_unit_check_no_data(ioa_cfg);
8566                 return;
8567         }
8568
8569         /* Find length of the first sdt entry (UC buffer) */
8570         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8571                 length = be32_to_cpu(sdt.entry[0].end_token);
8572         else
8573                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8574                           be32_to_cpu(sdt.entry[0].start_token)) &
8575                           IPR_FMT2_MBX_ADDR_MASK;
8576
8577         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8578                              struct ipr_hostrcb, queue);
8579         list_del_init(&hostrcb->queue);
8580         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8581
8582         rc = ipr_get_ldump_data_section(ioa_cfg,
8583                                         be32_to_cpu(sdt.entry[0].start_token),
8584                                         (__be32 *)&hostrcb->hcam,
8585                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8586
8587         if (!rc) {
8588                 ipr_handle_log_data(ioa_cfg, hostrcb);
8589                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8590                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8591                     ioa_cfg->sdt_state == GET_DUMP)
8592                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8593         } else
8594                 ipr_unit_check_no_data(ioa_cfg);
8595
8596         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8597 }
8598
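/*
 * Illustrative sketch (not part of the driver): the two SDT formats
 * handled above report the unit check buffer length differently. FMT3
 * stores the byte count directly in end_token, while FMT2 stores start
 * and end mailbox addresses, so the length is their masked difference:
 */
static inline int ipr_example_uc_buf_len(u32 state, u32 start, u32 end)
{
	if (state == IPR_FMT3_SDT_READY_TO_USE)
		return end;				/* already a length */
	return (end - start) & IPR_FMT2_MBX_ADDR_MASK;	/* address delta */
}
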
8599 /**
8600  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8601  * @ipr_cmd:    ipr command struct
8602  *
8603  * Description: This function fetches the unit check buffer.
8604  *
8605  * Return value:
8606  *      IPR_RC_JOB_RETURN
8607  **/
8608 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8609 {
8610         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8611
8612         ENTER;
8613         ioa_cfg->ioa_unit_checked = 0;
8614         ipr_get_unit_check_buffer(ioa_cfg);
8615         ipr_cmd->job_step = ipr_reset_alert;
8616         ipr_reset_start_timer(ipr_cmd, 0);
8617
8618         LEAVE;
8619         return IPR_RC_JOB_RETURN;
8620 }
8621
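/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS64 adapters this polls until the mailbox register
 * reads stable (or the wait times out), then transitions to READ_DUMP
 * and schedules the worker thread to fetch the dump.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/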
8622 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8623 {
8624         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8625
8626         ENTER;
8627
8628         if (ioa_cfg->sdt_state != GET_DUMP)
8629                 return IPR_RC_JOB_RETURN;
8630
8631         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8632             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8633              IPR_PCII_MAILBOX_STABLE)) {
8634
8635                 if (!ipr_cmd->u.time_left)
8636                         dev_err(&ioa_cfg->pdev->dev,
8637                                 "Timed out waiting for Mailbox register.\n");
8638
8639                 ioa_cfg->sdt_state = READ_DUMP;
8640                 ioa_cfg->dump_timeout = 0;
8641                 if (ioa_cfg->sis64)
8642                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8643                 else
8644                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8645                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8646                 schedule_work(&ioa_cfg->work_q);
8647
8648         } else {
8649                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8650                 ipr_reset_start_timer(ipr_cmd,
8651                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8652         }
8653
8654         LEAVE;
8655         return IPR_RC_JOB_RETURN;
8656 }
8657
8658 /**
8659  * ipr_reset_restore_cfg_space - Restore PCI config space.
8660  * @ipr_cmd:    ipr command struct
8661  *
8662  * Description: This function restores the saved PCI config space of
8663  * the adapter, fails all outstanding ops back to the callers, and
8664  * fetches the dump/unit check if applicable to this reset.
8665  *
8666  * Return value:
8667  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8668  **/
8669 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8670 {
8671         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8672         u32 int_reg;
8673
8674         ENTER;
8675         ioa_cfg->pdev->state_saved = true;
8676         pci_restore_state(ioa_cfg->pdev);
8677
8678         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8679                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8680                 return IPR_RC_JOB_CONTINUE;
8681         }
8682
8683         ipr_fail_all_ops(ioa_cfg);
8684
8685         if (ioa_cfg->sis64) {
8686                 /* Set the adapter to the correct endian mode. */
8687                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8688                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8689         }
8690
8691         if (ioa_cfg->ioa_unit_checked) {
8692                 if (ioa_cfg->sis64) {
8693                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8694                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8695                         return IPR_RC_JOB_RETURN;
8696                 } else {
8697                         ioa_cfg->ioa_unit_checked = 0;
8698                         ipr_get_unit_check_buffer(ioa_cfg);
8699                         ipr_cmd->job_step = ipr_reset_alert;
8700                         ipr_reset_start_timer(ipr_cmd, 0);
8701                         return IPR_RC_JOB_RETURN;
8702                 }
8703         }
8704
8705         if (ioa_cfg->in_ioa_bringdown) {
8706                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8707         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8708                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8709                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8710         } else {
8711                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8712         }
8713
8714         LEAVE;
8715         return IPR_RC_JOB_CONTINUE;
8716 }
8717
8718 /**
8719  * ipr_reset_bist_done - BIST has completed on the adapter.
8720  * @ipr_cmd:    ipr command struct
8721  *
8722  * Description: Unblock config space and resume the reset process.
8723  *
8724  * Return value:
8725  *      IPR_RC_JOB_CONTINUE
8726  **/
8727 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8728 {
8729         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8730
8731         ENTER;
8732         if (ioa_cfg->cfg_locked)
8733                 pci_cfg_access_unlock(ioa_cfg->pdev);
8734         ioa_cfg->cfg_locked = 0;
8735         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8736         LEAVE;
8737         return IPR_RC_JOB_CONTINUE;
8738 }
8739
8740 /**
8741  * ipr_reset_start_bist - Run BIST on the adapter.
8742  * @ipr_cmd:    ipr command struct
8743  *
8744  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8745  *
8746  * Return value:
8747  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8748  **/
8749 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8750 {
8751         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8752         int rc = PCIBIOS_SUCCESSFUL;
8753
8754         ENTER;
8755         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8756                 writel(IPR_UPROCI_SIS64_START_BIST,
8757                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8758         else
8759                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8760
8761         if (rc == PCIBIOS_SUCCESSFUL) {
8762                 ipr_cmd->job_step = ipr_reset_bist_done;
8763                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8764                 rc = IPR_RC_JOB_RETURN;
8765         } else {
8766                 if (ioa_cfg->cfg_locked)
8767                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8768                 ioa_cfg->cfg_locked = 0;
8769                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8770                 rc = IPR_RC_JOB_CONTINUE;
8771         }
8772
8773         LEAVE;
8774         return rc;
8775 }
8776
8777 /**
8778  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8779  * @ipr_cmd:    ipr command struct
8780  *
8781  * Description: This clears PCI reset to the adapter and delays two seconds.
8782  *
8783  * Return value:
8784  *      IPR_RC_JOB_RETURN
8785  **/
8786 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8787 {
8788         ENTER;
8789         ipr_cmd->job_step = ipr_reset_bist_done;
8790         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8791         LEAVE;
8792         return IPR_RC_JOB_RETURN;
8793 }
8794
8795 /**
8796  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8797  * @work:       work struct
8798  *
8799  * Description: This pulses a warm reset to the slot.
8800  *
8801  **/
8802 static void ipr_reset_reset_work(struct work_struct *work)
8803 {
8804         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8805         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8806         struct pci_dev *pdev = ioa_cfg->pdev;
8807         unsigned long lock_flags = 0;
8808
8809         ENTER;
8810         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8811         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8812         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8813
8814         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8815         if (ioa_cfg->reset_cmd == ipr_cmd)
8816                 ipr_reset_ioa_job(ipr_cmd);
8817         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8818         LEAVE;
8819 }
8820
8821 /**
8822  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8823  * @ipr_cmd:    ipr command struct
8824  *
8825  * Description: This asserts PCI reset to the adapter.
8826  *
8827  * Return value:
8828  *      IPR_RC_JOB_RETURN
8829  **/
8830 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8831 {
8832         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8833
8834         ENTER;
8835         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8836         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8837         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8838         LEAVE;
8839         return IPR_RC_JOB_RETURN;
8840 }
8841
8842 /**
8843  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8844  * @ipr_cmd:    ipr command struct
8845  *
8846  * Description: This attempts to block config access to the IOA.
8847  *
8848  * Return value:
8849  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8850  **/
8851 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8852 {
8853         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8854         int rc = IPR_RC_JOB_CONTINUE;
8855
8856         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8857                 ioa_cfg->cfg_locked = 1;
8858                 ipr_cmd->job_step = ioa_cfg->reset;
8859         } else {
8860                 if (ipr_cmd->u.time_left) {
8861                         rc = IPR_RC_JOB_RETURN;
8862                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8863                         ipr_reset_start_timer(ipr_cmd,
8864                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8865                 } else {
8866                         ipr_cmd->job_step = ioa_cfg->reset;
8867                         dev_err(&ioa_cfg->pdev->dev,
8868                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8869                 }
8870         }
8871
8872         return rc;
8873 }
8874
8875 /**
8876  * ipr_reset_block_config_access - Block config access to the IOA
8877  * @ipr_cmd:    ipr command struct
8878  *
8879  * Description: This attempts to block config access to the IOA
8880  *
8881  * Return value:
8882  *      IPR_RC_JOB_CONTINUE
8883  **/
8884 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8885 {
8886         ipr_cmd->ioa_cfg->cfg_locked = 0;
8887         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8888         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8889         return IPR_RC_JOB_CONTINUE;
8890 }
8891
8892 /**
8893  * ipr_reset_allowed - Query whether or not IOA can be reset
8894  * @ioa_cfg:    ioa config struct
8895  *
8896  * Return value:
8897  *      0 if reset not allowed / non-zero if reset is allowed
8898  **/
8899 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8900 {
8901         volatile u32 temp_reg;
8902
8903         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8904         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8905 }
8906
8907 /**
8908  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8909  * @ipr_cmd:    ipr command struct
8910  *
8911  * Description: This function waits for adapter permission to run BIST,
8912  * then runs BIST. If the adapter does not give permission after a
8913  * reasonable time, we will reset the adapter anyway. The impact of
8914  * resetting the adapter without warning the adapter is the risk of
8915  * losing the persistent error log on the adapter. If the adapter is
8916  * reset while it is writing to the flash on the adapter, the flash
8917  * segment will have bad ECC and be zeroed.
8918  *
8919  * Return value:
8920  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8921  **/
8922 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8923 {
8924         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8925         int rc = IPR_RC_JOB_RETURN;
8926
8927         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8928                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8929                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8930         } else {
8931                 ipr_cmd->job_step = ipr_reset_block_config_access;
8932                 rc = IPR_RC_JOB_CONTINUE;
8933         }
8934
8935         return rc;
8936 }
8937
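/*
 * Illustrative sketch (not part of the driver):
 * ipr_reset_wait_to_start_bist() and
 * ipr_reset_block_config_access_wait() above share a polling idiom --
 * retry every IPR_CHECK_FOR_RESET_TIMEOUT until a condition holds or
 * u.time_left is exhausted. The generic shape, with the condition
 * factored out into a hypothetical callback:
 */
static int ipr_example_poll_step(struct ipr_cmnd *ipr_cmd,
				 int (*cond)(struct ipr_ioa_cfg *))
{
	if (!cond(ipr_cmd->ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
		return IPR_RC_JOB_RETURN;	/* timer re-enters the job */
	}
	return IPR_RC_JOB_CONTINUE;		/* condition met or timed out */
}
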
8938 /**
8939  * ipr_reset_alert - Alert the adapter of a pending reset
8940  * @ipr_cmd:    ipr command struct
8941  *
8942  * Description: This function alerts the adapter that it will be reset.
8943  * If memory space is not currently enabled, proceed directly
8944  * to running BIST on the adapter. The timer must always be started
8945  * so we guarantee we do not run BIST from ipr_isr.
8946  *
8947  * Return value:
8948  *      IPR_RC_JOB_RETURN
8949  **/
8950 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8951 {
8952         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8953         u16 cmd_reg;
8954         int rc;
8955
8956         ENTER;
8957         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8958
8959         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8960                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8961                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8962                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8963         } else {
8964                 ipr_cmd->job_step = ipr_reset_block_config_access;
8965         }
8966
8967         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8968         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8969
8970         LEAVE;
8971         return IPR_RC_JOB_RETURN;
8972 }
8973
8974 /**
8975  * ipr_reset_quiesce_done - Complete IOA disconnect
8976  * @ipr_cmd:    ipr command struct
8977  *
8978  * Description: Freeze the adapter to complete quiesce processing
8979  *
8980  * Return value:
8981  *      IPR_RC_JOB_CONTINUE
8982  **/
8983 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8984 {
8985         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8986
8987         ENTER;
8988         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8989         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8990         LEAVE;
8991         return IPR_RC_JOB_CONTINUE;
8992 }
8993
8994 /**
8995  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8996  * @ipr_cmd:    ipr command struct
8997  *
8998  * Description: Ensure nothing is outstanding to the IOA and
8999  * proceed with IOA disconnect. Otherwise reset the IOA.
9000  *
9001  * Return value:
9002  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9003  **/
9004 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9005 {
9006         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9007         struct ipr_cmnd *loop_cmd;
9008         struct ipr_hrr_queue *hrrq;
9009         int rc = IPR_RC_JOB_CONTINUE;
9010         int count = 0;
9011
9012         ENTER;
9013         ipr_cmd->job_step = ipr_reset_quiesce_done;
9014
9015         for_each_hrrq(hrrq, ioa_cfg) {
9016                 spin_lock(&hrrq->_lock);
9017                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9018                         count++;
9019                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9020                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9021                         rc = IPR_RC_JOB_RETURN;
9022                         break;
9023                 }
9024                 spin_unlock(&hrrq->_lock);
9025
9026                 if (count)
9027                         break;
9028         }
9029
9030         LEAVE;
9031         return rc;
9032 }
9033
9034 /**
9035  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9036  * @ipr_cmd:    ipr command struct
9037  *
9038  * Description: Cancel any outstanding HCAMs to the IOA.
9039  *
9040  * Return value:
9041  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9042  **/
9043 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9044 {
9045         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9046         int rc = IPR_RC_JOB_CONTINUE;
9047         struct ipr_cmd_pkt *cmd_pkt;
9048         struct ipr_cmnd *hcam_cmd;
9049         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9050
9051         ENTER;
9052         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9053
9054         if (!hrrq->ioa_is_dead) {
9055                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9056                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9057                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9058                                         continue;
9059
9060                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9062                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9063                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9064                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9065                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9066                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9067                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9068                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9069                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9070                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9071                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9072                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9073                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9074
9075                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9076                                            IPR_CANCEL_TIMEOUT);
9077
9078                                 rc = IPR_RC_JOB_RETURN;
9079                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9080                                 break;
9081                         }
9082                 }
9083         } else
9084                 ipr_cmd->job_step = ipr_reset_alert;
9085
9086         LEAVE;
9087         return rc;
9088 }
9089
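/*
 * Illustrative sketch (not part of the driver; assumes <asm/unaligned.h>):
 * the Cancel Request CDB built above scatters the 64-bit IOARCB address
 * across two big-endian byte ranges -- the low 32 bits in bytes 2-5 and
 * the high 32 bits in bytes 10-13. An equivalent encoding:
 */
static inline void ipr_example_encode_cancel_addr(u8 *cdb, u64 addr)
{
	put_unaligned_be32(lower_32_bits(addr), &cdb[2]);	/* bits 31:0 */
	put_unaligned_be32(upper_32_bits(addr), &cdb[10]);	/* bits 63:32 */
}
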
9090 /**
9091  * ipr_reset_ucode_download_done - Microcode download completion
9092  * @ipr_cmd:    ipr command struct
9093  *
9094  * Description: This function unmaps the microcode download buffer.
9095  *
9096  * Return value:
9097  *      IPR_RC_JOB_CONTINUE
9098  **/
9099 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9100 {
9101         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9102         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9103
9104         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9105                      sglist->num_sg, DMA_TO_DEVICE);
9106
9107         ipr_cmd->job_step = ipr_reset_alert;
9108         return IPR_RC_JOB_CONTINUE;
9109 }
9110
9111 /**
9112  * ipr_reset_ucode_download - Download microcode to the adapter
9113  * @ipr_cmd:    ipr command struct
9114  *
9115  * Description: This function checks to see if there is microcode
9116  * to download to the adapter. If there is, a download is performed.
9117  *
9118  * Return value:
9119  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9120  **/
9121 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9122 {
9123         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9124         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9125
9126         ENTER;
9127         ipr_cmd->job_step = ipr_reset_alert;
9128
9129         if (!sglist)
9130                 return IPR_RC_JOB_CONTINUE;
9131
9132         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9133         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9134         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9135         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9136         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9137         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9138         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9139
9140         if (ioa_cfg->sis64)
9141                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9142         else
9143                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9144         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9145
9146         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9147                    IPR_WRITE_BUFFER_TIMEOUT);
9148
9149         LEAVE;
9150         return IPR_RC_JOB_RETURN;
9151 }
9152
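/*
 * Illustrative sketch (not part of the driver): the WRITE BUFFER CDB
 * built above in ipr_reset_ucode_download() carries the microcode image
 * size as a 24-bit big-endian parameter list length in bytes 6-8:
 */
static inline void ipr_example_encode_wb_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;	/* most significant byte */
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;		/* least significant byte */
}
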
9153 /**
9154  * ipr_reset_shutdown_ioa - Shutdown the adapter
9155  * @ipr_cmd:    ipr command struct
9156  *
9157  * Description: This function issues an adapter shutdown of the
9158  * specified type to the specified adapter as part of the
9159  * adapter reset job.
9160  *
9161  * Return value:
9162  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9163  **/
9164 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9165 {
9166         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9167         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9168         unsigned long timeout;
9169         int rc = IPR_RC_JOB_CONTINUE;
9170
9171         ENTER;
9172         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9173                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9174         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9175                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9176                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9177                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9178                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9179                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9180
9181                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9182                         timeout = IPR_SHUTDOWN_TIMEOUT;
9183                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9184                         timeout = IPR_INTERNAL_TIMEOUT;
9185                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9186                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9187                 else
9188                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9189
9190                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9191
9192                 rc = IPR_RC_JOB_RETURN;
9193                 ipr_cmd->job_step = ipr_reset_ucode_download;
9194         } else
9195                 ipr_cmd->job_step = ipr_reset_alert;
9196
9197         LEAVE;
9198         return rc;
9199 }
9200
9201 /**
9202  * ipr_reset_ioa_job - Adapter reset job
9203  * @ipr_cmd:    ipr command struct
9204  *
9205  * Description: This function is the job router for the adapter reset job.
9206  *
9207  * Return value:
9208  *      none
9209  **/
9210 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9211 {
9212         u32 rc, ioasc;
9213         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9214
9215         do {
9216                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9217
9218                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9219                         /*
9220                          * We are doing nested adapter resets and this is
9221                          * not the current reset job.
9222                          */
9223                         list_add_tail(&ipr_cmd->queue,
9224                                         &ipr_cmd->hrrq->hrrq_free_q);
9225                         return;
9226                 }
9227
9228                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9229                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9230                         if (rc == IPR_RC_JOB_RETURN)
9231                                 return;
9232                 }
9233
9234                 ipr_reinit_ipr_cmnd(ipr_cmd);
9235                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9236                 rc = ipr_cmd->job_step(ipr_cmd);
9237         } while (rc == IPR_RC_JOB_CONTINUE);
9238 }
9239
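/*
 * Illustrative sketch (not part of the driver): the reset job is a
 * state machine whose states are job_step functions. A step either
 * finishes synchronously and returns IPR_RC_JOB_CONTINUE, so the router
 * loop above immediately invokes the next step it installed, or it arms
 * a timer/command completion and returns IPR_RC_JOB_RETURN, letting the
 * completion path call ipr_reset_ioa_job() again later. A minimal,
 * hypothetical step:
 */
static int ipr_example_job_step(struct ipr_cmnd *ipr_cmd)
{
	bool must_wait = false;			/* e.g. hardware not ready */

	ipr_cmd->job_step = ipr_reset_alert;	/* install the next state */
	if (must_wait) {
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
		return IPR_RC_JOB_RETURN;	/* router unwinds for now */
	}
	return IPR_RC_JOB_CONTINUE;		/* router runs next step now */
}
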
9240 /**
9241  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9242  * @ioa_cfg:            ioa config struct
9243  * @job_step:           first job step of reset job
9244  * @shutdown_type:      shutdown type
9245  *
9246  * Description: This function will initiate the reset of the given adapter
9247  * starting at the selected job step.
9248  * If the caller needs to wait on the completion of the reset,
9249  * the caller must sleep on the reset_wait_q.
9250  *
9251  * Return value:
9252  *      none
9253  **/
9254 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9255                                     int (*job_step) (struct ipr_cmnd *),
9256                                     enum ipr_shutdown_type shutdown_type)
9257 {
9258         struct ipr_cmnd *ipr_cmd;
9259         int i;
9260
9261         ioa_cfg->in_reset_reload = 1;
9262         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9263                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9264                 ioa_cfg->hrrq[i].allow_cmds = 0;
9265                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9266         }
9267         wmb();
9268         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9269                 ioa_cfg->scsi_unblock = 0;
9270                 ioa_cfg->scsi_blocked = 1;
9271                 scsi_block_requests(ioa_cfg->host);
9272         }
9273
9274         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9275         ioa_cfg->reset_cmd = ipr_cmd;
9276         ipr_cmd->job_step = job_step;
9277         ipr_cmd->u.shutdown_type = shutdown_type;
9278
9279         ipr_reset_ioa_job(ipr_cmd);
9280 }
9281
9282 /**
9283  * ipr_initiate_ioa_reset - Initiate an adapter reset
9284  * @ioa_cfg:            ioa config struct
9285  * @shutdown_type:      shutdown type
9286  *
9287  * Description: This function will initiate the reset of the given adapter.
9288  * If the caller needs to wait on the completion of the reset,
9289  * the caller must sleep on the reset_wait_q.
9290  *
9291  * Return value:
9292  *      none
9293  **/
9294 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9295                                    enum ipr_shutdown_type shutdown_type)
9296 {
9297         int i;
9298
9299         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9300                 return;
9301
9302         if (ioa_cfg->in_reset_reload) {
9303                 if (ioa_cfg->sdt_state == GET_DUMP)
9304                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9305                 else if (ioa_cfg->sdt_state == READ_DUMP)
9306                         ioa_cfg->sdt_state = ABORT_DUMP;
9307         }
9308
9309         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9310                 dev_err(&ioa_cfg->pdev->dev,
9311                         "IOA taken offline - error recovery failed\n");
9312
9313                 ioa_cfg->reset_retries = 0;
9314                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9315                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9316                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9317                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9318                 }
9319                 wmb();
9320
9321                 if (ioa_cfg->in_ioa_bringdown) {
9322                         ioa_cfg->reset_cmd = NULL;
9323                         ioa_cfg->in_reset_reload = 0;
9324                         ipr_fail_all_ops(ioa_cfg);
9325                         wake_up_all(&ioa_cfg->reset_wait_q);
9326
9327                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9328                                 ioa_cfg->scsi_unblock = 1;
9329                                 schedule_work(&ioa_cfg->work_q);
9330                         }
9331                         return;
9332                 } else {
9333                         ioa_cfg->in_ioa_bringdown = 1;
9334                         shutdown_type = IPR_SHUTDOWN_NONE;
9335                 }
9336         }
9337
9338         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9339                                 shutdown_type);
9340 }
9341
9342 /**
9343  * ipr_reset_freeze - Hold off all I/O activity
9344  * @ipr_cmd:    ipr command struct
9345  *
9346  * Description: If the PCI slot is frozen, hold off all I/O
9347  * activity; then, as soon as the slot is available again,
9348  * initiate an adapter reset.
9349  */
9350 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9351 {
9352         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9353         int i;
9354
9355         /* Disallow new interrupts, avoid loop */
9356         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9357                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9358                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9359                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9360         }
9361         wmb();
9362         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9363         ipr_cmd->done = ipr_reset_ioa_job;
9364         return IPR_RC_JOB_RETURN;
9365 }
9366
9367 /**
9368  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9369  * @pdev:       PCI device struct
9370  *
9371  * Description: This routine is called to tell us that the MMIO
9372  * access to the IOA has been restored
9373  */
9374 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9375 {
9376         unsigned long flags = 0;
9377         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9378
9379         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9380         if (!ioa_cfg->probe_done)
9381                 pci_save_state(pdev);
9382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9383         return PCI_ERS_RESULT_NEED_RESET;
9384 }
9385
9386 /**
9387  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9388  * @pdev:       PCI device struct
9389  *
9390  * Description: This routine is called to tell us that the PCI bus
9391  * is down. Can't do anything here, except put the device driver
9392  * into a holding pattern, waiting for the PCI bus to come back.
9393  */
9394 static void ipr_pci_frozen(struct pci_dev *pdev)
9395 {
9396         unsigned long flags = 0;
9397         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9398
9399         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9400         if (ioa_cfg->probe_done)
9401                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9402         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9403 }
9404
9405 /**
9406  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9407  * @pdev:       PCI device struct
9408  *
9409  * Description: This routine is called by the pci error recovery
9410  * code after the PCI slot has been reset, just before we
9411  * should resume normal operations.
9412  */
9413 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9414 {
9415         unsigned long flags = 0;
9416         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9417
9418         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9419         if (ioa_cfg->probe_done) {
9420                 if (ioa_cfg->needs_warm_reset)
9421                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9422                 else
9423                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9424                                                 IPR_SHUTDOWN_NONE);
9425         } else
9426                 wake_up_all(&ioa_cfg->eeh_wait_q);
9427         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9428         return PCI_ERS_RESULT_RECOVERED;
9429 }
9430
9431 /**
9432  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9433  * @pdev:       PCI device struct
9434  *
9435  * Description: This routine is called when the PCI bus has
9436  * permanently failed.
9437  */
9438 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9439 {
9440         unsigned long flags = 0;
9441         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9442         int i;
9443
9444         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9445         if (ioa_cfg->probe_done) {
9446                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9447                         ioa_cfg->sdt_state = ABORT_DUMP;
9448                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9449                 ioa_cfg->in_ioa_bringdown = 1;
9450                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9451                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9452                         ioa_cfg->hrrq[i].allow_cmds = 0;
9453                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9454                 }
9455                 wmb();
9456                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9457         } else
9458                 wake_up_all(&ioa_cfg->eeh_wait_q);
9459         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9460 }
9461
9462 /**
9463  * ipr_pci_error_detected - Called when a PCI error is detected.
9464  * @pdev:       PCI device struct
9465  * @state:      PCI channel state
9466  *
9467  * Description: Called when a PCI error is detected.
9468  *
9469  * Return value:
9470  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9471  */
9472 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9473                                                pci_channel_state_t state)
9474 {
9475         switch (state) {
9476         case pci_channel_io_frozen:
9477                 ipr_pci_frozen(pdev);
9478                 return PCI_ERS_RESULT_CAN_RECOVER;
9479         case pci_channel_io_perm_failure:
9480                 ipr_pci_perm_failure(pdev);
9481                 return PCI_ERS_RESULT_DISCONNECT;
9482                 break;
9483         default:
9484                 break;
9485         }
9486         return PCI_ERS_RESULT_NEED_RESET;
9487 }
9488
9489 /**
9490  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9491  * @ioa_cfg:    ioa cfg struct
9492  *
9493  * Description: This is the second phase of adapter initialization
9494  * This function takes care of initializing the adapter to the point
9495  * where it can accept new commands.
9496  *
9497  * Return value:
9498  *      0 on success / -EIO on failure
9499  **/
9500 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9501 {
9502         int rc = 0;
9503         unsigned long host_lock_flags = 0;
9504
9505         ENTER;
9506         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9507         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9508         ioa_cfg->probe_done = 1;
9509         if (ioa_cfg->needs_hard_reset) {
9510                 ioa_cfg->needs_hard_reset = 0;
9511                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9512         } else
9513                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9514                                         IPR_SHUTDOWN_NONE);
9515         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9516
9517         LEAVE;
9518         return rc;
9519 }
9520
9521 /**
9522  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9523  * @ioa_cfg:    ioa config struct
9524  *
9525  * Return value:
9526  *      none
9527  **/
9528 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9529 {
9530         int i;
9531
9532         if (ioa_cfg->ipr_cmnd_list) {
9533                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9534                         if (ioa_cfg->ipr_cmnd_list[i])
9535                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9536                                               ioa_cfg->ipr_cmnd_list[i],
9537                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9538
9539                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9540                 }
9541         }
9542
9543         /* dma_pool_destroy() tolerates a NULL pool */
9544         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9545
9546         kfree(ioa_cfg->ipr_cmnd_list);
9547         kfree(ioa_cfg->ipr_cmnd_list_dma);
9548         ioa_cfg->ipr_cmnd_list = NULL;
9549         ioa_cfg->ipr_cmnd_list_dma = NULL;
9550         ioa_cfg->ipr_cmd_pool = NULL;
9551 }
9552
9553 /**
9554  * ipr_free_mem - Frees memory allocated for an adapter
9555  * @ioa_cfg:    ioa cfg struct
9556  *
9557  * Return value:
9558  *      nothing
9559  **/
9560 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9561 {
9562         int i;
9563
9564         kfree(ioa_cfg->res_entries);
9565         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9566                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9567         ipr_free_cmd_blks(ioa_cfg);
9568
9569         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9570                 dma_free_coherent(&ioa_cfg->pdev->dev,
9571                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9572                                   ioa_cfg->hrrq[i].host_rrq,
9573                                   ioa_cfg->hrrq[i].host_rrq_dma);
9574
9575         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9576                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9577
9578         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9579                 dma_free_coherent(&ioa_cfg->pdev->dev,
9580                                   sizeof(struct ipr_hostrcb),
9581                                   ioa_cfg->hostrcb[i],
9582                                   ioa_cfg->hostrcb_dma[i]);
9583         }
9584
9585         ipr_free_dump(ioa_cfg);
9586         kfree(ioa_cfg->trace);
9587 }
9588
9589 /**
9590  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9591  * @ioa_cfg:    ioa config struct
9592  *
9593  * This function frees all allocated IRQs for the
9594  * specified adapter.
9595  *
9596  * Return value:
9597  *      none
9598  **/
9599 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9600 {
9601         struct pci_dev *pdev = ioa_cfg->pdev;
9602         int i;
9603
9604         for (i = 0; i < ioa_cfg->nvectors; i++)
9605                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9606         pci_free_irq_vectors(pdev);
9607 }
9608
9609 /**
9610  * ipr_free_all_resources - Free all allocated resources for an adapter.
9611  * @ioa_cfg:    ioa config struct
9612  *
9613  * This function frees all allocated resources for the
9614  * specified adapter.
9615  *
9616  * Return value:
9617  *      none
9618  **/
9619 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9620 {
9621         struct pci_dev *pdev = ioa_cfg->pdev;
9622
9623         ENTER;
9624         ipr_free_irqs(ioa_cfg);
9625         if (ioa_cfg->reset_work_q)
9626                 destroy_workqueue(ioa_cfg->reset_work_q);
9627         iounmap(ioa_cfg->hdw_dma_regs);
9628         pci_release_regions(pdev);
9629         ipr_free_mem(ioa_cfg);
9630         scsi_host_put(ioa_cfg->host);
9631         pci_disable_device(pdev);
9632         LEAVE;
9633 }
9634
9635 /**
9636  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9637  * @ioa_cfg:    ioa config struct
9638  *
9639  * Return value:
9640  *      0 on success / -ENOMEM on allocation failure
9641  **/
9642 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9643 {
9644         struct ipr_cmnd *ipr_cmd;
9645         struct ipr_ioarcb *ioarcb;
9646         dma_addr_t dma_addr;
9647         int i, entries_each_hrrq, hrrq_id = 0;
9648
9649         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9650                                                 sizeof(struct ipr_cmnd), 512, 0);
9651
9652         if (!ioa_cfg->ipr_cmd_pool)
9653                 return -ENOMEM;
9654
9655         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9656         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9657
9658         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9659                 ipr_free_cmd_blks(ioa_cfg);
9660                 return -ENOMEM;
9661         }
9662
9663         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9664                 if (ioa_cfg->hrrq_num > 1) {
9665                         if (i == 0) {
9666                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9667                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9668                                 ioa_cfg->hrrq[i].max_cmd_id =
9669                                         (entries_each_hrrq - 1);
9670                         } else {
9671                                 entries_each_hrrq =
9672                                         IPR_NUM_BASE_CMD_BLKS/
9673                                         (ioa_cfg->hrrq_num - 1);
9674                                 ioa_cfg->hrrq[i].min_cmd_id =
9675                                         IPR_NUM_INTERNAL_CMD_BLKS +
9676                                         (i - 1) * entries_each_hrrq;
9677                                 ioa_cfg->hrrq[i].max_cmd_id =
9678                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9679                                         i * entries_each_hrrq - 1);
9680                         }
9681                 } else {
9682                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9683                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9684                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9685                 }
9686                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9687         }
9688
9689         BUG_ON(ioa_cfg->hrrq_num == 0);
9690
9691         i = IPR_NUM_CMD_BLKS -
9692                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9693         if (i > 0) {
9694                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9695                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9696         }
9697
9698         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9699                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9700                                 GFP_KERNEL, &dma_addr);
9701
9702                 if (!ipr_cmd) {
9703                         ipr_free_cmd_blks(ioa_cfg);
9704                         return -ENOMEM;
9705                 }
9706
9707                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9708                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9709
9710                 ioarcb = &ipr_cmd->ioarcb;
9711                 ipr_cmd->dma_addr = dma_addr;
9712                 if (ioa_cfg->sis64)
9713                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9714                 else
9715                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9716
9717                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9718                 if (ioa_cfg->sis64) {
9719                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9720                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9721                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9722                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9723                 } else {
9724                         ioarcb->write_ioadl_addr =
9725                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9726                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9727                         ioarcb->ioasa_host_pci_addr =
9728                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9729                 }
9730                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9731                 ipr_cmd->cmd_index = i;
9732                 ipr_cmd->ioa_cfg = ioa_cfg;
9733                 ipr_cmd->sense_buffer_dma = dma_addr +
9734                         offsetof(struct ipr_cmnd, sense_buffer);
9735
9736                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9737                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9738                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9739                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9740                         hrrq_id++;
9741         }
9742
9743         return 0;
9744 }
9745
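/*
 * Illustrative sketch (not part of the driver, hypothetical numbers):
 * with more than one HRRQ, hrrq[0] is reserved for internal commands
 * and the remaining IPR_NUM_BASE_CMD_BLKS ids are split evenly across
 * the other queues; any remainder from the integer division is folded
 * into the last queue by the fix-up above. For instance, 5 internal
 * blocks, 100 base blocks and hrrq_num = 4 give hrrq[0] ids 0-4,
 * hrrq[1] ids 5-37, hrrq[2] ids 38-70 and hrrq[3] ids 71-104 after the
 * fix-up. The per-queue range computation:
 */
static inline void ipr_example_cmd_id_range(int i, int hrrq_num, int internal,
					    int base, int *min_id, int *max_id)
{
	int per_q = base / (hrrq_num - 1);	/* assumes hrrq_num > 1 */

	if (i == 0) {			/* hrrq 0: internal commands only */
		*min_id = 0;
		*max_id = internal - 1;
	} else {
		*min_id = internal + (i - 1) * per_q;
		*max_id = internal + i * per_q - 1;
	}
}
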
9746 /**
9747  * ipr_alloc_mem - Allocate memory for an adapter
9748  * @ioa_cfg:    ioa config struct
9749  *
9750  * Return value:
9751  *      0 on success / non-zero for error
9752  **/
9753 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9754 {
9755         struct pci_dev *pdev = ioa_cfg->pdev;
9756         int i, rc = -ENOMEM;
9757
9758         ENTER;
9759         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9760                                        sizeof(struct ipr_resource_entry),
9761                                        GFP_KERNEL);
9762
9763         if (!ioa_cfg->res_entries)
9764                 goto out;
9765
9766         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9767                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9768                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9769         }
9770
9771         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9772                                               sizeof(struct ipr_misc_cbs),
9773                                               &ioa_cfg->vpd_cbs_dma,
9774                                               GFP_KERNEL);
9775
9776         if (!ioa_cfg->vpd_cbs)
9777                 goto out_free_res_entries;
9778
9779         if (ipr_alloc_cmd_blks(ioa_cfg))
9780                 goto out_free_vpd_cbs;
9781
9782         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9783                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9784                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9785                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9786                                         GFP_KERNEL);
9787
9788                 if (!ioa_cfg->hrrq[i].host_rrq) {
9789                         while (--i >= 0)
9790                                 dma_free_coherent(&pdev->dev,
9791                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9792                                         ioa_cfg->hrrq[i].host_rrq,
9793                                         ioa_cfg->hrrq[i].host_rrq_dma);
9794                         goto out_ipr_free_cmd_blocks;
9795                 }
9796                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9797         }
9798
9799         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9800                                                   ioa_cfg->cfg_table_size,
9801                                                   &ioa_cfg->cfg_table_dma,
9802                                                   GFP_KERNEL);
9803
9804         if (!ioa_cfg->u.cfg_table)
9805                 goto out_free_host_rrq;
9806
9807         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9808                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9809                                                          sizeof(struct ipr_hostrcb),
9810                                                          &ioa_cfg->hostrcb_dma[i],
9811                                                          GFP_KERNEL);
9812
9813                 if (!ioa_cfg->hostrcb[i])
9814                         goto out_free_hostrcb_dma;
9815
9816                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9817                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9818                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9819                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9820         }
9821
9822         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9823                                  sizeof(struct ipr_trace_entry),
9824                                  GFP_KERNEL);
9825
9826         if (!ioa_cfg->trace)
9827                 goto out_free_hostrcb_dma;
9828
9829         rc = 0;
9830 out:
9831         LEAVE;
9832         return rc;
9833
9834 out_free_hostrcb_dma:
9835         while (i-- > 0) {
9836                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9837                                   ioa_cfg->hostrcb[i],
9838                                   ioa_cfg->hostrcb_dma[i]);
9839         }
9840         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9841                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9842 out_free_host_rrq:
9843         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9844                 dma_free_coherent(&pdev->dev,
9845                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9846                                   ioa_cfg->hrrq[i].host_rrq,
9847                                   ioa_cfg->hrrq[i].host_rrq_dma);
9848         }
9849 out_ipr_free_cmd_blocks:
9850         ipr_free_cmd_blks(ioa_cfg);
9851 out_free_vpd_cbs:
9852         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9853                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9854 out_free_res_entries:
9855         kfree(ioa_cfg->res_entries);
9856         goto out;
9857 }
9858
9859 /**
9860  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9861  * @ioa_cfg:    ioa config struct
9862  *
9863  * Return value:
9864  *      none
9865  **/
9866 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9867 {
9868         int i;
9869
9870         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9871                 ioa_cfg->bus_attr[i].bus = i;
9872                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9873                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9874                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9875                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9876                 else
9877                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9878         }
9879 }
9880
9881 /**
9882  * ipr_init_regs - Initialize IOA registers
9883  * @ioa_cfg:    ioa config struct
9884  *
9885  * Return value:
9886  *      none
9887  **/
9888 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9889 {
9890         const struct ipr_interrupt_offsets *p;
9891         struct ipr_interrupts *t;
9892         void __iomem *base;
9893
9894         p = &ioa_cfg->chip_cfg->regs;
9895         t = &ioa_cfg->regs;
9896         base = ioa_cfg->hdw_dma_regs;
9897
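        /* Each register pointer is the ioremapped BAR base plus the
         * chip-specific offset from the chip config table. */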
9898         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9899         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9900         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9901         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9902         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9903         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9904         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9905         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9906         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9907         t->ioarrin_reg = base + p->ioarrin_reg;
9908         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9909         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9910         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9911         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9912         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9913         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9914
9915         if (ioa_cfg->sis64) {
9916                 t->init_feedback_reg = base + p->init_feedback_reg;
9917                 t->dump_addr_reg = base + p->dump_addr_reg;
9918                 t->dump_data_reg = base + p->dump_data_reg;
9919                 t->endian_swap_reg = base + p->endian_swap_reg;
9920         }
9921 }
9922
9923 /**
9924  * ipr_init_ioa_cfg - Initialize IOA config struct
9925  * @ioa_cfg:    ioa config struct
9926  * @host:               scsi host struct
9927  * @pdev:               PCI dev struct
9928  *
9929  * Return value:
9930  *      none
9931  **/
9932 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9933                              struct Scsi_Host *host, struct pci_dev *pdev)
9934 {
9935         int i;
9936
9937         ioa_cfg->host = host;
9938         ioa_cfg->pdev = pdev;
9939         ioa_cfg->log_level = ipr_log_level;
9940         ioa_cfg->doorbell = IPR_DOORBELL;
9941         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9942         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9943         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9944         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9945         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9946         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9947
9948         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9949         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9950         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9951         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9952         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9953         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9954         INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9955         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9956         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9957         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9958         ioa_cfg->sdt_state = INACTIVE;
9959
9960         ipr_initialize_bus_attr(ioa_cfg);
9961         ioa_cfg->max_devs_supported = ipr_max_devs;
9962
9963         if (ioa_cfg->sis64) {
9964                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9965                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9966                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9967                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9968                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9969                                            + ((sizeof(struct ipr_config_table_entry64)
9970                                                * ioa_cfg->max_devs_supported)));
9971         } else {
9972                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9973                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9974                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9975                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9976                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9977                                            + ((sizeof(struct ipr_config_table_entry)
9978                                                * ioa_cfg->max_devs_supported)));
9979         }
9980
9981         host->max_channel = IPR_VSET_BUS;
9982         host->unique_id = host->host_no;
9983         host->max_cmd_len = IPR_MAX_CDB_LEN;
9984         host->can_queue = ioa_cfg->max_cmds;
9985         pci_set_drvdata(pdev, ioa_cfg);
9986
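        /* HRRQ 0 shares the SCSI host lock; any additional HRRQs get
         * their own locks so completions on other vectors need not
         * contend for the host lock. */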
9987         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9988                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9989                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9990                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9991                 if (i == 0)
9992                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9993                 else
9994                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9995         }
9996 }
9997
9998 /**
9999  * ipr_get_chip_info - Find adapter chip information
10000  * @dev_id:             PCI device id struct
10001  *
10002  * Return value:
10003  *      ptr to chip information on success / NULL on failure
10004  **/
10005 static const struct ipr_chip_t *
10006 ipr_get_chip_info(const struct pci_device_id *dev_id)
10007 {
10008         int i;
10009
10010         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10011                 if (ipr_chip[i].vendor == dev_id->vendor &&
10012                     ipr_chip[i].device == dev_id->device)
10013                         return &ipr_chip[i];
10014         return NULL;
10015 }
10016
10017 /**
10018  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete during probe time
10020  * @ioa_cfg:    ioa config struct
10021  *
10022  * Return value:
10023  *      None
10024  **/
10025 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10026 {
10027         struct pci_dev *pdev = ioa_cfg->pdev;
10028
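        /* If EEH has taken the slot offline, wait (bounded by
         * IPR_PCI_ERROR_RECOVERY_TIMEOUT) for recovery to finish, then
         * restore the saved PCI config space. */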
10029         if (pci_channel_offline(pdev)) {
10030                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10031                                    !pci_channel_offline(pdev),
10032                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10033                 pci_restore_state(pdev);
10034         }
10035 }
10036
10037 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10038 {
10039         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10040
10041         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10042                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10043                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10044                 ioa_cfg->vectors_info[vec_idx].
10045                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10046         }
10047 }
10048
10049 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10050                 struct pci_dev *pdev)
10051 {
10052         int i, rc;
10053
10054         for (i = 1; i < ioa_cfg->nvectors; i++) {
10055                 rc = request_irq(pci_irq_vector(pdev, i),
10056                         ipr_isr_mhrrq,
10057                         0,
10058                         ioa_cfg->vectors_info[i].desc,
10059                         &ioa_cfg->hrrq[i]);
10060                 if (rc) {
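                        /* Note that --i walks down to vector 0, which the
                         * caller requested, so the probe error path does
                         * not need to free it separately. */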
10061                         while (--i >= 0)
10062                                 free_irq(pci_irq_vector(pdev, i),
10063                                         &ioa_cfg->hrrq[i]);
10064                         return rc;
10065                 }
10066         }
10067         return 0;
10068 }
10069
10070 /**
10071  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10072  * @irq:                interrupt number
10073  * @devp:               pointer to the ioa config struct
 *
10074  * Description: Simply set the msi_received flag to 1 indicating that
10075  * Message Signaled Interrupts are supported.
10076  *
10077  * Return value:
10078  *      IRQ_HANDLED
10079  **/
10080 static irqreturn_t ipr_test_intr(int irq, void *devp)
10081 {
10082         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10083         unsigned long lock_flags = 0;
10084         irqreturn_t rc = IRQ_HANDLED;
10085
10086         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10087         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10088
10089         ioa_cfg->msi_received = 1;
10090         wake_up(&ioa_cfg->msi_wait_q);
10091
10092         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10093         return rc;
10094 }
10095
10096 /**
10097  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10098  * @ioa_cfg:            ioa config struct
10099  * @pdev:               PCI device struct
 *
10100  * Description: This routine sets up and initiates a test interrupt to determine
10101  * if the interrupt is received via the ipr_test_intr() service routine.
10102  * If the test fails, the driver will fall back to legacy (LSI) interrupts.
10103  *
10104  * Return value:
10105  *      0 on success / non-zero on failure
10106  **/
10107 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10108 {
10109         int rc;
10110         volatile u32 int_reg;
10111         unsigned long lock_flags = 0;
10112         int irq = pci_irq_vector(pdev, 0);
10113
10114         ENTER;
10115
10116         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10117         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10118         ioa_cfg->msi_received = 0;
10119         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10120         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10121         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10122         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10123
10124         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10125         if (rc) {
10126                 dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10127                 return rc;
10128         } else if (ipr_debug)
10129                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10130
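
        /* Generate the test interrupt by writing the I/O debug acknowledge
         * bit; the readl below flushes the posted write before we sleep
         * waiting for ipr_test_intr() to run. */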
10131         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10132         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10133         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10134         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10135         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10136
10137         if (!ioa_cfg->msi_received) {
10138                 /* MSI test failed */
10139                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10140                 rc = -EOPNOTSUPP;
10141         } else if (ipr_debug)
10142                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10143
10144         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10145
10146         free_irq(irq, ioa_cfg);
10147
10148         LEAVE;
10149
10150         return rc;
10151 }
10152
10153 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10154  * @pdev:               PCI device struct
10155  * @dev_id:             PCI device id struct
10156  *
10157  * Return value:
10158  *      0 on success / non-zero on failure
10159  **/
10160 static int ipr_probe_ioa(struct pci_dev *pdev,
10161                          const struct pci_device_id *dev_id)
10162 {
10163         struct ipr_ioa_cfg *ioa_cfg;
10164         struct Scsi_Host *host;
10165         unsigned long ipr_regs_pci;
10166         void __iomem *ipr_regs;
10167         int rc = PCIBIOS_SUCCESSFUL;
10168         volatile u32 mask, uproc, interrupts;
10169         unsigned long lock_flags, driver_lock_flags;
10170         unsigned int irq_flag;
10171
10172         ENTER;
10173
10174         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10175         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10176
10177         if (!host) {
10178                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10179                 rc = -ENOMEM;
10180                 goto out;
10181         }
10182
10183         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10184         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10185         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10186
10187         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10188
10189         if (!ioa_cfg->ipr_chip) {
10190                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10191                         dev_id->vendor, dev_id->device);
10192                 goto out_scsi_host_put;
10193         }
10194
10195         /* set SIS 32 or SIS 64 */
10196         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10197         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10198         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10199         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10200
10201         if (ipr_transop_timeout)
10202                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10203         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10204                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10205         else
10206                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10207
10208         ioa_cfg->revid = pdev->revision;
10209
10210         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10211
10212         ipr_regs_pci = pci_resource_start(pdev, 0);
10213
10214         rc = pci_request_regions(pdev, IPR_NAME);
10215         if (rc < 0) {
10216                 dev_err(&pdev->dev,
10217                         "Couldn't register memory range of registers\n");
10218                 goto out_scsi_host_put;
10219         }
10220
10221         rc = pci_enable_device(pdev);
10222
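        /* pci_enable_device() can fail while EEH error recovery is still
         * in progress; wait for recovery to complete and retry once. */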
10223         if (rc || pci_channel_offline(pdev)) {
10224                 if (pci_channel_offline(pdev)) {
10225                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10226                         rc = pci_enable_device(pdev);
10227                 }
10228
10229                 if (rc) {
10230                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10231                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10232                         goto out_release_regions;
10233                 }
10234         }
10235
10236         ipr_regs = pci_ioremap_bar(pdev, 0);
10237
10238         if (!ipr_regs) {
10239                 dev_err(&pdev->dev,
10240                         "Couldn't map memory range of registers\n");
10241                 rc = -ENOMEM;
10242                 goto out_disable;
10243         }
10244
10245         ioa_cfg->hdw_dma_regs = ipr_regs;
10246         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10247         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10248
10249         ipr_init_regs(ioa_cfg);
10250
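        /* SIS-64 adapters support 64-bit DMA but can fall back to a
         * 32-bit mask; SIS-32 adapters are 32-bit only. */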
10251         if (ioa_cfg->sis64) {
10252                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10253                 if (rc < 0) {
10254                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10255                         rc = dma_set_mask_and_coherent(&pdev->dev,
10256                                                        DMA_BIT_MASK(32));
10257                 }
10258         } else
10259                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10260
10261         if (rc < 0) {
10262                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10263                 goto cleanup_nomem;
10264         }
10265
10266         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10267                                    ioa_cfg->chip_cfg->cache_line_size);
10268
10269         if (rc != PCIBIOS_SUCCESSFUL) {
10270                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10271                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10272                 rc = -EIO;
10273                 goto cleanup_nomem;
10274         }
10275
10276         /* Issue MMIO read to ensure card is not in EEH */
10277         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10278         ipr_wait_for_pci_err_recovery(ioa_cfg);
10279
10280         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10281                 dev_err(&pdev->dev, "The max number of MSI-X vectors is %d\n",
10282                         IPR_MAX_MSIX_VECTORS);
10283                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10284         }
10285
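        /* Prefer MSI-X/MSI when the chip supports them; a single legacy
         * INTx vector remains the fallback. */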
10286         irq_flag = PCI_IRQ_LEGACY;
10287         if (ioa_cfg->ipr_chip->has_msi)
10288                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10289         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10290         if (rc < 0) {
10291                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10292                 goto cleanup_nomem;
10293         }
10294         ioa_cfg->nvectors = rc;
10295
10296         if (!pdev->msi_enabled && !pdev->msix_enabled)
10297                 ioa_cfg->clear_isr = 1;
10298
10299         pci_set_master(pdev);
10300
10301         if (pci_channel_offline(pdev)) {
10302                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10303                 pci_set_master(pdev);
10304                 if (pci_channel_offline(pdev)) {
10305                         rc = -EIO;
10306                         goto out_msi_disable;
10307                 }
10308         }
10309
10310         if (pdev->msi_enabled || pdev->msix_enabled) {
10311                 rc = ipr_test_msi(ioa_cfg, pdev);
10312                 switch (rc) {
10313                 case 0:
10314                         dev_info(&pdev->dev,
10315                                 "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10316                                 pdev->msix_enabled ? "-X" : "");
10317                         break;
10318                 case -EOPNOTSUPP:
10319                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10320                         pci_free_irq_vectors(pdev);
10321
10322                         ioa_cfg->nvectors = 1;
10323                         ioa_cfg->clear_isr = 1;
10324                         break;
10325                 default:
10326                         goto out_msi_disable;
10327                 }
10328         }
10329
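        /* Use no more host RRQs than interrupt vectors, online CPUs, or
         * the driver maximum. */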
10330         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10331                                 (unsigned int)num_online_cpus(),
10332                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10333
10334         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10335                 goto out_msi_disable;
10336
10337         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10338                 goto out_msi_disable;
10339
10340         rc = ipr_alloc_mem(ioa_cfg);
10341         if (rc < 0) {
10342                 dev_err(&pdev->dev,
10343                         "Couldn't allocate enough memory for device driver!\n");
10344                 goto out_msi_disable;
10345         }
10346
10347         /* Save away PCI config space for use following IOA reset */
10348         rc = pci_save_state(pdev);
10349
10350         if (rc != PCIBIOS_SUCCESSFUL) {
10351                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10352                 rc = -EIO;
10353                 goto cleanup_nolog;
10354         }
10355
10356         /*
10357          * If HRRQ updated interrupt is not masked, or reset alert is set,
10358          * the card is in an unknown state and needs a hard reset
10359          */
10360         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10361         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10362         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10363         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10364                 ioa_cfg->needs_hard_reset = 1;
10365         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10366                 ioa_cfg->needs_hard_reset = 1;
10367         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10368                 ioa_cfg->ioa_unit_checked = 1;
10369
10370         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10371         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10372         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10373
10374         if (pdev->msi_enabled || pdev->msix_enabled) {
10375                 name_msi_vectors(ioa_cfg);
10376                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10377                         ioa_cfg->vectors_info[0].desc,
10378                         &ioa_cfg->hrrq[0]);
10379                 if (!rc)
10380                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10381         } else {
10382                 rc = request_irq(pdev->irq, ipr_isr,
10383                          IRQF_SHARED,
10384                          IPR_NAME, &ioa_cfg->hrrq[0]);
10385         }
10386         if (rc) {
10387                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10388                         pdev->irq, rc);
10389                 goto cleanup_nolog;
10390         }
10391
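        /* Adapters flagged IPR_USE_PCI_WARM_RESET (and pre-rev-1
         * Obsidian-E) are reset via a PCI slot reset, driven from a
         * dedicated ordered workqueue. */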
10392         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10393             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10394                 ioa_cfg->needs_warm_reset = 1;
10395                 ioa_cfg->reset = ipr_reset_slot_reset;
10396
10397                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10398                                                                 WQ_MEM_RECLAIM, host->host_no);
10399
10400                 if (!ioa_cfg->reset_work_q) {
10401                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10402                         rc = -ENOMEM;
10403                         goto out_free_irq;
10404                 }
10405         } else
10406                 ioa_cfg->reset = ipr_reset_start_bist;
10407
10408         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10409         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10410         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10411
10412         LEAVE;
10413 out:
10414         return rc;
10415
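/* Error unwind: each label below releases what was acquired before the
 * corresponding failure point, in reverse order of acquisition. */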
10416 out_free_irq:
10417         ipr_free_irqs(ioa_cfg);
10418 cleanup_nolog:
10419         ipr_free_mem(ioa_cfg);
10420 out_msi_disable:
10421         ipr_wait_for_pci_err_recovery(ioa_cfg);
10422         pci_free_irq_vectors(pdev);
10423 cleanup_nomem:
10424         iounmap(ipr_regs);
10425 out_disable:
10426         pci_disable_device(pdev);
10427 out_release_regions:
10428         pci_release_regions(pdev);
10429 out_scsi_host_put:
10430         scsi_host_put(host);
10431         goto out;
10432 }
10433
10434 /**
10435  * ipr_initiate_ioa_bringdown - Bring down an adapter
10436  * @ioa_cfg:            ioa config struct
10437  * @shutdown_type:      shutdown type
10438  *
10439  * Description: This function will initiate bringing down the adapter.
10440  * This consists of issuing an IOA shutdown to the adapter
10441  * to flush the cache, and running BIST.
10442  * If the caller needs to wait on the completion of the reset,
10443  * the caller must sleep on the reset_wait_q.
10444  *
10445  * Return value:
10446  *      none
10447  **/
10448 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10449                                        enum ipr_shutdown_type shutdown_type)
10450 {
10451         ENTER;
10452         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10453                 ioa_cfg->sdt_state = ABORT_DUMP;
10454         ioa_cfg->reset_retries = 0;
10455         ioa_cfg->in_ioa_bringdown = 1;
10456         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10457         LEAVE;
10458 }
10459
10460 /**
10461  * __ipr_remove - Remove a single adapter
10462  * @pdev:       pci device struct
10463  *
10464  * Adapter hot plug remove entry point.
10465  *
10466  * Return value:
10467  *      none
10468  **/
10469 static void __ipr_remove(struct pci_dev *pdev)
10470 {
10471         unsigned long host_lock_flags = 0;
10472         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10473         int i;
10474         unsigned long driver_lock_flags;
10475         ENTER;
10476
10477         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10478         while (ioa_cfg->in_reset_reload) {
10479                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10480                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10481                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10482         }
10483
10484         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10485                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10486                 ioa_cfg->hrrq[i].removing_ioa = 1;
10487                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10488         }
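        /* Order the removing_ioa stores above against the bringdown below,
         * which starts failing new commands. */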
10489         wmb();
10490         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10491
10492         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10493         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10494         flush_work(&ioa_cfg->work_q);
10495         if (ioa_cfg->reset_work_q)
10496                 flush_workqueue(ioa_cfg->reset_work_q);
10497         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10498         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10499
10500         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10501         list_del(&ioa_cfg->queue);
10502         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10503
10504         if (ioa_cfg->sdt_state == ABORT_DUMP)
10505                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10506         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10507
10508         ipr_free_all_resources(ioa_cfg);
10509
10510         LEAVE;
10511 }
10512
10513 /**
10514  * ipr_remove - IOA hot plug remove entry point
10515  * @pdev:       pci device struct
10516  *
10517  * Adapter hot plug remove entry point.
10518  *
10519  * Return value:
10520  *      none
10521  **/
10522 static void ipr_remove(struct pci_dev *pdev)
10523 {
10524         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10525
10526         ENTER;
10527
10528         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10529                               &ipr_trace_attr);
10530         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10531                              &ipr_dump_attr);
10532         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10533                         &ipr_ioa_async_err_log);
10534         scsi_remove_host(ioa_cfg->host);
10535
10536         __ipr_remove(pdev);
10537
10538         LEAVE;
10539 }
10540
10541 /**
10542  * ipr_probe - Adapter hot plug add entry point
10543  * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
10544  * Return value:
10545  *      0 on success / non-zero on failure
10546  **/
10547 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10548 {
10549         struct ipr_ioa_cfg *ioa_cfg;
10550         unsigned long flags;
10551         int rc, i;
10552
10553         rc = ipr_probe_ioa(pdev, dev_id);
10554
10555         if (rc)
10556                 return rc;
10557
10558         ioa_cfg = pci_get_drvdata(pdev);
10559         rc = ipr_probe_ioa_part2(ioa_cfg);
10560
10561         if (rc) {
10562                 __ipr_remove(pdev);
10563                 return rc;
10564         }
10565
10566         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10567
10568         if (rc) {
10569                 __ipr_remove(pdev);
10570                 return rc;
10571         }
10572
10573         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10574                                    &ipr_trace_attr);
10575
10576         if (rc) {
10577                 scsi_remove_host(ioa_cfg->host);
10578                 __ipr_remove(pdev);
10579                 return rc;
10580         }
10581
10582         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10583                         &ipr_ioa_async_err_log);
10584
10585         if (rc) {
10588                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10589                                 &ipr_trace_attr);
10590                 scsi_remove_host(ioa_cfg->host);
10591                 __ipr_remove(pdev);
10592                 return rc;
10593         }
10594
10595         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10596                                    &ipr_dump_attr);
10597
10598         if (rc) {
10599                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10600                                       &ipr_ioa_async_err_log);
10601                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10602                                       &ipr_trace_attr);
10603                 scsi_remove_host(ioa_cfg->host);
10604                 __ipr_remove(pdev);
10605                 return rc;
10606         }
10607         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10608         ioa_cfg->scan_enabled = 1;
10609         schedule_work(&ioa_cfg->work_q);
10610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10611
10612         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10613
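        /* With multiple vectors on SIS-64, completions on the secondary
         * HRRQs are processed via irq_poll instead of entirely in hard
         * interrupt context. */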
10614         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10615                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10616                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10617                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10618                 }
10619         }
10620
10621         scsi_scan_host(ioa_cfg->host);
10622
10623         return 0;
10624 }
10625
10626 /**
10627  * ipr_shutdown - Shutdown handler.
10628  * @pdev:       pci device struct
10629  *
10630  * This function is invoked upon system shutdown/reboot. It issues
10631  * a shutdown to the adapter to flush the write cache.
10632  *
10633  * Return value:
10634  *      none
10635  **/
10636 static void ipr_shutdown(struct pci_dev *pdev)
10637 {
10638         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10639         unsigned long lock_flags = 0;
10640         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10641         int i;
10642
10643         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10644         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10645                 ioa_cfg->iopoll_weight = 0;
10646                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10647                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10648         }
10649
10650         while (ioa_cfg->in_reset_reload) {
10651                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10652                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10653                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10654         }
10655
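        /* For a fast reboot of a SIS-64 adapter, quiesce rather than do a
         * full shutdown; IRQs are freed and the device disabled below. */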
10656         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10657                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10658
10659         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10660         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10661         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10662         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10663                 ipr_free_irqs(ioa_cfg);
10664                 pci_disable_device(ioa_cfg->pdev);
10665         }
10666 }
10667
10668 static struct pci_device_id ipr_pci_table[] = {
10669         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10670                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10671         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10672                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10673         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10674                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10675         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10676                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10677         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10678                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10679         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10680                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10681         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10682                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10683         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10684                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10685                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10686         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10687               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10688         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10689               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10690               IPR_USE_LONG_TRANSOP_TIMEOUT },
10691         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10692               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10693               IPR_USE_LONG_TRANSOP_TIMEOUT },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10695               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10697               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10698               IPR_USE_LONG_TRANSOP_TIMEOUT},
10699         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10700               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10701               IPR_USE_LONG_TRANSOP_TIMEOUT },
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10703               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10704               IPR_USE_LONG_TRANSOP_TIMEOUT },
10705         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10706               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10707         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10708               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10709         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10710               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10711               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10714         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10716         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10718                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10719         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10721                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10730         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10731                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10732         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10733                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10734         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10735                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10736         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10737                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10738         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10739                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10740         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10741                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10742         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10743                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10744         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10745                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10746         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10748         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10750         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10752         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10754         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10756         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10758         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10759                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10760         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10761                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10762         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10763                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10764         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10765                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10766         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10767                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10768         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10769                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10770         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10771                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10772         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10773                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10774         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10775                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10776         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10777                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10778         { }
10779 };
10780 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10781
10782 static const struct pci_error_handlers ipr_err_handler = {
10783         .error_detected = ipr_pci_error_detected,
10784         .mmio_enabled = ipr_pci_mmio_enabled,
10785         .slot_reset = ipr_pci_slot_reset,
10786 };
10787
10788 static struct pci_driver ipr_driver = {
10789         .name = IPR_NAME,
10790         .id_table = ipr_pci_table,
10791         .probe = ipr_probe,
10792         .remove = ipr_remove,
10793         .shutdown = ipr_shutdown,
10794         .err_handler = &ipr_err_handler,
10795 };
10796
10797 /**
10798  * ipr_halt_done - Shutdown prepare completion
10799  * @ipr_cmd:    ipr command struct
 *
10800  * Return value:
10801  *      none
10802  **/
10803 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10804 {
10805         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10806 }
10807
10808 /**
10809  * ipr_halt - Issue shutdown prepare to all adapters
10810  * @nb:         notifier block
 * @event:      system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:        unused
 *
10811  * Return value:
10812  *      NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
10813  **/
10814 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10815 {
10816         struct ipr_cmnd *ipr_cmd;
10817         struct ipr_ioa_cfg *ioa_cfg;
10818         unsigned long flags = 0, driver_lock_flags;
10819
10820         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10821                 return NOTIFY_DONE;
10822
10823         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10824
10825         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10826                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10827                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10828                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10829                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10830                         continue;
10831                 }
10832
10833                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10834                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10835                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10836                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10837                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10838
10839                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10840                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10841         }
10842         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10843
10844         return NOTIFY_OK;
10845 }
10846
10847 static struct notifier_block ipr_notifier = {
10848         .notifier_call = ipr_halt,
10849 };
10850
10851 /**
10852  * ipr_init - Module entry point
10853  *
10854  * Return value:
10855  *      0 on success / negative value on failure
10856  **/
10857 static int __init ipr_init(void)
10858 {
10859         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10860                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10861
10862         register_reboot_notifier(&ipr_notifier);
10863         return pci_register_driver(&ipr_driver);
10864 }
10865
10866 /**
10867  * ipr_exit - Module unload
10868  *
10869  * Module unload entry point.
10870  *
10871  * Return value:
10872  *      none
10873  **/
10874 static void __exit ipr_exit(void)
10875 {
10876         unregister_reboot_notifier(&ipr_notifier);
10877         pci_unregister_driver(&ipr_driver);
10878 }
10879
10880 module_init(ipr_init);
10881 module_exit(ipr_exit);