1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ipr.c -- driver for IBM Power Linux RAID adapters
5 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
7 * Copyright (C) 2003, 2004 IBM Corporation
13 * This driver is used to control the following SCSI adapters:
15 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
17 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18 * PCI-X Dual Channel Ultra 320 SCSI Adapter
19 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
20 * Embedded SCSI adapter on p615 and p655 systems
22 * Supported Hardware Features:
23 * - Ultra 320 SCSI controller
24 * - PCI-X host interface
25 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26 * - Non-Volatile Write Cache
27 * - Supports attachment of non-RAID disks, tape, and optical devices
28 * - RAID Levels 0, 5, 10
30 * - Background Parity Checking
31 * - Background Data Scrubbing
32 * - Ability to increase the capacity of an existing RAID 5 disk array
36 * - Tagged command queuing
37 * - Adapter microcode download
39 * - SCSI device hot plug
44 #include <linux/init.h>
45 #include <linux/types.h>
46 #include <linux/errno.h>
47 #include <linux/kernel.h>
48 #include <linux/slab.h>
49 #include <linux/vmalloc.h>
50 #include <linux/ioport.h>
51 #include <linux/delay.h>
52 #include <linux/pci.h>
53 #include <linux/wait.h>
54 #include <linux/spinlock.h>
55 #include <linux/sched.h>
56 #include <linux/interrupt.h>
57 #include <linux/blkdev.h>
58 #include <linux/firmware.h>
59 #include <linux/module.h>
60 #include <linux/moduleparam.h>
61 #include <linux/libata.h>
62 #include <linux/hdreg.h>
63 #include <linux/reboot.h>
64 #include <linux/stringify.h>
67 #include <asm/processor.h>
68 #include <scsi/scsi.h>
69 #include <scsi/scsi_host.h>
70 #include <scsi/scsi_tcq.h>
71 #include <scsi/scsi_eh.h>
72 #include <scsi/scsi_cmnd.h>
78 static LIST_HEAD(ipr_ioa_head);
79 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
80 static unsigned int ipr_max_speed = 1;
81 static int ipr_testmode = 0;
82 static unsigned int ipr_fastfail = 0;
83 static unsigned int ipr_transop_timeout = 0;
84 static unsigned int ipr_debug = 0;
85 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
86 static unsigned int ipr_dual_ioa_raid = 1;
87 static unsigned int ipr_number_of_msix = 16;
88 static unsigned int ipr_fast_reboot;
89 static DEFINE_SPINLOCK(ipr_driver_lock);
91 /* This table describes the differences between DMA controller chips */
92 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
93 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
96 .cache_line_size = 0x20,
100 .set_interrupt_mask_reg = 0x0022C,
101 .clr_interrupt_mask_reg = 0x00230,
102 .clr_interrupt_mask_reg32 = 0x00230,
103 .sense_interrupt_mask_reg = 0x0022C,
104 .sense_interrupt_mask_reg32 = 0x0022C,
105 .clr_interrupt_reg = 0x00228,
106 .clr_interrupt_reg32 = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .sense_interrupt_reg32 = 0x00224,
109 .ioarrin_reg = 0x00404,
110 .sense_uproc_interrupt_reg = 0x00214,
111 .sense_uproc_interrupt_reg32 = 0x00214,
112 .set_uproc_interrupt_reg = 0x00214,
113 .set_uproc_interrupt_reg32 = 0x00214,
114 .clr_uproc_interrupt_reg = 0x00218,
115 .clr_uproc_interrupt_reg32 = 0x00218
118 { /* Snipe and Scamp */
121 .cache_line_size = 0x20,
125 .set_interrupt_mask_reg = 0x00288,
126 .clr_interrupt_mask_reg = 0x0028C,
127 .clr_interrupt_mask_reg32 = 0x0028C,
128 .sense_interrupt_mask_reg = 0x00288,
129 .sense_interrupt_mask_reg32 = 0x00288,
130 .clr_interrupt_reg = 0x00284,
131 .clr_interrupt_reg32 = 0x00284,
132 .sense_interrupt_reg = 0x00280,
133 .sense_interrupt_reg32 = 0x00280,
134 .ioarrin_reg = 0x00504,
135 .sense_uproc_interrupt_reg = 0x00290,
136 .sense_uproc_interrupt_reg32 = 0x00290,
137 .set_uproc_interrupt_reg = 0x00290,
138 .set_uproc_interrupt_reg32 = 0x00290,
139 .clr_uproc_interrupt_reg = 0x00294,
140 .clr_uproc_interrupt_reg32 = 0x00294
146 .cache_line_size = 0x20,
150 .set_interrupt_mask_reg = 0x00010,
151 .clr_interrupt_mask_reg = 0x00018,
152 .clr_interrupt_mask_reg32 = 0x0001C,
153 .sense_interrupt_mask_reg = 0x00010,
154 .sense_interrupt_mask_reg32 = 0x00014,
155 .clr_interrupt_reg = 0x00008,
156 .clr_interrupt_reg32 = 0x0000C,
157 .sense_interrupt_reg = 0x00000,
158 .sense_interrupt_reg32 = 0x00004,
159 .ioarrin_reg = 0x00070,
160 .sense_uproc_interrupt_reg = 0x00020,
161 .sense_uproc_interrupt_reg32 = 0x00024,
162 .set_uproc_interrupt_reg = 0x00020,
163 .set_uproc_interrupt_reg32 = 0x00024,
164 .clr_uproc_interrupt_reg = 0x00028,
165 .clr_uproc_interrupt_reg32 = 0x0002C,
166 .init_feedback_reg = 0x0005C,
167 .dump_addr_reg = 0x00064,
168 .dump_data_reg = 0x00068,
169 .endian_swap_reg = 0x00084
174 static const struct ipr_chip_t ipr_chip[] = {
175 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
181 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
184 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
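/*
 * Each entry above ties a PCI vendor/device id pair to its register layout
 * in ipr_chip_cfg[] and to its SIS generation; the probe path is assumed to
 * walk this table to pick the right configuration for the adapter found.
 */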
187 static int ipr_max_bus_speeds[] = {
188 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
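/*
 * Note: the max_speed module parameter defined below (0-2) appears to be
 * used as an index into this table, i.e. 0 selects 80 MB/s, 1 selects U160
 * and 2 selects U320.
 */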
191 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
192 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
193 module_param_named(max_speed, ipr_max_speed, uint, 0);
194 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
195 module_param_named(log_level, ipr_log_level, uint, 0);
196 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
197 module_param_named(testmode, ipr_testmode, int, 0);
198 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
199 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
200 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
201 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
202 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
203 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
204 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
205 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
206 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
207 module_param_named(max_devs, ipr_max_devs, int, 0);
208 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
209 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
210 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
211 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
212 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
213 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
214 MODULE_LICENSE("GPL");
215 MODULE_VERSION(IPR_DRIVER_VERSION);
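/*
 * Example of setting the parameters above at load time (hypothetical
 * values):
 *
 *     modprobe ipr max_speed=2 log_level=2 number_of_msix=8
 *
 * or, with the driver built in, on the kernel command line as
 * ipr.max_speed=2, ipr.log_level=2, and so on.
 */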
217 /* A constant array of IOASCs/URCs/Error Messages */
219 struct ipr_error_table_t ipr_error_table[] = {
220 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
221 "8155: An unknown error was received"},
223 "Soft underlength error"},
225 "Command to be cancelled not found"},
227 "Qualified success"},
228 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
229 "FFFE: Soft device bus error recovered by the IOA"},
230 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
231 "4101: Soft device bus fabric error"},
232 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
233 "FFFC: Logical block guard error recovered by the device"},
234 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
235 "FFFC: Logical block reference tag error recovered by the device"},
236 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
237 "4171: Recovered scatter list tag / sequence number error"},
238 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
239 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
240 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
242 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFD: Recovered logical block reference tag error detected by the IOA"},
244 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFD: Logical block guard error recovered by the IOA"},
246 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
247 "FFF9: Device sector reassign successful"},
248 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FFF7: Media error recovered by device rewrite procedures"},
250 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "7001: IOA sector reassignment successful"},
252 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFF9: Soft media error. Sector reassignment recommended"},
254 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFF7: Media error recovered by IOA rewrite procedures"},
256 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FF3D: Soft PCI bus error recovered by the IOA"},
258 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
259 "FFF6: Device hardware error recovered by the IOA"},
260 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
261 "FFF6: Device hardware error recovered by the device"},
262 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
263 "FF3D: Soft IOA error recovered by the IOA"},
264 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFFA: Undefined device response recovered by the IOA"},
266 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
267 "FFF6: Device bus error, message or command phase"},
268 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
269 "FFFE: Task Management Function failed"},
270 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Failure prediction threshold exceeded"},
272 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
273 "8009: Impending cache battery pack failure"},
275 "Logical Unit in process of becoming ready"},
277 "Initializing command required"},
279 "34FF: Disk device format in progress"},
281 "Logical unit not accessible, target port in unavailable state"},
282 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
283 "9070: IOA requested reset"},
285 "Synchronization required"},
287 "IOA microcode download required"},
289 "Device bus connection is prohibited by host"},
291 "Not ready, IOA shutdown"},
293 "Not ready, IOA has been shutdown"},
294 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
295 "3020: Storage subsystem configuration error"},
297 "FFF5: Medium error, data unreadable, recommend reassign"},
299 "7000: Medium error, data unreadable, do not reassign"},
300 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
301 "FFF3: Disk media format bad"},
302 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
303 "3002: Addressed device failed to respond to selection"},
304 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "3100: Device bus error"},
306 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
307 "3109: IOA timed out a device command"},
309 "3120: SCSI bus is not operational"},
310 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "4100: Hard device bus fabric error"},
312 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
313 "310C: Logical block guard error detected by the device"},
314 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
315 "310C: Logical block reference tag error detected by the device"},
316 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
317 "4170: Scatter list tag / sequence number error"},
318 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "8150: Logical block CRC error on IOA to Host transfer"},
320 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
321 "4170: Logical block sequence number error on IOA to Host transfer"},
322 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310D: Logical block reference tag error detected by the IOA"},
324 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310D: Logical block guard error detected by the IOA"},
326 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
327 "9000: IOA reserved area data check"},
328 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
329 "9001: IOA reserved area invalid data pattern"},
330 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
331 "9002: IOA reserved area LRC error"},
332 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
333 "Hardware Error, IOA metadata access error"},
334 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
335 "102E: Out of alternate sectors for disk storage"},
336 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
337 "FFF4: Data transfer underlength error"},
338 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
339 "FFF4: Data transfer overlength error"},
340 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3400: Logical unit failure"},
342 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
343 "FFF4: Device microcode is corrupt"},
344 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
345 "8150: PCI bus error"},
347 "Unsupported device bus message received"},
348 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Disk device problem"},
350 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
351 "8150: Permanent IOA failure"},
352 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
353 "3010: Disk device returned wrong response to IOA"},
354 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
355 "8151: IOA microcode error"},
357 "Device bus status error"},
358 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
359 "8157: IOA error requiring IOA reset to recover"},
361 "ATA device status error"},
363 "Message reject received from the device"},
364 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8008: A permanent cache battery pack failure occurred"},
366 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
367 "9090: Disk unit has been modified after the last known status"},
368 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "9081: IOA detected device error"},
370 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
371 "9082: IOA detected device error"},
372 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
373 "3110: Device bus error, message or command phase"},
374 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
375 "3110: SAS Command / Task Management Function failed"},
376 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9091: Incorrect hardware configuration change has been detected"},
378 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9073: Invalid multi-adapter configuration"},
380 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
381 "4010: Incorrect connection between cascaded expanders"},
382 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "4020: Connections exceed IOA design limits"},
384 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "4030: Incorrect multipath connection"},
386 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
387 "4110: Unsupported enclosure function"},
388 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
389 "4120: SAS cable VPD cannot be read"},
390 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
391 "FFF4: Command to logical unit failed"},
393 "Illegal request, invalid request type or request packet"},
395 "Illegal request, invalid resource handle"},
397 "Illegal request, commands not allowed to this device"},
399 "Illegal request, command not allowed to a secondary adapter"},
401 "Illegal request, command not allowed to a non-optimized resource"},
403 "Illegal request, invalid field in parameter list"},
405 "Illegal request, parameter not supported"},
407 "Illegal request, parameter value invalid"},
409 "Illegal request, command sequence error"},
411 "Illegal request, dual adapter support not enabled"},
413 "Illegal request, another cable connector was physically disabled"},
415 "Illegal request, inconsistent group id/group count"},
416 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
417 "9031: Array protection temporarily suspended, protection resuming"},
418 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
419 "9040: Array protection temporarily suspended, protection resuming"},
420 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
421 "4080: IOA exceeded maximum operating temperature"},
422 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
423 "4085: Service required"},
424 {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
425 "4086: SAS Adapter Hardware Configuration Error"},
426 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
427 "3140: Device bus not ready to ready transition"},
428 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
429 "FFFB: SCSI bus was reset"},
431 "FFFE: SCSI bus transition to single ended"},
433 "FFFE: SCSI bus transition to LVD"},
434 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "FFFB: SCSI bus was reset by another initiator"},
436 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
437 "3029: A device replacement has occurred"},
438 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4102: Device bus fabric performance degradation"},
440 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
441 "9051: IOA cache data exists for a missing or failed device"},
442 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
443 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
444 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
445 "9025: Disk unit is not supported at its physical location"},
446 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
447 "3020: IOA detected a SCSI bus configuration error"},
448 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "3150: SCSI bus configuration error"},
450 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9074: Asymmetric advanced function disk configuration"},
452 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
453 "4040: Incomplete multipath connection between IOA and enclosure"},
454 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
455 "4041: Incomplete multipath connection between enclosure and device"},
456 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9075: Incomplete multipath connection between IOA and remote IOA"},
458 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9076: Configuration error, missing remote IOA"},
460 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
461 "4050: Enclosure does not support a required multipath function"},
462 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
463 "4121: Configuration error, required cable is missing"},
464 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4122: Cable is not plugged into the correct location on remote IOA"},
466 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4123: Configuration error, invalid cable vital product data"},
468 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
469 "4124: Configuration error, both cable ends are plugged into the same IOA"},
470 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
471 "4070: Logically bad block written on device"},
472 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
473 "9041: Array protection temporarily suspended"},
474 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
475 "9042: Corrupt array parity detected on specified device"},
476 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
477 "9030: Array no longer protected due to missing or failed disk unit"},
478 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
479 "9071: Link operational transition"},
480 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9072: Link not operational transition"},
482 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9032: Array exposed but still protected"},
484 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
485 "70DD: Device forced failed by disrupt device command"},
486 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
487 "4061: Multipath redundancy level got better"},
488 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
489 "4060: Multipath redundancy level got worse"},
490 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
491 "9083: Device raw mode enabled"},
492 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
493 "9084: Device raw mode disabled"},
495 "Failure due to other device"},
496 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
497 "9008: IOA does not support functions expected by devices"},
498 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "9010: Cache data associated with attached devices cannot be found"},
500 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9011: Cache data belongs to devices other than those attached"},
502 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9020: Array missing 2 or more devices with only 1 device present"},
504 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9021: Array missing 2 or more devices with 2 or more devices present"},
506 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9022: Exposed array is missing a required device"},
508 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9023: Array member(s) not at required physical locations"},
510 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9024: Array not functional due to present hardware configuration"},
512 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9026: Array not functional due to present hardware configuration"},
514 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9027: Array is missing a device and parity is out of sync"},
516 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9028: Maximum number of arrays already exist"},
518 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9050: Required cache data cannot be located for a disk unit"},
520 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9052: Cache data exists for a device that has been modified"},
522 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9054: IOA resources not available due to previous problems"},
524 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9092: Disk unit requires initialization before use"},
526 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9029: Incorrect hardware configuration change has been detected"},
528 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9060: One or more disk pairs are missing from an array"},
530 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9061: One or more disks are missing from an array"},
532 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9062: One or more disks are missing from an array"},
534 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9063: Maximum number of functional arrays has been exceeded"},
537 "Data protect, other volume set problem"},
539 "Aborted command, invalid descriptor"},
541 "Target operating conditions have changed, dual adapter takeover"},
543 "Aborted command, medium removal prevented"},
545 "Command terminated by host"},
547 "Aborted command, command terminated by host"}
550 static const struct ipr_ses_table_entry ipr_ses_table[] = {
551 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
552 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
553 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
554 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
555 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
556 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
557 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
558 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
559 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
560 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
561 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
562 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
563 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
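/*
 * Each entry above pairs an enclosure INQUIRY product id with the maximum
 * SCSI bus speed (in MB/s) to allow when that enclosure is attached; the
 * middle column looks like a per-byte compare mask ('X' = byte must match),
 * though that interpretation is an assumption based on the entries alone.
 */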
567 * Function Prototypes
569 static int ipr_reset_alert(struct ipr_cmnd *);
570 static void ipr_process_ccn(struct ipr_cmnd *);
571 static void ipr_process_error(struct ipr_cmnd *);
572 static void ipr_reset_ioa_job(struct ipr_cmnd *);
573 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
574 enum ipr_shutdown_type);
576 #ifdef CONFIG_SCSI_IPR_TRACE
578 * ipr_trc_hook - Add a trace entry to the driver trace
579 * @ipr_cmd: ipr command struct
581 * @add_data: additional data
586 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
587 u8 type, u32 add_data)
589 struct ipr_trace_entry *trace_entry;
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
591 unsigned int trace_index;
593 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
594 trace_entry = &ioa_cfg->trace[trace_index];
595 trace_entry->time = jiffies;
596 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
597 trace_entry->type = type;
598 if (ipr_cmd->ioa_cfg->sis64)
599 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
600 else
601 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
602 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
603 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
604 trace_entry->u.add_data = add_data;
608 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
612 * ipr_lock_and_done - Acquire lock and complete command
613 * @ipr_cmd: ipr command struct
618 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
620 unsigned long lock_flags;
621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
624 ipr_cmd->done(ipr_cmd);
625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
630 * @ipr_cmd: ipr command struct
635 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
637 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
638 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
639 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
640 dma_addr_t dma_addr = ipr_cmd->dma_addr;
643 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
644 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
645 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
646 ioarcb->data_transfer_length = 0;
647 ioarcb->read_data_transfer_length = 0;
648 ioarcb->ioadl_len = 0;
649 ioarcb->read_ioadl_len = 0;
651 if (ipr_cmd->ioa_cfg->sis64) {
652 ioarcb->u.sis64_addr_data.data_ioadl_addr =
653 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
654 ioasa64->u.gata.status = 0;
655 } else {
656 ioarcb->write_ioadl_addr =
657 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
658 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
659 ioasa->u.gata.status = 0;
662 ioasa->hdr.ioasc = 0;
663 ioasa->hdr.residual_data_len = 0;
664 ipr_cmd->scsi_cmd = NULL;
666 ipr_cmd->sense_buffer[0] = 0;
667 ipr_cmd->dma_use_sg = 0;
671 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
672 * @ipr_cmd: ipr command struct
673 * @fast_done: fast done function call-back
678 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
679 void (*fast_done) (struct ipr_cmnd *))
681 ipr_reinit_ipr_cmnd(ipr_cmd);
682 ipr_cmd->u.scratch = 0;
683 ipr_cmd->sibling = NULL;
684 ipr_cmd->eh_comp = NULL;
685 ipr_cmd->fast_done = fast_done;
686 timer_setup(&ipr_cmd->timer, NULL, 0);
690 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
694 * pointer to ipr command struct
697 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
699 struct ipr_cmnd *ipr_cmd = NULL;
701 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
702 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
703 struct ipr_cmnd, queue);
704 list_del(&ipr_cmd->queue);
712 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
713 * @ioa_cfg: ioa config struct
716 * pointer to ipr command struct
719 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721 struct ipr_cmnd *ipr_cmd =
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
723 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
728 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
729 * @ioa_cfg: ioa config struct
730 * @clr_ints: interrupts to clear
732 * This function masks all interrupts on the adapter, then clears the
733 * interrupts specified in the mask
738 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
743 /* Stop new interrupts */
744 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
745 spin_lock(&ioa_cfg->hrrq[i]._lock);
746 ioa_cfg->hrrq[i].allow_interrupts = 0;
747 spin_unlock(&ioa_cfg->hrrq[i]._lock);
750 /* Set interrupt mask to stop all new interrupts */
751 if (ioa_cfg->sis64)
752 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
753 else
754 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756 /* Clear any pending interrupts */
757 if (ioa_cfg->sis64)
758 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
759 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
760 readl(ioa_cfg->regs.sense_interrupt_reg);
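/* The readl above is assumed to act as a flush: reading a device register
 * back forces the posted mask/clear writes out to the adapter before the
 * function returns. */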
764 * ipr_save_pcix_cmd_reg - Save PCI-X command register
765 * @ioa_cfg: ioa config struct
768 * 0 on success / -EIO on failure
770 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
772 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
774 if (pcix_cmd_reg == 0)
777 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
778 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
779 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
788 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
789 * @ioa_cfg: ioa config struct
792 * 0 on success / -EIO on failure
794 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
796 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
799 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
800 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
801 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
810 * __ipr_sata_eh_done - done function for aborted SATA commands
811 * @ipr_cmd: ipr command struct
813 * This function is invoked for ops generated to SATA
814 * devices which are being aborted.
819 static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
821 struct ata_queued_cmd *qc = ipr_cmd->qc;
822 struct ipr_sata_port *sata_port = qc->ap->private_data;
824 qc->err_mask |= AC_ERR_OTHER;
825 sata_port->ioasa.status |= ATA_BUSY;
827 if (ipr_cmd->eh_comp)
828 complete(ipr_cmd->eh_comp);
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
833 * ipr_sata_eh_done - done function for aborted SATA commands
834 * @ipr_cmd: ipr command struct
836 * This function is invoked for ops generated to SATA
837 * devices which are being aborted.
842 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
844 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
845 unsigned long hrrq_flags;
847 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
848 __ipr_sata_eh_done(ipr_cmd);
849 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
853 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
854 * @ipr_cmd: ipr command struct
856 * This function is invoked by the interrupt handler for
857 * ops generated by the SCSI mid-layer which are being aborted.
862 static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
864 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
866 scsi_cmd->result |= (DID_ERROR << 16);
868 scsi_dma_unmap(ipr_cmd->scsi_cmd);
870 if (ipr_cmd->eh_comp)
871 complete(ipr_cmd->eh_comp);
872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
876 * ipr_scsi_eh_done - mid-layer done function for aborted ops
877 * @ipr_cmd: ipr command struct
879 * This function is invoked by the interrupt handler for
880 * ops generated by the SCSI mid-layer which are being aborted.
885 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
887 unsigned long hrrq_flags;
888 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
890 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
891 __ipr_scsi_eh_done(ipr_cmd);
892 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
896 * ipr_fail_all_ops - Fails all outstanding ops.
897 * @ioa_cfg: ioa config struct
899 * This function fails all outstanding ops.
904 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
906 struct ipr_cmnd *ipr_cmd, *temp;
907 struct ipr_hrr_queue *hrrq;
910 for_each_hrrq(hrrq, ioa_cfg) {
911 spin_lock(&hrrq->_lock);
912 list_for_each_entry_safe(ipr_cmd,
913 temp, &hrrq->hrrq_pending_q, queue) {
914 list_del(&ipr_cmd->queue);
916 ipr_cmd->s.ioasa.hdr.ioasc =
917 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
918 ipr_cmd->s.ioasa.hdr.ilid =
919 cpu_to_be32(IPR_DRIVER_ILID);
921 if (ipr_cmd->scsi_cmd)
922 ipr_cmd->done = __ipr_scsi_eh_done;
923 else if (ipr_cmd->qc)
924 ipr_cmd->done = __ipr_sata_eh_done;
926 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
927 IPR_IOASC_IOA_WAS_RESET);
928 del_timer(&ipr_cmd->timer);
929 ipr_cmd->done(ipr_cmd);
931 spin_unlock(&hrrq->_lock);
937 * ipr_send_command - Send driver initiated requests.
938 * @ipr_cmd: ipr command struct
940 * This function sends a command to the adapter using the correct write call.
941 * In the case of sis64, calculate the required ioarcb size, then OR the
942 * corresponding size bits into the low-order bits of the IOARCB address.
947 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
950 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
952 if (ioa_cfg->sis64) {
953 /* The default size is 256 bytes */
954 send_dma_addr |= 0x1;
956 /* If the number of ioadls * size of ioadl > 128 bytes,
957 then use a 512 byte ioarcb */
958 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
959 send_dma_addr |= 0x4;
960 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
961 } else
962 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
966 * ipr_do_req - Send driver initiated requests.
967 * @ipr_cmd: ipr command struct
968 * @done: done function
969 * @timeout_func: timeout function
970 * @timeout: timeout value
972 * This function sends the specified command to the adapter with the
973 * timeout given. The done function is invoked on command completion.
978 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
979 void (*done) (struct ipr_cmnd *),
980 void (*timeout_func) (struct timer_list *), u32 timeout)
982 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
984 ipr_cmd->done = done;
986 ipr_cmd->timer.expires = jiffies + timeout;
987 ipr_cmd->timer.function = timeout_func;
989 add_timer(&ipr_cmd->timer);
991 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
993 ipr_send_command(ipr_cmd);
997 * ipr_internal_cmd_done - Op done function for an internally generated op.
998 * @ipr_cmd: ipr command struct
1000 * This function is the op done function for an internally generated,
1001 * blocking op. It simply wakes the sleeping thread.
1006 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1008 if (ipr_cmd->sibling)
1009 ipr_cmd->sibling = NULL;
1010 else
1011 complete(&ipr_cmd->completion);
1015 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1016 * @ipr_cmd: ipr command struct
1017 * @dma_addr: dma address
1018 * @len: transfer length
1019 * @flags: ioadl flag value
1021 * This function initializes an ioadl in the case where there is only a single
1022 * buffer.
1027 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1030 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1031 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1033 ipr_cmd->dma_use_sg = 1;
1035 if (ipr_cmd->ioa_cfg->sis64) {
1036 ioadl64->flags = cpu_to_be32(flags);
1037 ioadl64->data_len = cpu_to_be32(len);
1038 ioadl64->address = cpu_to_be64(dma_addr);
1040 ipr_cmd->ioarcb.ioadl_len =
1041 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1042 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1043 } else {
1044 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1045 ioadl->address = cpu_to_be32(dma_addr);
1047 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1048 ipr_cmd->ioarcb.read_ioadl_len =
1049 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1050 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1051 } else {
1052 ipr_cmd->ioarcb.ioadl_len =
1053 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1054 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1060 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1061 * @ipr_cmd: ipr command struct
1062 * @timeout_func: function to invoke if command times out
1068 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1069 void (*timeout_func) (struct timer_list *),
1072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1074 init_completion(&ipr_cmd->completion);
1075 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
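/* The host lock (a spinlock) cannot be held across the sleeping
 * wait_for_completion() below, so it is dropped here and re-acquired once
 * the command completes, preserving the caller's locking context. */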
1077 spin_unlock_irq(ioa_cfg->host->host_lock);
1078 wait_for_completion(&ipr_cmd->completion);
1079 spin_lock_irq(ioa_cfg->host->host_lock);
1082 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1086 if (ioa_cfg->hrrq_num == 1)
1087 hrrq = 0;
1088 else {
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
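/* Example: with hrrq_num == 4, successive callers are handed HRRQs
 * 1, 2, 3, 1, 2, ... in round-robin order; HRRQ 0 (IPR_INIT_HRRQ) appears
 * to be reserved for the driver's own internal commands. */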
1096 * ipr_send_hcam - Send an HCAM to the adapter.
1097 * @ioa_cfg: ioa config struct
1099 * @hostrcb: hostrcb struct
1101 * This function will send a Host Controlled Async command to the adapter.
1102 * If HCAMs are currently not allowed to be issued to the adapter, it will
1103 * place the hostrcb on the free queue.
1108 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1109 struct ipr_hostrcb *hostrcb)
1111 struct ipr_cmnd *ipr_cmd;
1112 struct ipr_ioarcb *ioarcb;
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1115 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1116 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1117 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1119 ipr_cmd->u.hostrcb = hostrcb;
1120 ioarcb = &ipr_cmd->ioarcb;
1122 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1123 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1124 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1125 ioarcb->cmd_pkt.cdb[1] = type;
1126 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1127 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1129 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1130 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1132 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1133 ipr_cmd->done = ipr_process_ccn;
1134 else
1135 ipr_cmd->done = ipr_process_error;
1137 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1139 ipr_send_command(ipr_cmd);
1140 } else {
1141 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1146 * ipr_update_ata_class - Update the ata class in the resource entry
1147 * @res: resource entry struct
1148 * @proto: cfgte device bus protocol value
1153 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1155 switch (proto) {
1156 case IPR_PROTO_SATA:
1157 case IPR_PROTO_SAS_STP:
1158 res->ata_class = ATA_DEV_ATA;
1159 break;
1160 case IPR_PROTO_SATA_ATAPI:
1161 case IPR_PROTO_SAS_STP_ATAPI:
1162 res->ata_class = ATA_DEV_ATAPI;
1163 break;
1164 default:
1165 res->ata_class = ATA_DEV_UNKNOWN;
1171 * ipr_init_res_entry - Initialize a resource entry struct.
1172 * @res: resource entry struct
1173 * @cfgtew: config table entry wrapper struct
1178 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1179 struct ipr_config_table_entry_wrapper *cfgtew)
1183 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1184 struct ipr_resource_entry *gscsi_res = NULL;
1186 res->needs_sync_complete = 0;
1189 res->del_from_ml = 0;
1190 res->resetting_device = 0;
1191 res->reset_occurred = 0;
1193 res->sata_port = NULL;
1195 if (ioa_cfg->sis64) {
1196 proto = cfgtew->u.cfgte64->proto;
1197 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1198 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1199 res->qmodel = IPR_QUEUEING_MODEL64(res);
1200 res->type = cfgtew->u.cfgte64->res_type;
1202 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1203 sizeof(res->res_path));
1206 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1207 sizeof(res->dev_lun.scsi_lun));
1208 res->lun = scsilun_to_int(&res->dev_lun);
1210 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1211 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1212 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1214 res->target = gscsi_res->target;
1219 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1220 ioa_cfg->max_devs_supported);
1221 set_bit(res->target, ioa_cfg->target_ids);
1223 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1224 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1226 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1227 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1228 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1229 ioa_cfg->max_devs_supported);
1230 set_bit(res->target, ioa_cfg->array_ids);
1231 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1232 res->bus = IPR_VSET_VIRTUAL_BUS;
1233 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->vset_ids);
1237 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1238 ioa_cfg->max_devs_supported);
1239 set_bit(res->target, ioa_cfg->target_ids);
1241 } else {
1242 proto = cfgtew->u.cfgte->proto;
1243 res->qmodel = IPR_QUEUEING_MODEL(res);
1244 res->flags = cfgtew->u.cfgte->flags;
1245 if (res->flags & IPR_IS_IOA_RESOURCE)
1246 res->type = IPR_RES_TYPE_IOAFP;
1248 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1250 res->bus = cfgtew->u.cfgte->res_addr.bus;
1251 res->target = cfgtew->u.cfgte->res_addr.target;
1252 res->lun = cfgtew->u.cfgte->res_addr.lun;
1253 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1256 ipr_update_ata_class(res, proto);
1260 * ipr_is_same_device - Determine if two devices are the same.
1261 * @res: resource entry struct
1262 * @cfgtew: config table entry wrapper struct
1265 * 1 if the devices are the same / 0 otherwise
1267 static int ipr_is_same_device(struct ipr_resource_entry *res,
1268 struct ipr_config_table_entry_wrapper *cfgtew)
1270 if (res->ioa_cfg->sis64) {
1271 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1272 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1273 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274 sizeof(cfgtew->u.cfgte64->lun))) {
1278 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1279 res->target == cfgtew->u.cfgte->res_addr.target &&
1280 res->lun == cfgtew->u.cfgte->res_addr.lun)
1288 * __ipr_format_res_path - Format the resource path for printing.
1289 * @res_path: resource path
1291 * @len: length of buffer provided
1296 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1302 p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1303 for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
1304 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
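/* For example, a res_path of { 0x00, 0x02, 0xff, ... } is rendered as
 * "00-02"; an 0xff byte terminates the path. */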
1310 * ipr_format_res_path - Format the resource path for printing.
1311 * @ioa_cfg: ioa config struct
1312 * @res_path: resource path
1314 * @len: length of buffer provided
1319 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320 u8 *res_path, char *buffer, int len)
1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1326 __ipr_format_res_path(res_path, p, len - (p - buffer));
1331 * ipr_update_res_entry - Update the resource entry.
1332 * @res: resource entry struct
1333 * @cfgtew: config table entry wrapper struct
1338 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339 struct ipr_config_table_entry_wrapper *cfgtew)
1341 char buffer[IPR_MAX_RES_PATH_LENGTH];
1345 if (res->ioa_cfg->sis64) {
1346 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1348 res->type = cfgtew->u.cfgte64->res_type;
1350 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351 sizeof(struct ipr_std_inq_data));
1353 res->qmodel = IPR_QUEUEING_MODEL64(res);
1354 proto = cfgtew->u.cfgte64->proto;
1355 res->res_handle = cfgtew->u.cfgte64->res_handle;
1356 res->dev_id = cfgtew->u.cfgte64->dev_id;
1358 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359 sizeof(res->dev_lun.scsi_lun));
1361 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1362 sizeof(res->res_path))) {
1363 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364 sizeof(res->res_path));
1368 if (res->sdev && new_path)
1369 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1370 ipr_format_res_path(res->ioa_cfg,
1371 res->res_path, buffer, sizeof(buffer)));
1372 } else {
1373 res->flags = cfgtew->u.cfgte->flags;
1374 if (res->flags & IPR_IS_IOA_RESOURCE)
1375 res->type = IPR_RES_TYPE_IOAFP;
1377 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1379 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380 sizeof(struct ipr_std_inq_data));
1382 res->qmodel = IPR_QUEUEING_MODEL(res);
1383 proto = cfgtew->u.cfgte->proto;
1384 res->res_handle = cfgtew->u.cfgte->res_handle;
1387 ipr_update_ata_class(res, proto);
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1393 * @res: resource entry struct
1398 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1400 struct ipr_resource_entry *gscsi_res = NULL;
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1403 if (!ioa_cfg->sis64)
1404 return;
1406 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1407 clear_bit(res->target, ioa_cfg->array_ids);
1408 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1409 clear_bit(res->target, ioa_cfg->vset_ids);
1410 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1412 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1414 clear_bit(res->target, ioa_cfg->target_ids);
1416 } else if (res->bus == 0)
1417 clear_bit(res->target, ioa_cfg->target_ids);
1421 * ipr_handle_config_change - Handle a config change from the adapter
1422 * @ioa_cfg: ioa config struct
1428 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1429 struct ipr_hostrcb *hostrcb)
1431 struct ipr_resource_entry *res = NULL;
1432 struct ipr_config_table_entry_wrapper cfgtew;
1433 __be32 cc_res_handle;
1437 if (ioa_cfg->sis64) {
1438 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1439 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1441 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1442 cc_res_handle = cfgtew.u.cfgte->res_handle;
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1446 if (res->res_handle == cc_res_handle) {
1453 if (list_empty(&ioa_cfg->free_res_q)) {
1454 ipr_send_hcam(ioa_cfg,
1455 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1460 res = list_entry(ioa_cfg->free_res_q.next,
1461 struct ipr_resource_entry, queue);
1463 list_del(&res->queue);
1464 ipr_init_res_entry(res, &cfgtew);
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1468 ipr_update_res_entry(res, &cfgtew);
1470 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1471 if (res->sdev) {
1472 res->del_from_ml = 1;
1473 res->res_handle = IPR_INVALID_RES_HANDLE;
1474 schedule_work(&ioa_cfg->work_q);
1475 } else {
1476 ipr_clear_res_target(res);
1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1479 } else if (!res->sdev || res->del_from_ml) {
1481 schedule_work(&ioa_cfg->work_q);
1484 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1488 * ipr_process_ccn - Op done function for a CCN.
1489 * @ipr_cmd: ipr command struct
1491 * This function is the op done function for a configuration
1492 * change notification host controlled async from the adapter.
1497 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1500 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1501 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1503 list_del_init(&hostrcb->queue);
1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1506 if (ioasc) {
1507 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1508 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1509 dev_err(&ioa_cfg->pdev->dev,
1510 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1513 } else {
1514 ipr_handle_config_change(ioa_cfg, hostrcb);
1519 * strip_whitespace - Strip trailing whitespace.
1520 * @i: size of buffer
1521 * @buf: string to modify
1523 * This function will strip all trailing whitespace and
1524 * NUL terminate the string.
1527 static void strip_whitespace(int i, char *buf)
1532 while (i && buf[i] == ' ')
1538 * ipr_log_vpd_compact - Log the passed VPD compactly.
1539 * @prefix: string to print at start of printk
1540 * @hostrcb: hostrcb pointer
1541 * @vpd: vendor/product id/sn struct
1546 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1547 struct ipr_vpd *vpd)
1549 char vendor_id[IPR_VENDOR_ID_LEN + 1];
1550 char product_id[IPR_PROD_ID_LEN + 1];
1551 char sn[IPR_SERIAL_NUM_LEN + 1];
1553 memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1554 strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1556 memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1557 strip_whitespace(IPR_PROD_ID_LEN, product_id);
1559 memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1560 strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1562 ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1563 vendor_id, product_id, sn);
1567 * ipr_log_vpd - Log the passed VPD to the error log.
1568 * @vpd: vendor/product id/sn struct
1573 static void ipr_log_vpd(struct ipr_vpd *vpd)
1575 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1576 + IPR_SERIAL_NUM_LEN];
1578 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1579 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1581 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1582 ipr_err("Vendor/Product ID: %s\n", buffer);
1584 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1585 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1586 ipr_err(" Serial Number: %s\n", buffer);
1590 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1591 * @prefix: string to print at start of printk
1592 * @hostrcb: hostrcb pointer
1593 * @vpd: vendor/product id/sn/wwn struct
1598 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1599 struct ipr_ext_vpd *vpd)
1601 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1602 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1603 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1607 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1608 * @vpd: vendor/product id/sn/wwn struct
1613 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1615 ipr_log_vpd(&vpd->vpd);
1616 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1617 be32_to_cpu(vpd->wwid[1]));
1621 * ipr_log_enhanced_cache_error - Log a cache error.
1622 * @ioa_cfg: ioa config struct
1623 * @hostrcb: hostrcb struct
1628 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629 struct ipr_hostrcb *hostrcb)
1631 struct ipr_hostrcb_type_12_error *error;
1633 if (ioa_cfg->sis64)
1634 error = &hostrcb->hcam.u.error64.u.type_12_error;
1635 else
1636 error = &hostrcb->hcam.u.error.u.type_12_error;
1638 ipr_err("-----Current Configuration-----\n");
1639 ipr_err("Cache Directory Card Information:\n");
1640 ipr_log_ext_vpd(&error->ioa_vpd);
1641 ipr_err("Adapter Card Information:\n");
1642 ipr_log_ext_vpd(&error->cfc_vpd);
1644 ipr_err("-----Expected Configuration-----\n");
1645 ipr_err("Cache Directory Card Information:\n");
1646 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1647 ipr_err("Adapter Card Information:\n");
1648 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1650 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1651 be32_to_cpu(error->ioa_data[0]),
1652 be32_to_cpu(error->ioa_data[1]),
1653 be32_to_cpu(error->ioa_data[2]));
1657 * ipr_log_cache_error - Log a cache error.
1658 * @ioa_cfg: ioa config struct
1659 * @hostrcb: hostrcb struct
1664 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1665 struct ipr_hostrcb *hostrcb)
1667 struct ipr_hostrcb_type_02_error *error =
1668 &hostrcb->hcam.u.error.u.type_02_error;
1670 ipr_err("-----Current Configuration-----\n");
1671 ipr_err("Cache Directory Card Information:\n");
1672 ipr_log_vpd(&error->ioa_vpd);
1673 ipr_err("Adapter Card Information:\n");
1674 ipr_log_vpd(&error->cfc_vpd);
1676 ipr_err("-----Expected Configuration-----\n");
1677 ipr_err("Cache Directory Card Information:\n");
1678 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1679 ipr_err("Adapter Card Information:\n");
1680 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1682 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1683 be32_to_cpu(error->ioa_data[0]),
1684 be32_to_cpu(error->ioa_data[1]),
1685 be32_to_cpu(error->ioa_data[2]));
1689 * ipr_log_enhanced_config_error - Log a configuration error.
1690 * @ioa_cfg: ioa config struct
1691 * @hostrcb: hostrcb struct
1696 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1697 struct ipr_hostrcb *hostrcb)
1699 int errors_logged, i;
1700 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1701 struct ipr_hostrcb_type_13_error *error;
1703 error = &hostrcb->hcam.u.error.u.type_13_error;
1704 errors_logged = be32_to_cpu(error->errors_logged);
1706 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1707 be32_to_cpu(error->errors_detected), errors_logged);
1709 dev_entry = error->dev;
1711 for (i = 0; i < errors_logged; i++, dev_entry++) {
1714 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1715 ipr_log_ext_vpd(&dev_entry->vpd);
1717 ipr_err("-----New Device Information-----\n");
1718 ipr_log_ext_vpd(&dev_entry->new_vpd);
1720 ipr_err("Cache Directory Card Information:\n");
1721 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1723 ipr_err("Adapter Card Information:\n");
1724 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1729 * ipr_log_sis64_config_error - Log a device error.
1730 * @ioa_cfg: ioa config struct
1731 * @hostrcb: hostrcb struct
1736 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1737 struct ipr_hostrcb *hostrcb)
1739 int errors_logged, i;
1740 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1741 struct ipr_hostrcb_type_23_error *error;
1742 char buffer[IPR_MAX_RES_PATH_LENGTH];
1744 error = &hostrcb->hcam.u.error64.u.type_23_error;
1745 errors_logged = be32_to_cpu(error->errors_logged);
1747 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1748 be32_to_cpu(error->errors_detected), errors_logged);
1750 dev_entry = error->dev;
1752 for (i = 0; i < errors_logged; i++, dev_entry++) {
1755 ipr_err("Device %d : %s", i + 1,
1756 __ipr_format_res_path(dev_entry->res_path,
1757 buffer, sizeof(buffer)));
1758 ipr_log_ext_vpd(&dev_entry->vpd);
1760 ipr_err("-----New Device Information-----\n");
1761 ipr_log_ext_vpd(&dev_entry->new_vpd);
1763 ipr_err("Cache Directory Card Information:\n");
1764 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1766 ipr_err("Adapter Card Information:\n");
1767 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1772 * ipr_log_config_error - Log a configuration error.
1773 * @ioa_cfg: ioa config struct
1774 * @hostrcb: hostrcb struct
1779 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1780 struct ipr_hostrcb *hostrcb)
1782 int errors_logged, i;
1783 struct ipr_hostrcb_device_data_entry *dev_entry;
1784 struct ipr_hostrcb_type_03_error *error;
1786 error = &hostrcb->hcam.u.error.u.type_03_error;
1787 errors_logged = be32_to_cpu(error->errors_logged);
1789 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1790 be32_to_cpu(error->errors_detected), errors_logged);
1792 dev_entry = error->dev;
1794 for (i = 0; i < errors_logged; i++, dev_entry++) {
1797 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1798 ipr_log_vpd(&dev_entry->vpd);
1800 ipr_err("-----New Device Information-----\n");
1801 ipr_log_vpd(&dev_entry->new_vpd);
1803 ipr_err("Cache Directory Card Information:\n");
1804 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1806 ipr_err("Adapter Card Information:\n");
1807 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1809 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1810 be32_to_cpu(dev_entry->ioa_data[0]),
1811 be32_to_cpu(dev_entry->ioa_data[1]),
1812 be32_to_cpu(dev_entry->ioa_data[2]),
1813 be32_to_cpu(dev_entry->ioa_data[3]),
1814 be32_to_cpu(dev_entry->ioa_data[4]));
1819 * ipr_log_enhanced_array_error - Log an array configuration error.
1820 * @ioa_cfg: ioa config struct
1821 * @hostrcb: hostrcb struct
1826 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1827 struct ipr_hostrcb *hostrcb)
1830 struct ipr_hostrcb_type_14_error *error;
1831 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1832 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1834 error = &hostrcb->hcam.u.error.u.type_14_error;
1838 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1839 error->protection_level,
1840 ioa_cfg->host->host_no,
1841 error->last_func_vset_res_addr.bus,
1842 error->last_func_vset_res_addr.target,
1843 error->last_func_vset_res_addr.lun);
1847 array_entry = error->array_member;
1848 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1849 ARRAY_SIZE(error->array_member));
1851 for (i = 0; i < num_entries; i++, array_entry++) {
1852 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1855 if (be32_to_cpu(error->exposed_mode_adn) == i)
1856 ipr_err("Exposed Array Member %d:\n", i);
1858 ipr_err("Array Member %d:\n", i);
1860 ipr_log_ext_vpd(&array_entry->vpd);
1861 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1862 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1863 "Expected Location");
1870 * ipr_log_array_error - Log an array configuration error.
1871 * @ioa_cfg: ioa config struct
1872 * @hostrcb: hostrcb struct
1877 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1878 struct ipr_hostrcb *hostrcb)
1881 struct ipr_hostrcb_type_04_error *error;
1882 struct ipr_hostrcb_array_data_entry *array_entry;
1883 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1885 error = &hostrcb->hcam.u.error.u.type_04_error;
1889 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1890 error->protection_level,
1891 ioa_cfg->host->host_no,
1892 error->last_func_vset_res_addr.bus,
1893 error->last_func_vset_res_addr.target,
1894 error->last_func_vset_res_addr.lun);
1898 array_entry = error->array_member;
1900 for (i = 0; i < 18; i++) {
1901 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1904 if (be32_to_cpu(error->exposed_mode_adn) == i)
1905 ipr_err("Exposed Array Member %d:\n", i);
1907 ipr_err("Array Member %d:\n", i);
1909 ipr_log_vpd(&array_entry->vpd);
1911 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1912 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1913 "Expected Location");
1918 array_entry = error->array_member2;
1925 * ipr_log_hex_data - Log additional hex IOA error data.
1926 * @ioa_cfg: ioa config struct
1927 * @data: IOA error data
1933 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1940 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1941 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
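/* Dump four 32-bit words (16 bytes) per line, each line prefixed with its
   starting byte offset. */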
1943 for (i = 0; i < len / 4; i += 4) {
1944 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1945 be32_to_cpu(data[i]),
1946 be32_to_cpu(data[i+1]),
1947 be32_to_cpu(data[i+2]),
1948 be32_to_cpu(data[i+3]));
1953 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1954 * @ioa_cfg: ioa config struct
1955 * @hostrcb: hostrcb struct
1960 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1961 struct ipr_hostrcb *hostrcb)
1963 struct ipr_hostrcb_type_17_error *error;
1966 error = &hostrcb->hcam.u.error64.u.type_17_error;
1968 error = &hostrcb->hcam.u.error.u.type_17_error;
1970 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1971 strim(error->failure_reason);
1973 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1974 be32_to_cpu(hostrcb->hcam.u.error.prc));
1975 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1976 ipr_log_hex_data(ioa_cfg, error->data,
1977 be32_to_cpu(hostrcb->hcam.length) -
1978 (offsetof(struct ipr_hostrcb_error, u) +
1979 offsetof(struct ipr_hostrcb_type_17_error, data)));
1983 * ipr_log_dual_ioa_error - Log a dual adapter error.
1984 * @ioa_cfg: ioa config struct
1985 * @hostrcb: hostrcb struct
1990 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1991 struct ipr_hostrcb *hostrcb)
1993 struct ipr_hostrcb_type_07_error *error;
1995 error = &hostrcb->hcam.u.error.u.type_07_error;
1996 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1997 strim(error->failure_reason);
1999 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2000 be32_to_cpu(hostrcb->hcam.u.error.prc));
2001 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2002 ipr_log_hex_data(ioa_cfg, error->data,
2003 be32_to_cpu(hostrcb->hcam.length) -
2004 (offsetof(struct ipr_hostrcb_error, u) +
2005 offsetof(struct ipr_hostrcb_type_07_error, data)));
2008 static const struct {
2011 } path_active_desc[] = {
2012 { IPR_PATH_NO_INFO, "Path" },
2013 { IPR_PATH_ACTIVE, "Active path" },
2014 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2017 static const struct {
2020 } path_state_desc[] = {
2021 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2022 { IPR_PATH_HEALTHY, "is healthy" },
2023 { IPR_PATH_DEGRADED, "is degraded" },
2024 { IPR_PATH_FAILED, "is failed" }
2028 * ipr_log_fabric_path - Log a fabric path error
2029 * @hostrcb: hostrcb struct
2030 * @fabric: fabric descriptor
2035 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2036 struct ipr_hostrcb_fabric_desc *fabric)
2039 u8 path_state = fabric->path_state;
2040 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2041 u8 state = path_state & IPR_PATH_STATE_MASK;
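/* A value of 0xff in the cascaded expander or phy field means that component
   is not part of the reported path, so it is omitted from the message. */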
2043 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2044 if (path_active_desc[i].active != active)
2047 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2048 if (path_state_desc[j].state != state)
2051 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2052 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2053 path_active_desc[i].desc, path_state_desc[j].desc,
2055 } else if (fabric->cascaded_expander == 0xff) {
2056 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2057 path_active_desc[i].desc, path_state_desc[j].desc,
2058 fabric->ioa_port, fabric->phy);
2059 } else if (fabric->phy == 0xff) {
2060 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2061 path_active_desc[i].desc, path_state_desc[j].desc,
2062 fabric->ioa_port, fabric->cascaded_expander);
2064 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2065 path_active_desc[i].desc, path_state_desc[j].desc,
2066 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2072 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2073 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2077 * ipr_log64_fabric_path - Log a fabric path error
2078 * @hostrcb: hostrcb struct
2079 * @fabric: fabric descriptor
2084 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2085 struct ipr_hostrcb64_fabric_desc *fabric)
2088 u8 path_state = fabric->path_state;
2089 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2090 u8 state = path_state & IPR_PATH_STATE_MASK;
2091 char buffer[IPR_MAX_RES_PATH_LENGTH];
2093 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2094 if (path_active_desc[i].active != active)
2097 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2098 if (path_state_desc[j].state != state)
2101 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2102 path_active_desc[i].desc, path_state_desc[j].desc,
2103 ipr_format_res_path(hostrcb->ioa_cfg,
2105 buffer, sizeof(buffer)));
2110 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2111 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2112 buffer, sizeof(buffer)));
2115 static const struct {
2118 } path_type_desc[] = {
2119 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2120 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2121 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2122 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2125 static const struct {
2128 } path_status_desc[] = {
2129 { IPR_PATH_CFG_NO_PROB, "Functional" },
2130 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2131 { IPR_PATH_CFG_FAILED, "Failed" },
2132 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2133 { IPR_PATH_NOT_DETECTED, "Missing" },
2134 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2137 static const char *link_rate[] = {
2140 "phy reset problem",
2157 * ipr_log_path_elem - Log a fabric path element.
2158 * @hostrcb: hostrcb struct
2159 * @cfg: fabric path element struct
2164 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2165 struct ipr_hostrcb_config_element *cfg)
2168 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2169 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2171 if (type == IPR_PATH_CFG_NOT_EXIST)
2174 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2175 if (path_type_desc[i].type != type)
2178 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2179 if (path_status_desc[j].status != status)
2182 if (type == IPR_PATH_CFG_IOA_PORT) {
2183 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2184 path_status_desc[j].desc, path_type_desc[i].desc,
2185 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2186 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2188 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2189 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2190 path_status_desc[j].desc, path_type_desc[i].desc,
2191 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2192 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2193 } else if (cfg->cascaded_expander == 0xff) {
2194 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2195 "WWN=%08X%08X\n", path_status_desc[j].desc,
2196 path_type_desc[i].desc, cfg->phy,
2197 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2198 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2199 } else if (cfg->phy == 0xff) {
2200 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2201 "WWN=%08X%08X\n", path_status_desc[j].desc,
2202 path_type_desc[i].desc, cfg->cascaded_expander,
2203 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2204 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2206 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2207 "WWN=%08X%08X\n", path_status_desc[j].desc,
2208 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2209 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2210 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2217 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2218 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2219 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2224 * ipr_log64_path_elem - Log a fabric path element.
2225 * @hostrcb: hostrcb struct
2226 * @cfg: fabric path element struct
2231 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2232 struct ipr_hostrcb64_config_element *cfg)
2235 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2236 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2237 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2238 char buffer[IPR_MAX_RES_PATH_LENGTH];
2240 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2243 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2244 if (path_type_desc[i].type != type)
2247 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2248 if (path_status_desc[j].status != status)
2251 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2252 path_status_desc[j].desc, path_type_desc[i].desc,
2253 ipr_format_res_path(hostrcb->ioa_cfg,
2254 cfg->res_path, buffer, sizeof(buffer)),
2255 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2256 be32_to_cpu(cfg->wwid[0]),
2257 be32_to_cpu(cfg->wwid[1]));
2261 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2262 "WWN=%08X%08X\n", cfg->type_status,
2263 ipr_format_res_path(hostrcb->ioa_cfg,
2264 cfg->res_path, buffer, sizeof(buffer)),
2265 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2266 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2270 * ipr_log_fabric_error - Log a fabric error.
2271 * @ioa_cfg: ioa config struct
2272 * @hostrcb: hostrcb struct
2277 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2278 struct ipr_hostrcb *hostrcb)
2280 struct ipr_hostrcb_type_20_error *error;
2281 struct ipr_hostrcb_fabric_desc *fabric;
2282 struct ipr_hostrcb_config_element *cfg;
2285 error = &hostrcb->hcam.u.error.u.type_20_error;
2286 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2287 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2289 add_len = be32_to_cpu(hostrcb->hcam.length) -
2290 (offsetof(struct ipr_hostrcb_error, u) +
2291 offsetof(struct ipr_hostrcb_type_20_error, desc));
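/* add_len tracks the remaining error data; each variable-length fabric
   descriptor consumes part of it, and whatever is left over is dumped as
   raw hex below. */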
2293 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2294 ipr_log_fabric_path(hostrcb, fabric);
2295 for_each_fabric_cfg(fabric, cfg)
2296 ipr_log_path_elem(hostrcb, cfg);
2298 add_len -= be16_to_cpu(fabric->length);
2299 fabric = (struct ipr_hostrcb_fabric_desc *)
2300 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2303 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2307 * ipr_log_sis64_array_error - Log a sis64 array error.
2308 * @ioa_cfg: ioa config struct
2309 * @hostrcb: hostrcb struct
2314 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2315 struct ipr_hostrcb *hostrcb)
2318 struct ipr_hostrcb_type_24_error *error;
2319 struct ipr_hostrcb64_array_data_entry *array_entry;
2320 char buffer[IPR_MAX_RES_PATH_LENGTH];
2321 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2323 error = &hostrcb->hcam.u.error64.u.type_24_error;
2327 ipr_err("RAID %s Array Configuration: %s\n",
2328 error->protection_level,
2329 ipr_format_res_path(ioa_cfg, error->last_res_path,
2330 buffer, sizeof(buffer)));
2334 array_entry = error->array_member;
2335 num_entries = min_t(u32, error->num_entries,
2336 ARRAY_SIZE(error->array_member));
2338 for (i = 0; i < num_entries; i++, array_entry++) {
2340 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2343 if (error->exposed_mode_adn == i)
2344 ipr_err("Exposed Array Member %d:\n", i);
2346 ipr_err("Array Member %d:\n", i);
2349 ipr_log_ext_vpd(&array_entry->vpd);
2350 ipr_err("Current Location: %s\n",
2351 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2352 buffer, sizeof(buffer)));
2353 ipr_err("Expected Location: %s\n",
2354 ipr_format_res_path(ioa_cfg,
2355 array_entry->expected_res_path,
2356 buffer, sizeof(buffer)));
2363 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2364 * @ioa_cfg: ioa config struct
2365 * @hostrcb: hostrcb struct
2370 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2371 struct ipr_hostrcb *hostrcb)
2373 struct ipr_hostrcb_type_30_error *error;
2374 struct ipr_hostrcb64_fabric_desc *fabric;
2375 struct ipr_hostrcb64_config_element *cfg;
2378 error = &hostrcb->hcam.u.error64.u.type_30_error;
2380 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2381 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2383 add_len = be32_to_cpu(hostrcb->hcam.length) -
2384 (offsetof(struct ipr_hostrcb64_error, u) +
2385 offsetof(struct ipr_hostrcb_type_30_error, desc));
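/* As in the 32-bit case, walk the variable-length fabric descriptors and
   dump any residual data as hex. */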
2387 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2388 ipr_log64_fabric_path(hostrcb, fabric);
2389 for_each_fabric_cfg(fabric, cfg)
2390 ipr_log64_path_elem(hostrcb, cfg);
2392 add_len -= be16_to_cpu(fabric->length);
2393 fabric = (struct ipr_hostrcb64_fabric_desc *)
2394 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2397 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2401 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2402 * @ioa_cfg: ioa config struct
2403 * @hostrcb: hostrcb struct
2408 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2409 struct ipr_hostrcb *hostrcb)
2411 struct ipr_hostrcb_type_41_error *error;
2413 error = &hostrcb->hcam.u.error64.u.type_41_error;
2415 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2416 ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2417 ipr_log_hex_data(ioa_cfg, error->data,
2418 be32_to_cpu(hostrcb->hcam.length) -
2419 (offsetof(struct ipr_hostrcb_error, u) +
2420 offsetof(struct ipr_hostrcb_type_41_error, data)));
2423 * ipr_log_generic_error - Log an adapter error.
2424 * @ioa_cfg: ioa config struct
2425 * @hostrcb: hostrcb struct
2430 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2431 struct ipr_hostrcb *hostrcb)
2433 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2434 be32_to_cpu(hostrcb->hcam.length));
2438 * ipr_log_sis64_device_error - Log a sis64 device error.
2439 * @ioa_cfg: ioa config struct
2440 * @hostrcb: hostrcb struct
2445 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2446 struct ipr_hostrcb *hostrcb)
2448 struct ipr_hostrcb_type_21_error *error;
2449 char buffer[IPR_MAX_RES_PATH_LENGTH];
2451 error = &hostrcb->hcam.u.error64.u.type_21_error;
2453 ipr_err("-----Failing Device Information-----\n");
2454 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2455 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2456 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2457 ipr_err("Device Resource Path: %s\n",
2458 __ipr_format_res_path(error->res_path,
2459 buffer, sizeof(buffer)));
2460 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2461 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2462 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2463 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2464 ipr_err("SCSI Sense Data:\n");
2465 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2466 ipr_err("SCSI Command Descriptor Block:\n");
2467 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2469 ipr_err("Additional IOA Data:\n");
2470 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2474 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2477 * This function will return the index into the ipr_error_table
2478 * for the specified IOASC. If the IOASC is not in the table,
2479 * 0 will be returned, which points to the entry used for unknown errors.
2482 * index into the ipr_error_table
2484 static u32 ipr_get_error(u32 ioasc)
2488 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2489 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2496 * ipr_handle_log_data - Log an adapter error.
2497 * @ioa_cfg: ioa config struct
2498 * @hostrcb: hostrcb struct
2500 * This function logs an adapter error to the system.
2505 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2506 struct ipr_hostrcb *hostrcb)
2510 struct ipr_hostrcb_type_21_error *error;
2512 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2515 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2516 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2519 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2521 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2523 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2524 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2525 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2526 scsi_report_bus_reset(ioa_cfg->host,
2527 hostrcb->hcam.u.error.fd_res_addr.bus);
2530 error_index = ipr_get_error(ioasc);
2532 if (!ipr_error_table[error_index].log_hcam)
2535 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2536 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2537 error = &hostrcb->hcam.u.error64.u.type_21_error;
2539 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2540 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2544 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2546 /* Set indication we have logged an error */
2547 ioa_cfg->errors_logged++;
2549 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2551 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2552 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2554 switch (hostrcb->hcam.overlay_id) {
2555 case IPR_HOST_RCB_OVERLAY_ID_2:
2556 ipr_log_cache_error(ioa_cfg, hostrcb);
2558 case IPR_HOST_RCB_OVERLAY_ID_3:
2559 ipr_log_config_error(ioa_cfg, hostrcb);
2561 case IPR_HOST_RCB_OVERLAY_ID_4:
2562 case IPR_HOST_RCB_OVERLAY_ID_6:
2563 ipr_log_array_error(ioa_cfg, hostrcb);
2565 case IPR_HOST_RCB_OVERLAY_ID_7:
2566 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2568 case IPR_HOST_RCB_OVERLAY_ID_12:
2569 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2571 case IPR_HOST_RCB_OVERLAY_ID_13:
2572 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2574 case IPR_HOST_RCB_OVERLAY_ID_14:
2575 case IPR_HOST_RCB_OVERLAY_ID_16:
2576 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2578 case IPR_HOST_RCB_OVERLAY_ID_17:
2579 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2581 case IPR_HOST_RCB_OVERLAY_ID_20:
2582 ipr_log_fabric_error(ioa_cfg, hostrcb);
2584 case IPR_HOST_RCB_OVERLAY_ID_21:
2585 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2587 case IPR_HOST_RCB_OVERLAY_ID_23:
2588 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2590 case IPR_HOST_RCB_OVERLAY_ID_24:
2591 case IPR_HOST_RCB_OVERLAY_ID_26:
2592 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2594 case IPR_HOST_RCB_OVERLAY_ID_30:
2595 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2597 case IPR_HOST_RCB_OVERLAY_ID_41:
2598 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2600 case IPR_HOST_RCB_OVERLAY_ID_1:
2601 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2603 ipr_log_generic_error(ioa_cfg, hostrcb);
2608 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2610 struct ipr_hostrcb *hostrcb;
2612 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2613 struct ipr_hostrcb, queue);
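/* No free HCAM buffers are available: reclaim one from the queue of buffers
   still waiting to be reported rather than returning NULL. */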
2615 if (unlikely(!hostrcb)) {
2616 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2617 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2618 struct ipr_hostrcb, queue);
2621 list_del_init(&hostrcb->queue);
2626 * ipr_process_error - Op done function for an adapter error log.
2627 * @ipr_cmd: ipr command struct
2629 * This function is the op done function for an error log host
2630 * controlled async from the adapter. It will log the error and
2631 * send the HCAM back to the adapter.
2636 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2639 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2640 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2644 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2646 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2648 list_del_init(&hostrcb->queue);
2649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2652 ipr_handle_log_data(ioa_cfg, hostrcb);
2653 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2654 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2655 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2656 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2657 dev_err(&ioa_cfg->pdev->dev,
2658 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2661 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2662 schedule_work(&ioa_cfg->work_q);
2663 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2665 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2669 * ipr_timeout - An internally generated op has timed out.
2670 * @t: Timer context used to fetch ipr command struct
2672 * This function blocks host requests and initiates an adapter reset.
2678 static void ipr_timeout(struct timer_list *t)
2680 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2681 unsigned long lock_flags = 0;
2682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687 ioa_cfg->errors_logged++;
2688 dev_err(&ioa_cfg->pdev->dev,
2689 "Adapter being reset due to command timeout.\n");
2691 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2692 ioa_cfg->sdt_state = GET_DUMP;
2694 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2695 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2702 * ipr_oper_timeout - Adapter timed out transitioning to operational
2703 * @t: Timer context used to fetch ipr command struct
2705 * This function blocks host requests and initiates an adapter reset.
2711 static void ipr_oper_timeout(struct timer_list *t)
2713 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2714 unsigned long lock_flags = 0;
2715 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2718 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2720 ioa_cfg->errors_logged++;
2721 dev_err(&ioa_cfg->pdev->dev,
2722 "Adapter timed out transitioning to operational.\n");
2724 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2725 ioa_cfg->sdt_state = GET_DUMP;
2727 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2729 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2730 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2738 * ipr_find_ses_entry - Find matching SES in SES table
2739 * @res: resource entry struct of SES
2742 * pointer to SES table entry / NULL on failure
2744 static const struct ipr_ses_table_entry *
2745 ipr_find_ses_entry(struct ipr_resource_entry *res)
2748 struct ipr_std_inq_vpids *vpids;
2749 const struct ipr_ses_table_entry *ste = ipr_ses_table;
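/* Assumption from the comparison below: an 'X' in compare_product_id_byte
   marks a product ID byte that must match exactly; any other value is
   treated as a don't-care and counts as a match. */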
2751 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2752 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2753 if (ste->compare_product_id_byte[j] == 'X') {
2754 vpids = &res->std_inq_data.vpids;
2755 if (vpids->product_id[j] == ste->product_id[j])
2763 if (matches == IPR_PROD_ID_LEN)
2771 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2772 * @ioa_cfg: ioa config struct
2774 * @bus_width: bus width
2777 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2778 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2779 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2780 * max 160MHz = max 320MB/sec).
2782 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2784 struct ipr_resource_entry *res;
2785 const struct ipr_ses_table_entry *ste;
2786 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2788 /* Loop through each config table entry in the config table buffer */
2789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2790 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2793 if (bus != res->bus)
2796 if (!(ste = ipr_find_ses_entry(res)))
2799 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2802 return max_xfer_rate;
2806 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2807 * @ioa_cfg: ioa config struct
2808 * @max_delay: max delay in micro-seconds to wait
2810 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2813 * 0 on success / other on failure
2815 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2817 volatile u32 pcii_reg;
2820 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2821 while (delay < max_delay) {
2822 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2824 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2827 /* udelay cannot be used if delay is more than a few milliseconds */
2828 if ((delay / 1000) > MAX_UDELAY_MS)
2829 mdelay(delay / 1000);
2839 * ipr_get_sis64_dump_data_section - Dump IOA memory
2840 * @ioa_cfg: ioa config struct
2841 * @start_addr: adapter address to dump
2842 * @dest: destination kernel buffer
2843 * @length_in_words: length to dump in 4 byte words
2848 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2850 __be32 *dest, u32 length_in_words)
2854 for (i = 0; i < length_in_words; i++) {
2855 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2856 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2864 * ipr_get_ldump_data_section - Dump IOA memory
2865 * @ioa_cfg: ioa config struct
2866 * @start_addr: adapter address to dump
2867 * @dest: destination kernel buffer
2868 * @length_in_words: length to dump in 4 byte words
2871 * 0 on success / -EIO on failure
2873 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2875 __be32 *dest, u32 length_in_words)
2877 volatile u32 temp_pcii_reg;
2881 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2882 dest, length_in_words);
2884 /* Write IOA interrupt reg starting LDUMP state */
2885 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2886 ioa_cfg->regs.set_uproc_interrupt_reg32);
2888 /* Wait for IO debug acknowledge */
2889 if (ipr_wait_iodbg_ack(ioa_cfg,
2890 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2891 dev_err(&ioa_cfg->pdev->dev,
2892 "IOA dump long data transfer timeout\n");
2896 /* Signal LDUMP interlocked - clear IO debug ack */
2897 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2898 ioa_cfg->regs.clr_interrupt_reg);
2900 /* Write Mailbox with starting address */
2901 writel(start_addr, ioa_cfg->ioa_mailbox);
2903 /* Signal address valid - clear IOA Reset alert */
2904 writel(IPR_UPROCI_RESET_ALERT,
2905 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2907 for (i = 0; i < length_in_words; i++) {
2908 /* Wait for IO debug acknowledge */
2909 if (ipr_wait_iodbg_ack(ioa_cfg,
2910 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2911 dev_err(&ioa_cfg->pdev->dev,
2912 "IOA dump short data transfer timeout\n");
2916 /* Read data from mailbox and increment destination pointer */
2917 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2920 /* For all but the last word of data, signal data received */
2921 if (i < (length_in_words - 1)) {
2922 /* Signal dump data received - Clear IO debug Ack */
2923 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924 ioa_cfg->regs.clr_interrupt_reg);
2928 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2929 writel(IPR_UPROCI_RESET_ALERT,
2930 ioa_cfg->regs.set_uproc_interrupt_reg32);
2932 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2933 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2935 /* Signal dump data received - Clear IO debug Ack */
2936 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2937 ioa_cfg->regs.clr_interrupt_reg);
2939 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2940 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2942 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2944 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2954 #ifdef CONFIG_SCSI_IPR_DUMP
2956 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2957 * @ioa_cfg: ioa config struct
2958 * @pci_address: adapter address
2959 * @length: length of data to copy
2961 * Copy data from PCI adapter to kernel buffer.
2962 * Note: length MUST be a 4 byte multiple
2964 * 0 on success / other on failure
2966 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2967 unsigned long pci_address, u32 length)
2969 int bytes_copied = 0;
2970 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2972 unsigned long lock_flags = 0;
2973 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2976 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2978 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
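/* Copy the dump one page at a time, allocating a fresh page whenever the
   current one fills up. */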
2980 while (bytes_copied < length &&
2981 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2982 if (ioa_dump->page_offset >= PAGE_SIZE ||
2983 ioa_dump->page_offset == 0) {
2984 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2988 return bytes_copied;
2991 ioa_dump->page_offset = 0;
2992 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2993 ioa_dump->next_page_index++;
2995 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2997 rem_len = length - bytes_copied;
2998 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2999 cur_len = min(rem_len, rem_page_len);
3001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3002 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3005 rc = ipr_get_ldump_data_section(ioa_cfg,
3006 pci_address + bytes_copied,
3007 &page[ioa_dump->page_offset / 4],
3008 (cur_len / sizeof(u32)));
3010 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3013 ioa_dump->page_offset += cur_len;
3014 bytes_copied += cur_len;
3022 return bytes_copied;
3026 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3027 * @hdr: dump entry header struct
3032 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3034 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3036 hdr->offset = sizeof(*hdr);
3037 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3041 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3042 * @ioa_cfg: ioa config struct
3043 * @driver_dump: driver dump struct
3048 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3049 struct ipr_driver_dump *driver_dump)
3051 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3053 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3054 driver_dump->ioa_type_entry.hdr.len =
3055 sizeof(struct ipr_dump_ioa_type_entry) -
3056 sizeof(struct ipr_dump_entry_header);
3057 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3058 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3059 driver_dump->ioa_type_entry.type = ioa_cfg->type;
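/* Pack the firmware version as one byte each of major release, card type,
   and the two minor release bytes. */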
3060 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3061 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3062 ucode_vpd->minor_release[1];
3063 driver_dump->hdr.num_entries++;
3067 * ipr_dump_version_data - Fill in the driver version in the dump.
3068 * @ioa_cfg: ioa config struct
3069 * @driver_dump: driver dump struct
3074 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3075 struct ipr_driver_dump *driver_dump)
3077 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3078 driver_dump->version_entry.hdr.len =
3079 sizeof(struct ipr_dump_version_entry) -
3080 sizeof(struct ipr_dump_entry_header);
3081 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3082 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3083 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3084 driver_dump->hdr.num_entries++;
3088 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3089 * @ioa_cfg: ioa config struct
3090 * @driver_dump: driver dump struct
3095 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3096 struct ipr_driver_dump *driver_dump)
3098 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3099 driver_dump->trace_entry.hdr.len =
3100 sizeof(struct ipr_dump_trace_entry) -
3101 sizeof(struct ipr_dump_entry_header);
3102 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3104 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3105 driver_dump->hdr.num_entries++;
3109 * ipr_dump_location_data - Fill in the IOA location in the dump.
3110 * @ioa_cfg: ioa config struct
3111 * @driver_dump: driver dump struct
3116 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3117 struct ipr_driver_dump *driver_dump)
3119 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3120 driver_dump->location_entry.hdr.len =
3121 sizeof(struct ipr_dump_location_entry) -
3122 sizeof(struct ipr_dump_entry_header);
3123 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3124 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3125 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3126 driver_dump->hdr.num_entries++;
3130 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3131 * @ioa_cfg: ioa config struct
3132 * @dump: dump struct
3137 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3139 unsigned long start_addr, sdt_word;
3140 unsigned long lock_flags = 0;
3141 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3142 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3143 u32 num_entries, max_num_entries, start_off, end_off;
3144 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3145 struct ipr_sdt *sdt;
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3153 if (ioa_cfg->sdt_state != READ_DUMP) {
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158 if (ioa_cfg->sis64) {
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160 ssleep(IPR_DUMP_DELAY_SECONDS);
3161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3164 start_addr = readl(ioa_cfg->ioa_mailbox);
3166 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3167 dev_err(&ioa_cfg->pdev->dev,
3168 "Invalid dump table format: %lx\n", start_addr);
3169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3175 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3177 /* Initialize the overall dump header */
3178 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3179 driver_dump->hdr.num_entries = 1;
3180 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3181 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3182 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3183 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3185 ipr_dump_version_data(ioa_cfg, driver_dump);
3186 ipr_dump_location_data(ioa_cfg, driver_dump);
3187 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3188 ipr_dump_trace_data(ioa_cfg, driver_dump);
3190 /* Update dump_header */
3191 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3193 /* IOA Dump entry */
3194 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3195 ioa_dump->hdr.len = 0;
3196 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3197 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3199 /* First entries in sdt are actually a list of dump addresses and
3200 lengths to gather the real dump data. sdt represents the pointer
3201 to the ioa generated dump table. Dump data will be extracted based
3202 on entries in this table */
3203 sdt = &ioa_dump->sdt;
3205 if (ioa_cfg->sis64) {
3206 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3207 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3209 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3210 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3213 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3214 (max_num_entries * sizeof(struct ipr_sdt_entry));
3215 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3216 bytes_to_copy / sizeof(__be32));
3218 /* Smart Dump table is ready to use and the first entry is valid */
3219 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3220 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3221 dev_err(&ioa_cfg->pdev->dev,
3222 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3223 rc, be32_to_cpu(sdt->hdr.state));
3224 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3225 ioa_cfg->sdt_state = DUMP_OBTAINED;
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3232 if (num_entries > max_num_entries)
3233 num_entries = max_num_entries;
3235 /* Update dump length to the actual data to be copied */
3236 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3238 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3240 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3244 for (i = 0; i < num_entries; i++) {
3245 if (ioa_dump->hdr.len > max_dump_size) {
3246 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3250 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3251 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3253 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3255 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3256 end_off = be32_to_cpu(sdt->entry[i].end_token);
3258 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3259 bytes_to_copy = end_off - start_off;
3264 if (bytes_to_copy > max_dump_size) {
3265 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3269 /* Copy data from adapter to driver buffers */
3270 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3273 ioa_dump->hdr.len += bytes_copied;
3275 if (bytes_copied != bytes_to_copy) {
3276 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3283 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3285 /* Update dump_header */
3286 driver_dump->hdr.len += ioa_dump->hdr.len;
3288 ioa_cfg->sdt_state = DUMP_OBTAINED;
3293 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3297 * ipr_release_dump - Free adapter dump memory
3298 * @kref: kref struct
3303 static void ipr_release_dump(struct kref *kref)
3305 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3306 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3307 unsigned long lock_flags = 0;
3311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3312 ioa_cfg->dump = NULL;
3313 ioa_cfg->sdt_state = INACTIVE;
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3317 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3319 vfree(dump->ioa_dump.ioa_data);
3324 static void ipr_add_remove_thread(struct work_struct *work)
3326 unsigned long lock_flags;
3327 struct ipr_resource_entry *res;
3328 struct scsi_device *sdev;
3329 struct ipr_ioa_cfg *ioa_cfg =
3330 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3331 u8 bus, target, lun;
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
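/* Two passes over the resource list: first remove devices flagged
   del_from_ml, then (below) add any flagged add_to_ml. */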
3345 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3346 if (res->del_from_ml && res->sdev) {
3349 if (!scsi_device_get(sdev)) {
3350 if (!res->add_to_ml)
3351 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3353 res->del_from_ml = 0;
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355 scsi_remove_device(sdev);
3356 scsi_device_put(sdev);
3357 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3365 if (res->add_to_ml) {
3367 target = res->target;
3370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3371 scsi_add_device(ioa_cfg->host, bus, target, lun);
3372 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3377 ioa_cfg->scan_done = 1;
3378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3379 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3384 * ipr_worker_thread - Worker thread
3385 * @work: ioa config struct
3387 * Called at task level from a work thread. This function takes care
3388 * of adding and removing device from the mid-layer as configuration
3389 * changes are detected by the adapter.
3394 static void ipr_worker_thread(struct work_struct *work)
3396 unsigned long lock_flags;
3397 struct ipr_dump *dump;
3398 struct ipr_ioa_cfg *ioa_cfg =
3399 container_of(work, struct ipr_ioa_cfg, work_q);
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404 if (ioa_cfg->sdt_state == READ_DUMP) {
3405 dump = ioa_cfg->dump;
3407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3410 kref_get(&dump->kref);
3411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412 ipr_get_ioa_dump(ioa_cfg, dump);
3413 kref_put(&dump->kref, ipr_release_dump);
3415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3416 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3417 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 if (ioa_cfg->scsi_unblock) {
3423 ioa_cfg->scsi_unblock = 0;
3424 ioa_cfg->scsi_blocked = 0;
3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426 scsi_unblock_requests(ioa_cfg->host);
3427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
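/* If another thread blocked requests while the lock was dropped, honor
   that and re-block them. */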
3428 if (ioa_cfg->scsi_blocked)
3429 scsi_block_requests(ioa_cfg->host);
3432 if (!ioa_cfg->scan_enabled) {
3433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3437 schedule_work(&ioa_cfg->scsi_add_work_q);
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443 #ifdef CONFIG_SCSI_IPR_TRACE
3445 * ipr_read_trace - Dump the adapter trace
3446 * @filp: open sysfs file
3447 * @kobj: kobject struct
3448 * @bin_attr: bin_attribute struct
3451 * @count: buffer size
3454 * number of bytes printed to buffer
3456 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3457 struct bin_attribute *bin_attr,
3458 char *buf, loff_t off, size_t count)
3460 struct device *dev = kobj_to_dev(kobj);
3461 struct Scsi_Host *shost = class_to_shost(dev);
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
3466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3467 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474 static struct bin_attribute ipr_trace_attr = {
3480 .read = ipr_read_trace,
3485 * ipr_show_fw_version - Show the firmware version
3486 * @dev: class device struct
3487 * @attr: device attribute (unused)
3491 * number of bytes printed to buffer
3493 static ssize_t ipr_show_fw_version(struct device *dev,
3494 struct device_attribute *attr, char *buf)
3496 struct Scsi_Host *shost = class_to_shost(dev);
3497 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3498 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3499 unsigned long lock_flags = 0;
3502 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3503 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3504 ucode_vpd->major_release, ucode_vpd->card_type,
3505 ucode_vpd->minor_release[0],
3506 ucode_vpd->minor_release[1]);
3507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3511 static struct device_attribute ipr_fw_version_attr = {
3513 .name = "fw_version",
3516 .show = ipr_show_fw_version,
3520 * ipr_show_log_level - Show the adapter's error logging level
3521 * @dev: class device struct
3522 * @attr: device attribute (unused)
3526 * number of bytes printed to buffer
3528 static ssize_t ipr_show_log_level(struct device *dev,
3529 struct device_attribute *attr, char *buf)
3531 struct Scsi_Host *shost = class_to_shost(dev);
3532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3533 unsigned long lock_flags = 0;
3536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3537 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3543 * ipr_store_log_level - Change the adapter's error logging level
3544 * @dev: class device struct
3545 * @attr: device attribute (unused)
3547 * @count: buffer size
3550 * number of bytes consumed from buffer
3552 static ssize_t ipr_store_log_level(struct device *dev,
3553 struct device_attribute *attr,
3554 const char *buf, size_t count)
3556 struct Scsi_Host *shost = class_to_shost(dev);
3557 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3558 unsigned long lock_flags = 0;
3560 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3561 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3562 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3566 static struct device_attribute ipr_log_level_attr = {
3568 .name = "log_level",
3569 .mode = S_IRUGO | S_IWUSR,
3571 .show = ipr_show_log_level,
3572 .store = ipr_store_log_level
3576 * ipr_store_diagnostics - IOA Diagnostics interface
3577 * @dev: device struct
3578 * @attr: device attribute (unused)
3580 * @count: buffer size
3582 * This function will reset the adapter and wait a reasonable
3583 * amount of time for any errors that the adapter might log.
3586 * count on success / other on failure
3588 static ssize_t ipr_store_diagnostics(struct device *dev,
3589 struct device_attribute *attr,
3590 const char *buf, size_t count)
3592 struct Scsi_Host *shost = class_to_shost(dev);
3593 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3594 unsigned long lock_flags = 0;
3597 if (!capable(CAP_SYS_ADMIN))
3600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3601 while (ioa_cfg->in_reset_reload) {
3602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3603 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3607 ioa_cfg->errors_logged = 0;
3608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
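/* Wait for the reset we just initiated to finish, then give the adapter a
   moment to log any errors before checking errors_logged. */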
3610 if (ioa_cfg->in_reset_reload) {
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3614 /* Wait for a second for any errors to be logged */
3617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3622 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3624 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3629 static struct device_attribute ipr_diagnostics_attr = {
3631 .name = "run_diagnostics",
3634 .store = ipr_store_diagnostics
3638 * ipr_show_adapter_state - Show the adapter's state
3639 * @dev: device struct
3640 * @attr: device attribute (unused)
3644 * number of bytes printed to buffer
3646 static ssize_t ipr_show_adapter_state(struct device *dev,
3647 struct device_attribute *attr, char *buf)
3649 struct Scsi_Host *shost = class_to_shost(dev);
3650 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3651 unsigned long lock_flags = 0;
3654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3656 len = snprintf(buf, PAGE_SIZE, "offline\n");
3658 len = snprintf(buf, PAGE_SIZE, "online\n");
3659 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664 * ipr_store_adapter_state - Change adapter state
3665 * @dev: device struct
3666 * @attr: device attribute (unused)
3668 * @count: buffer size
3670 * This function will change the adapter's state.
3673 * count on success / other on failure
3675 static ssize_t ipr_store_adapter_state(struct device *dev,
3676 struct device_attribute *attr,
3677 const char *buf, size_t count)
3679 struct Scsi_Host *shost = class_to_shost(dev);
3680 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3681 unsigned long lock_flags;
3682 int result = count, i;
3684 if (!capable(CAP_SYS_ADMIN))
3687 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3688 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3689 !strncmp(buf, "online", 6)) {
3690 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3691 spin_lock(&ioa_cfg->hrrq[i]._lock);
3692 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3693 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3696 ioa_cfg->reset_retries = 0;
3697 ioa_cfg->in_ioa_bringdown = 0;
3698 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3701 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3706 static struct device_attribute ipr_ioa_state_attr = {
3708 .name = "online_state",
3709 .mode = S_IRUGO | S_IWUSR,
3711 .show = ipr_show_adapter_state,
3712 .store = ipr_store_adapter_state
3716 * ipr_store_reset_adapter - Reset the adapter
3717 * @dev: device struct
3718 * @attr: device attribute (unused)
3720 * @count: buffer size
3722 * This function will reset the adapter.
3725 * count on success / other on failure
3727 static ssize_t ipr_store_reset_adapter(struct device *dev,
3728 struct device_attribute *attr,
3729 const char *buf, size_t count)
3731 struct Scsi_Host *shost = class_to_shost(dev);
3732 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3733 unsigned long lock_flags;
3736 if (!capable(CAP_SYS_ADMIN))
3739 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3740 if (!ioa_cfg->in_reset_reload)
3741 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3743 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3748 static struct device_attribute ipr_ioa_reset_attr = {
3750 .name = "reset_host",
3753 .store = ipr_store_reset_adapter
3756 static int ipr_iopoll(struct irq_poll *iop, int budget);
3758 * ipr_show_iopoll_weight - Show ipr polling mode
3759 * @dev: class device struct
3760 * @attr: device attribute (unused)
3764 * number of bytes printed to buffer
3766 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3767 struct device_attribute *attr, char *buf)
3769 struct Scsi_Host *shost = class_to_shost(dev);
3770 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3771 unsigned long lock_flags = 0;
3774 spin_lock_irqsave(shost->host_lock, lock_flags);
3775 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3776 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3782 * ipr_store_iopoll_weight - Change the adapter's polling mode
3783 * @dev: class device struct
3784 * @attr: device attribute (unused)
3786 * @count: buffer size
3789 * number of bytes consumed from buffer
3791 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3792 struct device_attribute *attr,
3793 const char *buf, size_t count)
3795 struct Scsi_Host *shost = class_to_shost(dev);
3796 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3797 unsigned long user_iopoll_weight;
3798 unsigned long lock_flags = 0;
3801 if (!ioa_cfg->sis64) {
3802 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3805 if (kstrtoul(buf, 10, &user_iopoll_weight))
3808 if (user_iopoll_weight > 256) {
3809 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
3813 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3814 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to that value\n");
3818 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
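/* Quiesce any per-HRRQ irq_poll contexts that were set up with the old
   weight before applying the new one. */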
3819 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3820 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3823 spin_lock_irqsave(shost->host_lock, lock_flags);
3824 ioa_cfg->iopoll_weight = user_iopoll_weight;
3825 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3826 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3827 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3828 ioa_cfg->iopoll_weight, ipr_iopoll);
3831 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3836 static struct device_attribute ipr_iopoll_weight_attr = {
3838 .name = "iopoll_weight",
3839 .mode = S_IRUGO | S_IWUSR,
3841 .show = ipr_show_iopoll_weight,
3842 .store = ipr_store_iopoll_weight
3846 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3847 * @buf_len: buffer length
3849 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3850 * list to use for microcode download
3853 * pointer to sglist / NULL on failure
3855 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3858 struct ipr_sglist *sglist;
3860 /* Get the minimum size per scatter/gather element */
3861 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3863 /* Get the actual size per element */
3864 order = get_order(sg_size);
3866 /* Allocate a scatter/gather list for the DMA */
3867 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3868 if (sglist == NULL) {
3872 sglist->order = order;
3873 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3875 if (!sglist->scatterlist) {
3884 * ipr_free_ucode_buffer - Frees a microcode download buffer
3885 * @sglist: scatter/gather list pointer
3887 * Free a DMA'able ucode download buffer previously allocated with
3888 * ipr_alloc_ucode_buffer
3893 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3895 sgl_free_order(sglist->scatterlist, sglist->order);
3900 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3901 * @sglist: scatter/gather list pointer
3902 * @buffer: buffer pointer
3903 * @len: buffer length
3905 * Copy a microcode image from a user buffer into a buffer allocated by
3906 * ipr_alloc_ucode_buffer
3909 * 0 on success / other on failure
3911 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3912 u8 *buffer, u32 len)
3914 int bsize_elem, i, result = 0;
3915 struct scatterlist *sg;
3918 /* Determine the actual number of bytes per element */
3919 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3921 sg = sglist->scatterlist;
3923 for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3924 buffer += bsize_elem) {
3925 struct page *page = sg_page(sg);
3928 memcpy(kaddr, buffer, bsize_elem);
3931 sg->length = bsize_elem;
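/* Copy whatever remains after the last whole element, if the image length
   is not an exact multiple of bsize_elem. */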
3939 if (len % bsize_elem) {
3940 struct page *page = sg_page(sg);
3943 memcpy(kaddr, buffer, len % bsize_elem);
3946 sg->length = len % bsize_elem;
3949 sglist->buffer_len = len;
3954 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3955 * @ipr_cmd: ipr command struct
3956 * @sglist: scatter/gather list
3958 * Builds a microcode download IOA data list (IOADL).
3961 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3962 struct ipr_sglist *sglist)
3964 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3965 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3966 struct scatterlist *scatterlist = sglist->scatterlist;
3967 struct scatterlist *sg;
3970 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3971 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3972 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3975 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3976 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3977 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3978 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3979 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3982 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3986 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3987 * @ipr_cmd: ipr command struct
3988 * @sglist: scatter/gather list
3990 * Builds a microcode download IOA data list (IOADL).
3993 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3994 struct ipr_sglist *sglist)
3996 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3997 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3998 struct scatterlist *scatterlist = sglist->scatterlist;
3999 struct scatterlist *sg;
4002 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
4003 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4004 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4007 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4009 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
4010 ioadl[i].flags_and_data_len =
4011 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
4013 cpu_to_be32(sg_dma_address(sg));
4016 ioadl[i-1].flags_and_data_len |=
4017 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4021 * ipr_update_ioa_ucode - Update IOA's microcode
4022 * @ioa_cfg: ioa config struct
4023 * @sglist: scatter/gather list
4025 * Initiate an adapter reset to update the IOA's microcode
4028 * 0 on success / -EIO on failure
4030 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4031 struct ipr_sglist *sglist)
4033 unsigned long lock_flags;
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4036 while (ioa_cfg->in_reset_reload) {
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4042 if (ioa_cfg->ucode_sglist) {
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044 dev_err(&ioa_cfg->pdev->dev,
4045 "Microcode download already in progress\n");
4049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4050 sglist->scatterlist, sglist->num_sg,
4053 if (!sglist->num_dma_sg) {
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055 dev_err(&ioa_cfg->pdev->dev,
4056 "Failed to map microcode download buffer!\n");
4060 ioa_cfg->ucode_sglist = sglist;
4061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066 ioa_cfg->ucode_sglist = NULL;
4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
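/*
 * The microcode itself is written to the adapter by the reset job: the reset
 * path sees a non-NULL ioa_cfg->ucode_sglist, downloads the buffer to the
 * IOA, and wakes reset_wait_q once the adapter is operational again.
 */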
4072 * ipr_store_update_fw - Update the firmware on the adapter
4073 * @dev: device struct
4074 * @attr: device attribute (unused)
4076 * @count: buffer size
4078 * This function will update the firmware on the adapter.
4081 * count on success / other on failure
4083 static ssize_t ipr_store_update_fw(struct device *dev,
4084 struct device_attribute *attr,
4085 const char *buf, size_t count)
4087 struct Scsi_Host *shost = class_to_shost(dev);
4088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4089 struct ipr_ucode_image_header *image_hdr;
4090 const struct firmware *fw_entry;
4091 struct ipr_sglist *sglist;
4095 int result, dnld_size;
4097 if (!capable(CAP_SYS_ADMIN))
4100 snprintf(fname, sizeof(fname), "%s", buf);
4102 endline = strchr(fname, '\n');
if (endline)
	*endline = '\0';
4106 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4107 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4111 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4113 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4114 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4115 sglist = ipr_alloc_ucode_buffer(dnld_size);
4118 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4119 release_firmware(fw_entry);
4123 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4126 dev_err(&ioa_cfg->pdev->dev,
4127 "Microcode buffer copy to DMA buffer failed\n");
4131 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4133 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4138 ipr_free_ucode_buffer(sglist);
4139 release_firmware(fw_entry);
4143 static struct device_attribute ipr_update_fw_attr = {
4145 .name = "update_fw",
4148 .store = ipr_store_update_fw
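/*
 * Typical usage (sysfs path assumed): write the firmware image file name to
 * the adapter's update_fw attribute, e.g.
 *	echo <image_name> > /sys/class/scsi_host/hostN/update_fw
 * which loads the file via request_firmware() and resets the adapter to
 * apply it.
 */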
4152 * ipr_show_fw_type - Show the adapter's firmware type.
4153 * @dev: class device struct
4154 * @attr: device attribute (unused)
4158 * number of bytes printed to buffer
4160 static ssize_t ipr_show_fw_type(struct device *dev,
4161 struct device_attribute *attr, char *buf)
4163 struct Scsi_Host *shost = class_to_shost(dev);
4164 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4165 unsigned long lock_flags = 0;
4168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4169 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4170 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4174 static struct device_attribute ipr_ioa_fw_type_attr = {
4179 .show = ipr_show_fw_type
4182 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4183 struct bin_attribute *bin_attr, char *buf,
4184 loff_t off, size_t count)
4186 struct device *cdev = kobj_to_dev(kobj);
4187 struct Scsi_Host *shost = class_to_shost(cdev);
4188 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4189 struct ipr_hostrcb *hostrcb;
4190 unsigned long lock_flags = 0;
4193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4194 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4195 struct ipr_hostrcb, queue);
4197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4200 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4201 sizeof(hostrcb->hcam));
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4206 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4207 struct bin_attribute *bin_attr, char *buf,
4208 loff_t off, size_t count)
4210 struct device *cdev = kobj_to_dev(kobj);
4211 struct Scsi_Host *shost = class_to_shost(cdev);
4212 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4213 struct ipr_hostrcb *hostrcb;
4214 unsigned long lock_flags = 0;
4216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4217 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4218 struct ipr_hostrcb, queue);
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4224 /* Reclaim hostrcb before exit */
4225 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4230 static struct bin_attribute ipr_ioa_async_err_log = {
4232 .name = "async_err_log",
4233 .mode = S_IRUGO | S_IWUSR,
4236 .read = ipr_read_async_err_log,
4237 .write = ipr_next_async_err_log
4240 static struct attribute *ipr_ioa_attrs[] = {
4241 &ipr_fw_version_attr.attr,
4242 &ipr_log_level_attr.attr,
4243 &ipr_diagnostics_attr.attr,
4244 &ipr_ioa_state_attr.attr,
4245 &ipr_ioa_reset_attr.attr,
4246 &ipr_update_fw_attr.attr,
4247 &ipr_ioa_fw_type_attr.attr,
4248 &ipr_iopoll_weight_attr.attr,
4252 ATTRIBUTE_GROUPS(ipr_ioa);
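/*
 * ATTRIBUTE_GROUPS() expands ipr_ioa_attrs into the ipr_ioa_groups array
 * referenced by the SCSI host template.
 */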
4254 #ifdef CONFIG_SCSI_IPR_DUMP
4256 * ipr_read_dump - Dump the adapter
4257 * @filp: open sysfs file
4258 * @kobj: kobject struct
4259 * @bin_attr: bin_attribute struct
4262 * @count: buffer size
4265 * number of bytes read on success / other on failure
4267 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4268 struct bin_attribute *bin_attr,
4269 char *buf, loff_t off, size_t count)
4271 struct device *cdev = kobj_to_dev(kobj);
4272 struct Scsi_Host *shost = class_to_shost(cdev);
4273 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4274 struct ipr_dump *dump;
4275 unsigned long lock_flags = 0;
4280 if (!capable(CAP_SYS_ADMIN))
4283 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4284 dump = ioa_cfg->dump;
4286 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4290 kref_get(&dump->kref);
4291 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4293 if (off > dump->driver_dump.hdr.len) {
4294 kref_put(&dump->kref, ipr_release_dump);
4298 if (off + count > dump->driver_dump.hdr.len) {
4299 count = dump->driver_dump.hdr.len - off;
4303 if (count && off < sizeof(dump->driver_dump)) {
4304 if (off + count > sizeof(dump->driver_dump))
4305 len = sizeof(dump->driver_dump) - off;
4308 src = (u8 *)&dump->driver_dump + off;
4309 memcpy(buf, src, len);
4315 off -= sizeof(dump->driver_dump);
4318 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4319 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4320 sizeof(struct ipr_sdt_entry));
4322 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4323 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4325 if (count && off < sdt_end) {
4326 if (off + count > sdt_end)
4327 len = sdt_end - off;
4330 src = (u8 *)&dump->ioa_dump + off;
4331 memcpy(buf, src, len);
4340 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4341 len = PAGE_ALIGN(off) - off;
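/*
 * ioa_dump.ioa_data is an array of page pointers, so index it by the page
 * number of the current offset and copy at most up to the next page
 * boundary on each pass.
 */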
4344 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4345 src += off & ~PAGE_MASK;
4346 memcpy(buf, src, len);
4352 kref_put(&dump->kref, ipr_release_dump);
4357 * ipr_alloc_dump - Prepare for adapter dump
4358 * @ioa_cfg: ioa config struct
4361 * 0 on success / other on failure
4363 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4365 struct ipr_dump *dump;
4367 unsigned long lock_flags = 0;
4369 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4372 ipr_err("Dump memory allocation failed\n");
4377 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4380 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4384 ipr_err("Dump memory allocation failed\n");
4389 dump->ioa_dump.ioa_data = ioa_data;
4391 kref_init(&dump->kref);
4392 dump->ioa_cfg = ioa_cfg;
4394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4396 if (INACTIVE != ioa_cfg->sdt_state) {
4397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4398 vfree(dump->ioa_dump.ioa_data);
4403 ioa_cfg->dump = dump;
4404 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4405 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4406 ioa_cfg->dump_taken = 1;
4407 schedule_work(&ioa_cfg->work_q);
4409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4415 * ipr_free_dump - Free adapter dump memory
4416 * @ioa_cfg: ioa config struct
4419 * 0 on success / other on failure
4421 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4423 struct ipr_dump *dump;
4424 unsigned long lock_flags = 0;
4428 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4429 dump = ioa_cfg->dump;
4431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4435 ioa_cfg->dump = NULL;
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438 kref_put(&dump->kref, ipr_release_dump);
4445 * ipr_write_dump - Setup dump state of adapter
4446 * @filp: open sysfs file
4447 * @kobj: kobject struct
4448 * @bin_attr: bin_attribute struct
4451 * @count: buffer size
4454 * count on success / other on failure
4456 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4457 struct bin_attribute *bin_attr,
4458 char *buf, loff_t off, size_t count)
4460 struct device *cdev = kobj_to_dev(kobj);
4461 struct Scsi_Host *shost = class_to_shost(cdev);
4462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4465 if (!capable(CAP_SYS_ADMIN))
4469 rc = ipr_alloc_dump(ioa_cfg);
4470 else if (buf[0] == '0')
4471 rc = ipr_free_dump(ioa_cfg);
4481 static struct bin_attribute ipr_dump_attr = {
4484 .mode = S_IRUSR | S_IWUSR,
4487 .read = ipr_read_dump,
4488 .write = ipr_write_dump
4491 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4495 * ipr_change_queue_depth - Change the device's queue depth
4496 * @sdev: scsi device struct
4497 * @qdepth: depth to set
4502 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4504 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4505 struct ipr_resource_entry *res;
4506 unsigned long lock_flags = 0;
4508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4509 res = (struct ipr_resource_entry *)sdev->hostdata;
4511 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4512 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4515 scsi_change_queue_depth(sdev, qdepth);
4516 return sdev->queue_depth;
4520 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4521 * @dev: device struct
4522 * @attr: device attribute structure
4526 * number of bytes printed to buffer
4528 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4530 struct scsi_device *sdev = to_scsi_device(dev);
4531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4532 struct ipr_resource_entry *res;
4533 unsigned long lock_flags = 0;
4534 ssize_t len = -ENXIO;
4536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4537 res = (struct ipr_resource_entry *)sdev->hostdata;
4539 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4544 static struct device_attribute ipr_adapter_handle_attr = {
4546 .name = "adapter_handle",
4549 .show = ipr_show_adapter_handle
4553 * ipr_show_resource_path - Show the resource path or the resource address for this device.
4555 * @dev: device struct
4556 * @attr: device attribute structure
4560 * number of bytes printed to buffer
4562 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4564 struct scsi_device *sdev = to_scsi_device(dev);
4565 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4566 struct ipr_resource_entry *res;
4567 unsigned long lock_flags = 0;
4568 ssize_t len = -ENXIO;
4569 char buffer[IPR_MAX_RES_PATH_LENGTH];
4571 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4572 res = (struct ipr_resource_entry *)sdev->hostdata;
4573 if (res && ioa_cfg->sis64)
4574 len = snprintf(buf, PAGE_SIZE, "%s\n",
4575 __ipr_format_res_path(res->res_path, buffer,
4578 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4579 res->bus, res->target, res->lun);
4581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4585 static struct device_attribute ipr_resource_path_attr = {
4587 .name = "resource_path",
4590 .show = ipr_show_resource_path
4594 * ipr_show_device_id - Show the device_id for this device.
4595 * @dev: device struct
4596 * @attr: device attribute structure
4600 * number of bytes printed to buffer
4602 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4604 struct scsi_device *sdev = to_scsi_device(dev);
4605 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4606 struct ipr_resource_entry *res;
4607 unsigned long lock_flags = 0;
4608 ssize_t len = -ENXIO;
4610 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4611 res = (struct ipr_resource_entry *)sdev->hostdata;
4612 if (res && ioa_cfg->sis64)
4613 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4615 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4621 static struct device_attribute ipr_device_id_attr = {
4623 .name = "device_id",
4626 .show = ipr_show_device_id
4630 * ipr_show_resource_type - Show the resource type for this device.
4631 * @dev: device struct
4632 * @attr: device attribute structure
4636 * number of bytes printed to buffer
4638 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4640 struct scsi_device *sdev = to_scsi_device(dev);
4641 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4642 struct ipr_resource_entry *res;
4643 unsigned long lock_flags = 0;
4644 ssize_t len = -ENXIO;
4646 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4647 res = (struct ipr_resource_entry *)sdev->hostdata;
4650 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4652 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4656 static struct device_attribute ipr_resource_type_attr = {
4658 .name = "resource_type",
4661 .show = ipr_show_resource_type
4665 * ipr_show_raw_mode - Show the adapter's raw mode
4666 * @dev: class device struct
4667 * @attr: device attribute (unused)
4671 * number of bytes printed to buffer
4673 static ssize_t ipr_show_raw_mode(struct device *dev,
4674 struct device_attribute *attr, char *buf)
4676 struct scsi_device *sdev = to_scsi_device(dev);
4677 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4678 struct ipr_resource_entry *res;
4679 unsigned long lock_flags = 0;
4682 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4683 res = (struct ipr_resource_entry *)sdev->hostdata;
4685 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4693 * ipr_store_raw_mode - Change the adapter's raw mode
4694 * @dev: class device struct
4695 * @attr: device attribute (unused)
4697 * @count: buffer size
4700 * count on success / other on failure
4702 static ssize_t ipr_store_raw_mode(struct device *dev,
4703 struct device_attribute *attr,
4704 const char *buf, size_t count)
4706 struct scsi_device *sdev = to_scsi_device(dev);
4707 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4708 struct ipr_resource_entry *res;
4709 unsigned long lock_flags = 0;
4712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4713 res = (struct ipr_resource_entry *)sdev->hostdata;
4715 if (ipr_is_af_dasd_device(res)) {
4716 res->raw_mode = simple_strtoul(buf, NULL, 10);
4719 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4720 res->raw_mode ? "enabled" : "disabled");
4725 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4729 static struct device_attribute ipr_raw_mode_attr = {
4732 .mode = S_IRUGO | S_IWUSR,
4734 .show = ipr_show_raw_mode,
4735 .store = ipr_store_raw_mode
4738 static struct attribute *ipr_dev_attrs[] = {
4739 &ipr_adapter_handle_attr.attr,
4740 &ipr_resource_path_attr.attr,
4741 &ipr_device_id_attr.attr,
4742 &ipr_resource_type_attr.attr,
4743 &ipr_raw_mode_attr.attr,
4747 ATTRIBUTE_GROUPS(ipr_dev);
4750 * ipr_biosparam - Return the HSC mapping
4751 * @sdev: scsi device struct
4752 * @block_device: block device pointer
4753 * @capacity: capacity of the device
4754 * @parm: Array containing returned HSC values.
4756 * This function generates the HSC parms that fdisk uses.
4757 * We want to make sure we return something that places partitions
4758 * on 4k boundaries for best performance with the IOA.
4763 static int ipr_biosparam(struct scsi_device *sdev,
4764 struct block_device *block_device,
4765 sector_t capacity, int *parm)
4773 cylinders = capacity;
4774 sector_div(cylinders, (128 * 32));
4779 parm[2] = cylinders;
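/*
 * With the fixed 128-head, 32-sector geometry each cylinder is
 * 128 * 32 * 512 bytes = 2 MB (assuming 512-byte blocks), so
 * cylinder-aligned partitions land on 4k boundaries as intended.
 */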
4785 * ipr_find_starget - Find target based on bus/target.
4786 * @starget: scsi target struct
4789 * resource entry pointer if found / NULL if not found
4791 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4793 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4794 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4795 struct ipr_resource_entry *res;
4797 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4798 if ((res->bus == starget->channel) &&
4799 (res->target == starget->id)) {
4807 static struct ata_port_info sata_port_info;
4810 * ipr_target_alloc - Prepare for commands to a SCSI target
4811 * @starget: scsi target struct
4813 * If the device is a SATA device, this function allocates an
4814 * ATA port with libata, else it does nothing.
4817 * 0 on success / non-0 on failure
4819 static int ipr_target_alloc(struct scsi_target *starget)
4821 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4822 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4823 struct ipr_sata_port *sata_port;
4824 struct ata_port *ap;
4825 struct ipr_resource_entry *res;
4826 unsigned long lock_flags;
4828 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4829 res = ipr_find_starget(starget);
4830 starget->hostdata = NULL;
4832 if (res && ipr_is_gata(res)) {
4833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4838 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4841 sata_port->ioa_cfg = ioa_cfg;
4843 sata_port->res = res;
4845 res->sata_port = sata_port;
4846 ap->private_data = sata_port;
4847 starget->hostdata = sata_port;
4853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4859 * ipr_target_destroy - Destroy a SCSI target
4860 * @starget: scsi target struct
4862 * If the device was a SATA device, this function frees the libata
4863 * ATA port, else it does nothing.
4866 static void ipr_target_destroy(struct scsi_target *starget)
4868 struct ipr_sata_port *sata_port = starget->hostdata;
4869 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4870 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4872 if (ioa_cfg->sis64) {
4873 if (!ipr_find_starget(starget)) {
4874 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4875 clear_bit(starget->id, ioa_cfg->array_ids);
4876 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4877 clear_bit(starget->id, ioa_cfg->vset_ids);
4878 else if (starget->channel == 0)
4879 clear_bit(starget->id, ioa_cfg->target_ids);
4884 starget->hostdata = NULL;
4885 ata_sas_port_destroy(sata_port->ap);
4891 * ipr_find_sdev - Find device based on bus/target/lun.
4892 * @sdev: scsi device struct
4895 * resource entry pointer if found / NULL if not found
4897 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4899 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4900 struct ipr_resource_entry *res;
4902 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4903 if ((res->bus == sdev->channel) &&
4904 (res->target == sdev->id) &&
4905 (res->lun == sdev->lun))
4913 * ipr_slave_destroy - Unconfigure a SCSI device
4914 * @sdev: scsi device struct
4919 static void ipr_slave_destroy(struct scsi_device *sdev)
4921 struct ipr_resource_entry *res;
4922 struct ipr_ioa_cfg *ioa_cfg;
4923 unsigned long lock_flags = 0;
4925 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928 res = (struct ipr_resource_entry *) sdev->hostdata;
4931 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4932 sdev->hostdata = NULL;
4934 res->sata_port = NULL;
4936 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4940 * ipr_slave_configure - Configure a SCSI device
4941 * @sdev: scsi device struct
4943 * This function configures the specified scsi device.
4948 static int ipr_slave_configure(struct scsi_device *sdev)
4950 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4951 struct ipr_resource_entry *res;
4952 struct ata_port *ap = NULL;
4953 unsigned long lock_flags = 0;
4954 char buffer[IPR_MAX_RES_PATH_LENGTH];
4956 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4957 res = sdev->hostdata;
4959 if (ipr_is_af_dasd_device(res))
4960 sdev->type = TYPE_RAID;
4961 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4962 sdev->scsi_level = 4;
4963 sdev->no_uld_attach = 1;
4965 if (ipr_is_vset_device(res)) {
4966 sdev->scsi_level = SCSI_SPC_3;
4967 sdev->no_report_opcodes = 1;
4968 blk_queue_rq_timeout(sdev->request_queue,
4969 IPR_VSET_RW_TIMEOUT);
4970 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4972 if (ipr_is_gata(res) && res->sata_port)
4973 ap = res->sata_port->ap;
4974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4977 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4978 ata_sas_slave_configure(sdev, ap);
4982 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4983 ipr_format_res_path(ioa_cfg,
4984 res->res_path, buffer, sizeof(buffer)));
4987 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4992 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4993 * @sdev: scsi device struct
4995 * This function initializes an ATA port so that future commands
4996 * sent through queuecommand will work.
5001 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
5003 struct ipr_sata_port *sata_port = NULL;
5007 if (sdev->sdev_target)
5008 sata_port = sdev->sdev_target->hostdata;
5010 rc = ata_sas_port_init(sata_port->ap);
5012 rc = ata_sas_sync_probe(sata_port->ap);
5016 ipr_slave_destroy(sdev);
5023 * ipr_slave_alloc - Prepare for commands to a device.
5024 * @sdev: scsi device struct
5026 * This function saves a pointer to the resource entry
5027 * in the scsi device struct if the device exists. We
5028 * can then use this pointer in ipr_queuecommand when
5029 * handling new commands.
5032 * 0 on success / -ENXIO if device does not exist
5034 static int ipr_slave_alloc(struct scsi_device *sdev)
5036 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5037 struct ipr_resource_entry *res;
5038 unsigned long lock_flags;
5041 sdev->hostdata = NULL;
5043 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5045 res = ipr_find_sdev(sdev);
5050 sdev->hostdata = res;
5051 if (!ipr_is_naca_model(res))
5052 res->needs_sync_complete = 1;
5054 if (ipr_is_gata(res)) {
5055 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5056 return ipr_ata_slave_alloc(sdev);
5060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5066 * ipr_match_lun - Match function for specified LUN
5067 * @ipr_cmd: ipr command struct
5068 * @device: device to match (sdev)
5071 * 1 if command matches sdev / 0 if command does not match sdev
5073 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5075 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5081 * ipr_cmnd_is_free - Check if a command is free or not
5082 * @ipr_cmd: ipr command struct
5087 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5089 struct ipr_cmnd *loop_cmd;
5091 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5092 if (loop_cmd == ipr_cmd)
5100 * ipr_match_res - Match function for specified resource entry
5101 * @ipr_cmd: ipr command struct
5102 * @resource: resource entry to match
5105 * 1 if command matches the resource entry / 0 if it does not
5107 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5109 struct ipr_resource_entry *res = resource;
5111 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5117 * ipr_wait_for_ops - Wait for matching commands to complete
5118 * @ioa_cfg: ioa config struct
5119 * @device: device to match (sdev)
5120 * @match: match function to use
5125 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5126 int (*match)(struct ipr_cmnd *, void *))
5128 struct ipr_cmnd *ipr_cmd;
5130 unsigned long flags;
5131 struct ipr_hrr_queue *hrrq;
5132 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5133 DECLARE_COMPLETION_ONSTACK(comp);
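/*
 * Scan every HRRQ and attach the on-stack completion to each outstanding
 * command that matches; after the wait, detach it from anything still
 * outstanding so a late completion cannot reference the stack once this
 * function returns.
 */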
5139 for_each_hrrq(hrrq, ioa_cfg) {
5140 spin_lock_irqsave(hrrq->lock, flags);
5141 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5142 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5143 if (!ipr_cmnd_is_free(ipr_cmd)) {
5144 if (match(ipr_cmd, device)) {
5145 ipr_cmd->eh_comp = &comp;
5150 spin_unlock_irqrestore(hrrq->lock, flags);
5154 timeout = wait_for_completion_timeout(&comp, timeout);
5159 for_each_hrrq(hrrq, ioa_cfg) {
5160 spin_lock_irqsave(hrrq->lock, flags);
5161 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5162 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5163 if (!ipr_cmnd_is_free(ipr_cmd)) {
5164 if (match(ipr_cmd, device)) {
5165 ipr_cmd->eh_comp = NULL;
5170 spin_unlock_irqrestore(hrrq->lock, flags);
5174 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5176 return wait ? FAILED : SUCCESS;
5185 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5187 struct ipr_ioa_cfg *ioa_cfg;
5188 unsigned long lock_flags = 0;
5192 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5195 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5196 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5197 dev_err(&ioa_cfg->pdev->dev,
5198 "Adapter being reset as a result of error recovery.\n");
5200 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5201 ioa_cfg->sdt_state = GET_DUMP;
5204 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5205 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5206 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5208 /* If we got hit with a host reset while we were already resetting
5209 the adapter for some reason and that reset failed, fail this host reset as well. */
5210 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5215 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5221 * ipr_device_reset - Reset the device
5222 * @ioa_cfg: ioa config struct
5223 * @res: resource entry struct
5225 * This function issues a device reset to the affected device.
5226 * If the device is a SCSI device, a LUN reset will be sent
5227 * to the device first. If that does not work, a target reset
5228 * will be sent. If the device is a SATA device, a PHY reset will be sent.
5232 * 0 on success / non-zero on failure
5234 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5235 struct ipr_resource_entry *res)
5237 struct ipr_cmnd *ipr_cmd;
5238 struct ipr_ioarcb *ioarcb;
5239 struct ipr_cmd_pkt *cmd_pkt;
5240 struct ipr_ioarcb_ata_regs *regs;
5244 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5245 ioarcb = &ipr_cmd->ioarcb;
5246 cmd_pkt = &ioarcb->cmd_pkt;
5248 if (ipr_cmd->ioa_cfg->sis64) {
5249 regs = &ipr_cmd->i.ata_ioadl.regs;
5250 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5252 regs = &ioarcb->u.add_data.u.regs;
5254 ioarcb->res_handle = res->res_handle;
5255 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5256 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5257 if (ipr_is_gata(res)) {
5258 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5259 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5260 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5263 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5264 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5266 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5267 if (ipr_cmd->ioa_cfg->sis64)
5268 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5269 sizeof(struct ipr_ioasa_gata));
5271 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5272 sizeof(struct ipr_ioasa_gata));
5276 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5280 * ipr_sata_reset - Reset the SATA port
5281 * @link: SATA link to reset
5282 * @classes: class of the attached device
5285 * This function issues a SATA phy reset to the affected ATA link.
5288 * 0 on success / non-zero on failure
5290 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5291 unsigned long deadline)
5293 struct ipr_sata_port *sata_port = link->ap->private_data;
5294 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5295 struct ipr_resource_entry *res;
5296 unsigned long lock_flags = 0;
5297 int rc = -ENXIO, ret;
5300 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5301 while (ioa_cfg->in_reset_reload) {
5302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5303 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5304 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5307 res = sata_port->res;
5309 rc = ipr_device_reset(ioa_cfg, res);
5310 *classes = res->ata_class;
5311 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5313 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5314 if (ret != SUCCESS) {
5315 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5316 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5317 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5319 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5329 * __ipr_eh_dev_reset - Reset the device
5330 * @scsi_cmd: scsi command struct
5332 * This function issues a device reset to the affected device.
5333 * A LUN reset will be sent to the device first. If that does
5334 * not work, a target reset will be sent.
5339 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5341 struct ipr_cmnd *ipr_cmd;
5342 struct ipr_ioa_cfg *ioa_cfg;
5343 struct ipr_resource_entry *res;
5344 struct ata_port *ap;
5346 struct ipr_hrr_queue *hrrq;
5349 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5350 res = scsi_cmd->device->hostdata;
5353 * If we are currently going through reset/reload, return failed. This will force the
5354 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the reset to complete.
5357 if (ioa_cfg->in_reset_reload)
5359 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5362 for_each_hrrq(hrrq, ioa_cfg) {
5363 spin_lock(&hrrq->_lock);
5364 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5365 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5367 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5370 if (ipr_cmnd_is_free(ipr_cmd))
5373 ipr_cmd->done = ipr_sata_eh_done;
5374 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5375 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5376 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5380 spin_unlock(&hrrq->_lock);
5382 res->resetting_device = 1;
5383 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5385 if (ipr_is_gata(res) && res->sata_port) {
5386 ap = res->sata_port->ap;
5387 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5388 ata_std_error_handler(ap);
5389 spin_lock_irq(scsi_cmd->device->host->host_lock);
5391 rc = ipr_device_reset(ioa_cfg, res);
5392 res->resetting_device = 0;
5393 res->reset_occurred = 1;
5396 return rc ? FAILED : SUCCESS;
5399 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5402 struct ipr_ioa_cfg *ioa_cfg;
5403 struct ipr_resource_entry *res;
5405 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5406 res = cmd->device->hostdata;
5411 spin_lock_irq(cmd->device->host->host_lock);
5412 rc = __ipr_eh_dev_reset(cmd);
5413 spin_unlock_irq(cmd->device->host->host_lock);
5415 if (rc == SUCCESS) {
5416 if (ipr_is_gata(res) && res->sata_port)
5417 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5419 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5426 * ipr_bus_reset_done - Op done function for bus reset.
5427 * @ipr_cmd: ipr command struct
5429 * This function is the op done function for a bus reset
5434 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5436 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5437 struct ipr_resource_entry *res;
5440 if (!ioa_cfg->sis64)
5441 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5442 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5443 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5449 * If abort has not completed, indicate the reset has, else call the
5450 * abort's done function to wake the sleeping eh thread
5452 if (ipr_cmd->sibling->sibling)
5453 ipr_cmd->sibling->sibling = NULL;
5455 ipr_cmd->sibling->done(ipr_cmd->sibling);
5457 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5462 * ipr_abort_timeout - An abort task has timed out
5463 * @t: Timer context used to fetch ipr command struct
5465 * This function handles when an abort task times out. If this
5466 * happens we issue a bus reset since we have resources tied
5467 * up that must be freed before returning to the midlayer.
5472 static void ipr_abort_timeout(struct timer_list *t)
5474 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5475 struct ipr_cmnd *reset_cmd;
5476 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5477 struct ipr_cmd_pkt *cmd_pkt;
5478 unsigned long lock_flags = 0;
5481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5482 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5487 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5488 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5489 ipr_cmd->sibling = reset_cmd;
5490 reset_cmd->sibling = ipr_cmd;
5491 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5492 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5493 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5494 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5495 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5497 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5503 * ipr_cancel_op - Cancel specified op
5504 * @scsi_cmd: scsi command struct
5506 * This function cancels the specified op.
5511 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5513 struct ipr_cmnd *ipr_cmd;
5514 struct ipr_ioa_cfg *ioa_cfg;
5515 struct ipr_resource_entry *res;
5516 struct ipr_cmd_pkt *cmd_pkt;
5518 int i, op_found = 0;
5519 struct ipr_hrr_queue *hrrq;
5522 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5523 res = scsi_cmd->device->hostdata;
5525 /* If we are currently going through reset/reload, return failed.
5526 * This will force the mid-layer to call ipr_eh_host_reset,
5527 * which will then go to sleep and wait for the reset to complete
5529 if (ioa_cfg->in_reset_reload ||
5530 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5536 * If we are aborting a timed-out op, chances are that the timeout was caused
5537 * by an EEH error that has not yet been detected. In such cases, reading a register will
5538 * trigger the EEH recovery infrastructure.
5540 readl(ioa_cfg->regs.sense_interrupt_reg);
5542 if (!ipr_is_gscsi(res))
5545 for_each_hrrq(hrrq, ioa_cfg) {
5546 spin_lock(&hrrq->_lock);
5547 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5548 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5549 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5555 spin_unlock(&hrrq->_lock);
5561 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5562 ipr_cmd->ioarcb.res_handle = res->res_handle;
5563 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5564 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5565 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5566 ipr_cmd->u.sdev = scsi_cmd->device;
5568 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5570 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5571 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5574 * If the abort task timed out and we sent a bus reset, we will get
5575 * one of the following responses to the abort
5577 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5582 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5583 if (!ipr_is_naca_model(res))
5584 res->needs_sync_complete = 1;
5587 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5591 * ipr_scan_finished - Report whether scan is done
5592 * @shost: scsi host struct
5593 * @elapsed_time: elapsed time
5596 * 0 if scan in progress / 1 if scan is complete
5598 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5600 unsigned long lock_flags;
5601 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5604 spin_lock_irqsave(shost->host_lock, lock_flags);
5605 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5607 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5609 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5614 * ipr_eh_abort - Abort the specified SCSI command
5615 * @scsi_cmd: scsi command struct
5620 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5622 unsigned long flags;
5624 struct ipr_ioa_cfg *ioa_cfg;
5628 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5630 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5631 rc = ipr_cancel_op(scsi_cmd);
5632 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5635 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5641 * ipr_handle_other_interrupt - Handle "other" interrupts
5642 * @ioa_cfg: ioa config struct
5643 * @int_reg: interrupt register
5646 * IRQ_NONE / IRQ_HANDLED
5648 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5651 irqreturn_t rc = IRQ_HANDLED;
5654 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5655 int_reg &= ~int_mask_reg;
5657 /* If an interrupt on the adapter did not occur, ignore it.
5658 * Or in the case of SIS 64, check for a stage change interrupt.
5660 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5661 if (ioa_cfg->sis64) {
5662 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5663 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5664 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5666 /* clear stage change */
5667 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5668 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5669 list_del(&ioa_cfg->reset_cmd->queue);
5670 del_timer(&ioa_cfg->reset_cmd->timer);
5671 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5679 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5680 /* Mask the interrupt */
5681 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5682 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5684 list_del(&ioa_cfg->reset_cmd->queue);
5685 del_timer(&ioa_cfg->reset_cmd->timer);
5686 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5687 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5688 if (ioa_cfg->clear_isr) {
5689 if (ipr_debug && printk_ratelimit())
5690 dev_err(&ioa_cfg->pdev->dev,
5691 "Spurious interrupt detected. 0x%08X\n", int_reg);
5692 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5693 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5697 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5698 ioa_cfg->ioa_unit_checked = 1;
5699 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5700 dev_err(&ioa_cfg->pdev->dev,
5701 "No Host RRQ. 0x%08X\n", int_reg);
5703 dev_err(&ioa_cfg->pdev->dev,
5704 "Permanent IOA failure. 0x%08X\n", int_reg);
5706 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5707 ioa_cfg->sdt_state = GET_DUMP;
5709 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5710 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5717 * ipr_isr_eh - Interrupt service routine error handler
5718 * @ioa_cfg: ioa config struct
5719 * @msg: message to log
5720 * @number: various meanings depending on the caller/message
5725 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5727 ioa_cfg->errors_logged++;
5728 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5730 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5731 ioa_cfg->sdt_state = GET_DUMP;
5733 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5736 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5737 struct list_head *doneq)
5741 struct ipr_cmnd *ipr_cmd;
5742 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5745 /* If interrupts are disabled, ignore the interrupt */
5746 if (!hrr_queue->allow_interrupts)
5749 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5750 hrr_queue->toggle_bit) {
5752 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5753 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5754 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
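/*
 * The toggle bit tested above flips each time the IOA wraps the HRRQ, so
 * entries whose toggle bit no longer matches hrr_queue->toggle_bit have not
 * been posted yet; the handle bits index straight into ipr_cmnd_list.
 */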
5756 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5757 cmd_index < hrr_queue->min_cmd_id)) {
5759 "Invalid response handle from IOA: ",
5764 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5765 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5767 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5769 list_move_tail(&ipr_cmd->queue, doneq);
5771 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5772 hrr_queue->hrrq_curr++;
5774 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5775 hrr_queue->toggle_bit ^= 1u;
5778 if (budget > 0 && num_hrrq >= budget)
5785 static int ipr_iopoll(struct irq_poll *iop, int budget)
5787 struct ipr_hrr_queue *hrrq;
5788 struct ipr_cmnd *ipr_cmd, *temp;
5789 unsigned long hrrq_flags;
5793 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5795 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5796 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5798 if (completed_ops < budget)
5799 irq_poll_complete(iop);
5800 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5802 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5803 list_del(&ipr_cmd->queue);
5804 del_timer(&ipr_cmd->timer);
5805 ipr_cmd->fast_done(ipr_cmd);
5808 return completed_ops;
5812 * ipr_isr - Interrupt service routine
5814 * @devp: pointer to ioa config struct
5817 * IRQ_NONE / IRQ_HANDLED
5819 static irqreturn_t ipr_isr(int irq, void *devp)
5821 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5822 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5823 unsigned long hrrq_flags = 0;
5827 struct ipr_cmnd *ipr_cmd, *temp;
5828 irqreturn_t rc = IRQ_NONE;
5831 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5832 /* If interrupts are disabled, ignore the interrupt */
5833 if (!hrrq->allow_interrupts) {
5834 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5839 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5842 if (!ioa_cfg->clear_isr)
5845 /* Clear the PCI interrupt */
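/*
 * Re-read until the HRRQ updated bit deasserts or we give up after
 * IPR_MAX_HRRQ_RETRIES tries; a bit that never clears is reported as an
 * HRRQ clearing error below.
 */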
5848 writel(IPR_PCII_HRRQ_UPDATED,
5849 ioa_cfg->regs.clr_interrupt_reg32);
5850 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5851 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5852 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5854 } else if (rc == IRQ_NONE && irq_none == 0) {
5855 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5857 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5858 int_reg & IPR_PCII_HRRQ_UPDATED) {
5860 "Error clearing HRRQ: ", num_hrrq);
5867 if (unlikely(rc == IRQ_NONE))
5868 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5870 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5871 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5872 list_del(&ipr_cmd->queue);
5873 del_timer(&ipr_cmd->timer);
5874 ipr_cmd->fast_done(ipr_cmd);
5880 * ipr_isr_mhrrq - Interrupt service routine
5882 * @devp: pointer to ioa config struct
5885 * IRQ_NONE / IRQ_HANDLED
5887 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5889 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5890 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5891 unsigned long hrrq_flags = 0;
5892 struct ipr_cmnd *ipr_cmd, *temp;
5893 irqreturn_t rc = IRQ_NONE;
5896 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5898 /* If interrupts are disabled, ignore the interrupt */
5899 if (!hrrq->allow_interrupts) {
5900 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5904 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5905 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5907 irq_poll_sched(&hrrq->iopoll);
5908 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5912 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5915 if (ipr_process_hrrq(hrrq, -1, &doneq))
5919 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5921 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5922 list_del(&ipr_cmd->queue);
5923 del_timer(&ipr_cmd->timer);
5924 ipr_cmd->fast_done(ipr_cmd);
5930 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5931 * @ioa_cfg: ioa config struct
5932 * @ipr_cmd: ipr command struct
5935 * 0 on success / -1 on failure
5937 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5938 struct ipr_cmnd *ipr_cmd)
5941 struct scatterlist *sg;
5943 u32 ioadl_flags = 0;
5944 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5945 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5946 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5948 length = scsi_bufflen(scsi_cmd);
5952 nseg = scsi_dma_map(scsi_cmd);
5954 if (printk_ratelimit())
5955 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5959 ipr_cmd->dma_use_sg = nseg;
5961 ioarcb->data_transfer_length = cpu_to_be32(length);
5963 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5965 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5966 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5967 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5968 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5969 ioadl_flags = IPR_IOADL_FLAGS_READ;
5971 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5972 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5973 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5974 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5977 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5982 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5983 * @ioa_cfg: ioa config struct
5984 * @ipr_cmd: ipr command struct
5987 * 0 on success / -1 on failure
5989 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5990 struct ipr_cmnd *ipr_cmd)
5993 struct scatterlist *sg;
5995 u32 ioadl_flags = 0;
5996 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5997 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5998 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6000 length = scsi_bufflen(scsi_cmd);
6004 nseg = scsi_dma_map(scsi_cmd);
6006 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
6010 ipr_cmd->dma_use_sg = nseg;
6012 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6013 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6014 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6015 ioarcb->data_transfer_length = cpu_to_be32(length);
6017 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6018 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6019 ioadl_flags = IPR_IOADL_FLAGS_READ;
6020 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6021 ioarcb->read_ioadl_len =
6022 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
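/*
 * If the scatter/gather list is small enough, embed it directly in the
 * IOARCB's add_data area so the descriptors travel with the command instead
 * of living in a separate buffer.
 */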
6025 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6026 ioadl = ioarcb->u.add_data.u.ioadl;
6027 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6028 offsetof(struct ipr_ioarcb, u.add_data));
6029 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6032 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6033 ioadl[i].flags_and_data_len =
6034 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6035 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6038 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6043 * __ipr_erp_done - Process completion of ERP for a device
6044 * @ipr_cmd: ipr command struct
6046 * This function copies the sense buffer into the scsi_cmd
6047 * struct and calls scsi_done().
6052 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6054 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6055 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6056 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6058 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6059 scsi_cmd->result |= (DID_ERROR << 16);
6060 scmd_printk(KERN_ERR, scsi_cmd,
6061 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6063 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6064 SCSI_SENSE_BUFFERSIZE);
6068 if (!ipr_is_naca_model(res))
6069 res->needs_sync_complete = 1;
6072 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6073 scsi_done(scsi_cmd);
6074 if (ipr_cmd->eh_comp)
6075 complete(ipr_cmd->eh_comp);
6076 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6080 * ipr_erp_done - Process completion of ERP for a device
6081 * @ipr_cmd: ipr command struct
6083 * This function copies the sense buffer into the scsi_cmd
6084 * struct and calls scsi_done().
6089 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6091 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6092 unsigned long hrrq_flags;
6094 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6095 __ipr_erp_done(ipr_cmd);
6096 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6100 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6101 * @ipr_cmd: ipr command struct
6106 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6108 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6109 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6110 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6112 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6113 ioarcb->data_transfer_length = 0;
6114 ioarcb->read_data_transfer_length = 0;
6115 ioarcb->ioadl_len = 0;
6116 ioarcb->read_ioadl_len = 0;
6117 ioasa->hdr.ioasc = 0;
6118 ioasa->hdr.residual_data_len = 0;
6120 if (ipr_cmd->ioa_cfg->sis64)
6121 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6122 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6124 ioarcb->write_ioadl_addr =
6125 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6126 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6131 * __ipr_erp_request_sense - Send request sense to a device
6132 * @ipr_cmd: ipr command struct
6134 * This function sends a request sense to a device as a result
6135 * of a check condition.
6140 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6142 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6143 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6145 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6146 __ipr_erp_done(ipr_cmd);
6150 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6152 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6153 cmd_pkt->cdb[0] = REQUEST_SENSE;
6154 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6155 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6156 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6157 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
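/*
 * Point a single READ IOADL element at the command's pre-mapped sense
 * buffer; the returned sense data needs no additional DMA mapping.
 */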
6159 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6160 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6162 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6163 IPR_REQUEST_SENSE_TIMEOUT * 2);
6167 * ipr_erp_request_sense - Send request sense to a device
6168 * @ipr_cmd: ipr command struct
6170 * This function sends a request sense to a device as a result
6171 * of a check condition.
6176 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6178 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6179 unsigned long hrrq_flags;
6181 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6182 __ipr_erp_request_sense(ipr_cmd);
6183 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6187 * ipr_erp_cancel_all - Send cancel all to a device
6188 * @ipr_cmd: ipr command struct
6190 * This function sends a cancel all to a device to clear the
6191 * queue. If we are running TCQ on the device, QERR is set to 1,
6192 * which means all outstanding ops have been dropped on the floor.
6193 * Cancel all will return them to us.
6198 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6200 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6201 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6202 struct ipr_cmd_pkt *cmd_pkt;
6206 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6208 if (!scsi_cmd->device->simple_tags) {
6209 __ipr_erp_request_sense(ipr_cmd);
6213 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6214 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6215 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6217 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6218 IPR_CANCEL_ALL_TIMEOUT);
6222 * ipr_dump_ioasa - Dump contents of IOASA
6223 * @ioa_cfg: ioa config struct
6224 * @ipr_cmd: ipr command struct
6225 * @res: resource entry struct
6227 * This function is invoked by the interrupt handler when ops
6228 * fail. It will log the IOASA if appropriate. Only called
6234 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6235 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6239 u32 ioasc, fd_ioasc;
6240 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6241 __be32 *ioasa_data = (__be32 *)ioasa;
6244 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6245 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6250 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6253 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6254 error_index = ipr_get_error(fd_ioasc);
6256 error_index = ipr_get_error(ioasc);
6258 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6259 /* Don't log an error if the IOA already logged one */
6260 if (ioasa->hdr.ilid != 0)
6263 if (!ipr_is_gscsi(res))
6266 if (ipr_error_table[error_index].log_ioasa == 0)
6270 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6272 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6273 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6274 data_len = sizeof(struct ipr_ioasa64);
6275 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6276 data_len = sizeof(struct ipr_ioasa);
6278 ipr_err("IOASA Dump:\n");
6280 for (i = 0; i < data_len / 4; i += 4) {
6281 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6282 be32_to_cpu(ioasa_data[i]),
6283 be32_to_cpu(ioasa_data[i+1]),
6284 be32_to_cpu(ioasa_data[i+2]),
6285 be32_to_cpu(ioasa_data[i+3]));
6290 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6291 * @ipr_cmd: ipr command struct
6296 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6299 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6300 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6301 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6302 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6304 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6306 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6309 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6311 if (ipr_is_vset_device(res) &&
6312 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6313 ioasa->u.vset.failing_lba_hi != 0) {
6314 sense_buf[0] = 0x72;
6315 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6316 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6317 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6321 sense_buf[9] = 0x0A;
6322 sense_buf[10] = 0x80;
6324 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6326 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6327 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6328 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6329 sense_buf[15] = failing_lba & 0x000000ff;
6331 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6333 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6334 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6335 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6336 sense_buf[19] = failing_lba & 0x000000ff;
6338 sense_buf[0] = 0x70;
6339 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6340 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6341 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6343 /* Illegal request */
6344 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6345 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6346 sense_buf[7] = 10; /* additional length */
6348 /* IOARCB was in error */
6349 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6350 sense_buf[15] = 0xC0;
6351 else /* Parameter data was invalid */
6352 sense_buf[15] = 0x80;
6355 ((IPR_FIELD_POINTER_MASK &
6356 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6358 (IPR_FIELD_POINTER_MASK &
6359 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6361 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6362 if (ipr_is_vset_device(res))
6363 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6365 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6367 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6368 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6369 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6370 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6371 sense_buf[6] = failing_lba & 0x000000ff;
6374 sense_buf[7] = 6; /* additional length */
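/*
 * Illustrative sketch (not part of the driver): ipr_gen_sense() above
 * stores the failing LBA most-significant byte first, either in bytes
 * 12-19 of a descriptor-format (0x72) information descriptor or in
 * bytes 3-6 of a fixed-format (0x70) sense buffer.  Each 4-byte group
 * it open-codes is a plain big-endian store; a generic helper (the
 * name is hypothetical, and the kernel's put_unaligned_be32() does the
 * same job) would look like:
 */
static inline void example_put_be32_sense(u8 *dst, u32 lba)
{
	dst[0] = (lba >> 24) & 0xff;
	dst[1] = (lba >> 16) & 0xff;
	dst[2] = (lba >> 8) & 0xff;
	dst[3] = lba & 0xff;
}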
6380 * ipr_get_autosense - Copy autosense data to sense buffer
6381 * @ipr_cmd: ipr command struct
6383 * This function copies the autosense buffer to the buffer
6384 * in the scsi_cmd, if there is autosense available.
6387 * 1 if autosense was available / 0 if not
6389 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6391 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6392 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6394 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6397 if (ipr_cmd->ioa_cfg->sis64)
6398 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6399 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6400 SCSI_SENSE_BUFFERSIZE));
6402 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6403 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6404 SCSI_SENSE_BUFFERSIZE));
6409 * ipr_erp_start - Process an error response for a SCSI op
6410 * @ioa_cfg: ioa config struct
6411 * @ipr_cmd: ipr command struct
6413 * This function determines whether or not to initiate ERP
6414 * on the affected device.
6419 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6420 struct ipr_cmnd *ipr_cmd)
6422 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6423 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6424 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6425 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6428 __ipr_scsi_eh_done(ipr_cmd);
6432 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6433 ipr_gen_sense(ipr_cmd);
6435 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6437 switch (masked_ioasc) {
6438 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6439 if (ipr_is_naca_model(res))
6440 scsi_cmd->result |= (DID_ABORT << 16);
6442 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6444 case IPR_IOASC_IR_RESOURCE_HANDLE:
6445 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6446 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6448 case IPR_IOASC_HW_SEL_TIMEOUT:
6449 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6450 if (!ipr_is_naca_model(res))
6451 res->needs_sync_complete = 1;
6453 case IPR_IOASC_SYNC_REQUIRED:
6455 res->needs_sync_complete = 1;
6456 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6458 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6459 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6461 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6462 * so the SCSI mid-layer and upper layers handle it accordingly.
6464 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6465 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6467 case IPR_IOASC_BUS_WAS_RESET:
6468 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6470 * Report the bus reset and ask for a retry. The device
6471 * will return a CC/UA on the next command.
6473 if (!res->resetting_device)
6474 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6475 scsi_cmd->result |= (DID_ERROR << 16);
6476 if (!ipr_is_naca_model(res))
6477 res->needs_sync_complete = 1;
6479 case IPR_IOASC_HW_DEV_BUS_STATUS:
6480 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6481 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6482 if (!ipr_get_autosense(ipr_cmd)) {
6483 if (!ipr_is_naca_model(res)) {
6484 ipr_erp_cancel_all(ipr_cmd);
6489 if (!ipr_is_naca_model(res))
6490 res->needs_sync_complete = 1;
6492 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6494 case IPR_IOASC_IR_NON_OPTIMIZED:
6495 if (res->raw_mode) {
6497 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6499 scsi_cmd->result |= (DID_ERROR << 16);
6502 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6503 scsi_cmd->result |= (DID_ERROR << 16);
6504 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6505 res->needs_sync_complete = 1;
6509 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6510 scsi_done(scsi_cmd);
6511 if (ipr_cmd->eh_comp)
6512 complete(ipr_cmd->eh_comp);
6513 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
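/*
 * Illustrative sketch (not part of the driver): the switch above reports
 * the outcome to the SCSI mid-layer by OR-ing a host byte (DID_*) into
 * bits 16-23 of scsi_cmnd->result while leaving any SAM status (e.g.
 * SAM_STAT_CHECK_CONDITION) in the low byte.  A minimal example of that
 * encoding (helper name hypothetical):
 */
static inline void example_fail_scsi_cmd(struct scsi_cmnd *scsi_cmd)
{
	/* SAM status in the low byte, host byte in bits 16-23 */
	scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
	scsi_cmd->result |= (DID_ERROR << 16);
}
/* host_byte(scsi_cmd->result) then yields DID_ERROR. */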
6517 * ipr_scsi_done - mid-layer done function
6518 * @ipr_cmd: ipr command struct
6520 * This function is invoked by the interrupt handler for
6521 * ops generated by the SCSI mid-layer
6526 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6528 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6529 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6530 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6531 unsigned long lock_flags;
6533 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6535 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6536 scsi_dma_unmap(scsi_cmd);
6538 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6539 scsi_done(scsi_cmd);
6540 if (ipr_cmd->eh_comp)
6541 complete(ipr_cmd->eh_comp);
6542 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6543 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6545 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6546 spin_lock(&ipr_cmd->hrrq->_lock);
6547 ipr_erp_start(ioa_cfg, ipr_cmd);
6548 spin_unlock(&ipr_cmd->hrrq->_lock);
6549 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6554 * ipr_queuecommand - Queue a mid-layer request
6555 * @shost: scsi host struct
6556 * @scsi_cmd: scsi command struct
6558 * This function queues a request generated by the mid-layer.
6562 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6563 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6565 static int ipr_queuecommand(struct Scsi_Host *shost,
6566 struct scsi_cmnd *scsi_cmd)
6568 struct ipr_ioa_cfg *ioa_cfg;
6569 struct ipr_resource_entry *res;
6570 struct ipr_ioarcb *ioarcb;
6571 struct ipr_cmnd *ipr_cmd;
6572 unsigned long hrrq_flags, lock_flags;
6574 struct ipr_hrr_queue *hrrq;
6577 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6579 scsi_cmd->result = (DID_OK << 16);
6580 res = scsi_cmd->device->hostdata;
6582 if (ipr_is_gata(res) && res->sata_port) {
6583 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6584 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6589 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6590 hrrq = &ioa_cfg->hrrq[hrrq_id];
6592 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6594 * We are currently blocking all devices due to a host reset.
6595 * We have told the host to stop giving us new requests, but
6596 * ERP ops don't count. FIXME
6598 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6599 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6600 return SCSI_MLQUEUE_HOST_BUSY;
6604 * FIXME - Create scsi_set_host_offline interface
6605 * and the ioa_is_dead check can be removed
6607 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6608 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6612 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6613 if (ipr_cmd == NULL) {
6614 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6615 return SCSI_MLQUEUE_HOST_BUSY;
6617 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6619 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6620 ioarcb = &ipr_cmd->ioarcb;
6622 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6623 ipr_cmd->scsi_cmd = scsi_cmd;
6624 ipr_cmd->done = ipr_scsi_eh_done;
6626 if (ipr_is_gscsi(res)) {
6627 if (scsi_cmd->underflow == 0)
6628 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6630 if (res->reset_occurred) {
6631 res->reset_occurred = 0;
6632 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6636 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6637 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6639 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6640 if (scsi_cmd->flags & SCMD_TAGGED)
6641 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6643 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6646 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6647 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6648 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6650 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6651 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6653 if (scsi_cmd->underflow == 0)
6654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6658 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6660 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6662 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6663 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6664 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6665 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6667 scsi_dma_unmap(scsi_cmd);
6668 return SCSI_MLQUEUE_HOST_BUSY;
6671 if (unlikely(hrrq->ioa_is_dead)) {
6672 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6673 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6674 scsi_dma_unmap(scsi_cmd);
6678 ioarcb->res_handle = res->res_handle;
6679 if (res->needs_sync_complete) {
6680 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6681 res->needs_sync_complete = 0;
6683 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6684 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6685 ipr_send_command(ipr_cmd);
6686 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6690 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6691 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6692 scsi_cmd->result = (DID_NO_CONNECT << 16);
6693 scsi_done(scsi_cmd);
6694 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
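/*
 * Illustrative sketch (not part of the driver): ipr_queuecommand() above
 * spreads commands across the host RRQs via ipr_get_hrrq_index().  A
 * simple round-robin selection that keeps queue 0 for internal commands
 * might look like the following; this is an assumption for illustration
 * and the driver's actual helper may differ.
 */
static unsigned int example_pick_hrrq(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		return 0;

	/* rotate over queues 1 .. hrrq_num - 1 */
	hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
	return (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
}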
6699 * ipr_ioctl - IOCTL handler
6700 * @sdev: scsi device struct
6705 * 0 on success / other on failure
6707 static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6710 struct ipr_resource_entry *res;
6712 res = (struct ipr_resource_entry *)sdev->hostdata;
6713 if (res && ipr_is_gata(res)) {
6714 if (cmd == HDIO_GET_IDENTITY)
6716 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6723 * ipr_ioa_info - Get information about the card/driver
6724 * @host: scsi host struct
6727 * pointer to buffer with description string
6729 static const char *ipr_ioa_info(struct Scsi_Host *host)
6731 static char buffer[512];
6732 struct ipr_ioa_cfg *ioa_cfg;
6733 unsigned long lock_flags = 0;
6735 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6737 spin_lock_irqsave(host->host_lock, lock_flags);
6738 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6739 spin_unlock_irqrestore(host->host_lock, lock_flags);
6744 static struct scsi_host_template driver_template = {
6745 .module = THIS_MODULE,
6747 .info = ipr_ioa_info,
6749 #ifdef CONFIG_COMPAT
6750 .compat_ioctl = ipr_ioctl,
6752 .queuecommand = ipr_queuecommand,
6753 .dma_need_drain = ata_scsi_dma_need_drain,
6754 .eh_abort_handler = ipr_eh_abort,
6755 .eh_device_reset_handler = ipr_eh_dev_reset,
6756 .eh_host_reset_handler = ipr_eh_host_reset,
6757 .slave_alloc = ipr_slave_alloc,
6758 .slave_configure = ipr_slave_configure,
6759 .slave_destroy = ipr_slave_destroy,
6760 .scan_finished = ipr_scan_finished,
6761 .target_alloc = ipr_target_alloc,
6762 .target_destroy = ipr_target_destroy,
6763 .change_queue_depth = ipr_change_queue_depth,
6764 .bios_param = ipr_biosparam,
6765 .can_queue = IPR_MAX_COMMANDS,
6767 .sg_tablesize = IPR_MAX_SGLIST,
6768 .max_sectors = IPR_IOA_MAX_SECTORS,
6769 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6770 .shost_groups = ipr_ioa_groups,
6771 .sdev_groups = ipr_dev_groups,
6772 .proc_name = IPR_NAME,
6776 * ipr_ata_phy_reset - libata phy_reset handler
6777 * @ap: ata port to reset
6780 static void ipr_ata_phy_reset(struct ata_port *ap)
6782 unsigned long flags;
6783 struct ipr_sata_port *sata_port = ap->private_data;
6784 struct ipr_resource_entry *res = sata_port->res;
6785 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6789 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6790 while (ioa_cfg->in_reset_reload) {
6791 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6792 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6793 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6796 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6799 rc = ipr_device_reset(ioa_cfg, res);
6802 ap->link.device[0].class = ATA_DEV_NONE;
6806 ap->link.device[0].class = res->ata_class;
6807 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6808 ap->link.device[0].class = ATA_DEV_NONE;
6811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6816 * ipr_ata_post_internal - Cleanup after an internal command
6817 * @qc: ATA queued command
6822 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6824 struct ipr_sata_port *sata_port = qc->ap->private_data;
6825 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6826 struct ipr_cmnd *ipr_cmd;
6827 struct ipr_hrr_queue *hrrq;
6828 unsigned long flags;
6830 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6831 while (ioa_cfg->in_reset_reload) {
6832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6833 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6834 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6837 for_each_hrrq(hrrq, ioa_cfg) {
6838 spin_lock(&hrrq->_lock);
6839 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6840 if (ipr_cmd->qc == qc) {
6841 ipr_device_reset(ioa_cfg, sata_port->res);
6845 spin_unlock(&hrrq->_lock);
6847 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6851 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6852 * @regs: destination
6853 * @tf: source ATA taskfile
6858 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6859 struct ata_taskfile *tf)
6861 regs->feature = tf->feature;
6862 regs->nsect = tf->nsect;
6863 regs->lbal = tf->lbal;
6864 regs->lbam = tf->lbam;
6865 regs->lbah = tf->lbah;
6866 regs->device = tf->device;
6867 regs->command = tf->command;
6868 regs->hob_feature = tf->hob_feature;
6869 regs->hob_nsect = tf->hob_nsect;
6870 regs->hob_lbal = tf->hob_lbal;
6871 regs->hob_lbam = tf->hob_lbam;
6872 regs->hob_lbah = tf->hob_lbah;
6873 regs->ctl = tf->ctl;
6877 * ipr_sata_done - done function for SATA commands
6878 * @ipr_cmd: ipr command struct
6880 * This function is invoked by the interrupt handler for
6881 * ops generated by the SCSI mid-layer to SATA devices
6886 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6888 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6889 struct ata_queued_cmd *qc = ipr_cmd->qc;
6890 struct ipr_sata_port *sata_port = qc->ap->private_data;
6891 struct ipr_resource_entry *res = sata_port->res;
6892 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6894 spin_lock(&ipr_cmd->hrrq->_lock);
6895 if (ipr_cmd->ioa_cfg->sis64)
6896 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6897 sizeof(struct ipr_ioasa_gata));
6899 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6900 sizeof(struct ipr_ioasa_gata));
6901 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6903 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6904 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6906 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6907 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6909 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6910 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6911 spin_unlock(&ipr_cmd->hrrq->_lock);
6912 ata_qc_complete(qc);
6916 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6917 * @ipr_cmd: ipr command struct
6918 * @qc: ATA queued command
6921 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6922 struct ata_queued_cmd *qc)
6924 u32 ioadl_flags = 0;
6925 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6926 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6927 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6928 int len = qc->nbytes;
6929 struct scatterlist *sg;
6931 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6936 if (qc->dma_dir == DMA_TO_DEVICE) {
6937 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6938 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6939 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6940 ioadl_flags = IPR_IOADL_FLAGS_READ;
6942 ioarcb->data_transfer_length = cpu_to_be32(len);
6944 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6945 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6946 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6948 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6949 ioadl64->flags = cpu_to_be32(ioadl_flags);
6950 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6951 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6953 last_ioadl64 = ioadl64;
6957 if (likely(last_ioadl64))
6958 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6962 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6963 * @ipr_cmd: ipr command struct
6964 * @qc: ATA queued command
6967 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6968 struct ata_queued_cmd *qc)
6970 u32 ioadl_flags = 0;
6971 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6972 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6973 struct ipr_ioadl_desc *last_ioadl = NULL;
6974 int len = qc->nbytes;
6975 struct scatterlist *sg;
6981 if (qc->dma_dir == DMA_TO_DEVICE) {
6982 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6983 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6984 ioarcb->data_transfer_length = cpu_to_be32(len);
6986 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6987 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6988 ioadl_flags = IPR_IOADL_FLAGS_READ;
6989 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6990 ioarcb->read_ioadl_len =
6991 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6994 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6995 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6996 ioadl->address = cpu_to_be32(sg_dma_address(sg));
7002 if (likely(last_ioadl))
7003 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
7007 * ipr_qc_defer - Get a free ipr_cmd
7008 * @qc: queued command
7013 static int ipr_qc_defer(struct ata_queued_cmd *qc)
7015 struct ata_port *ap = qc->ap;
7016 struct ipr_sata_port *sata_port = ap->private_data;
7017 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7018 struct ipr_cmnd *ipr_cmd;
7019 struct ipr_hrr_queue *hrrq;
7022 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7023 hrrq = &ioa_cfg->hrrq[hrrq_id];
7025 qc->lldd_task = NULL;
7026 spin_lock(&hrrq->_lock);
7027 if (unlikely(hrrq->ioa_is_dead)) {
7028 spin_unlock(&hrrq->_lock);
7032 if (unlikely(!hrrq->allow_cmds)) {
7033 spin_unlock(&hrrq->_lock);
7034 return ATA_DEFER_LINK;
7037 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7038 if (ipr_cmd == NULL) {
7039 spin_unlock(&hrrq->_lock);
7040 return ATA_DEFER_LINK;
7043 qc->lldd_task = ipr_cmd;
7044 spin_unlock(&hrrq->_lock);
7049 * ipr_qc_issue - Issue a SATA qc to a device
7050 * @qc: queued command
7055 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7057 struct ata_port *ap = qc->ap;
7058 struct ipr_sata_port *sata_port = ap->private_data;
7059 struct ipr_resource_entry *res = sata_port->res;
7060 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7061 struct ipr_cmnd *ipr_cmd;
7062 struct ipr_ioarcb *ioarcb;
7063 struct ipr_ioarcb_ata_regs *regs;
7065 if (qc->lldd_task == NULL)
7068 ipr_cmd = qc->lldd_task;
7069 if (ipr_cmd == NULL)
7070 return AC_ERR_SYSTEM;
7072 qc->lldd_task = NULL;
7073 spin_lock(&ipr_cmd->hrrq->_lock);
7074 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7075 ipr_cmd->hrrq->ioa_is_dead)) {
7076 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7077 spin_unlock(&ipr_cmd->hrrq->_lock);
7078 return AC_ERR_SYSTEM;
7081 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7082 ioarcb = &ipr_cmd->ioarcb;
7084 if (ioa_cfg->sis64) {
7085 regs = &ipr_cmd->i.ata_ioadl.regs;
7086 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7088 regs = &ioarcb->u.add_data.u.regs;
7090 memset(regs, 0, sizeof(*regs));
7091 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7093 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7095 ipr_cmd->done = ipr_sata_done;
7096 ipr_cmd->ioarcb.res_handle = res->res_handle;
7097 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7098 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7099 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7100 ipr_cmd->dma_use_sg = qc->n_elem;
7103 ipr_build_ata_ioadl64(ipr_cmd, qc);
7105 ipr_build_ata_ioadl(ipr_cmd, qc);
7107 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7108 ipr_copy_sata_tf(regs, &qc->tf);
7109 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7110 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7112 switch (qc->tf.protocol) {
7113 case ATA_PROT_NODATA:
7118 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7121 case ATAPI_PROT_PIO:
7122 case ATAPI_PROT_NODATA:
7123 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7126 case ATAPI_PROT_DMA:
7127 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7128 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7133 spin_unlock(&ipr_cmd->hrrq->_lock);
7134 return AC_ERR_INVALID;
7137 ipr_send_command(ipr_cmd);
7138 spin_unlock(&ipr_cmd->hrrq->_lock);
7144 * ipr_qc_fill_rtf - Read result TF
7145 * @qc: ATA queued command
7150 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7152 struct ipr_sata_port *sata_port = qc->ap->private_data;
7153 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7154 struct ata_taskfile *tf = &qc->result_tf;
7156 tf->feature = g->error;
7157 tf->nsect = g->nsect;
7161 tf->device = g->device;
7162 tf->command = g->status;
7163 tf->hob_nsect = g->hob_nsect;
7164 tf->hob_lbal = g->hob_lbal;
7165 tf->hob_lbam = g->hob_lbam;
7166 tf->hob_lbah = g->hob_lbah;
7171 static struct ata_port_operations ipr_sata_ops = {
7172 .phy_reset = ipr_ata_phy_reset,
7173 .hardreset = ipr_sata_reset,
7174 .post_internal_cmd = ipr_ata_post_internal,
7175 .qc_prep = ata_noop_qc_prep,
7176 .qc_defer = ipr_qc_defer,
7177 .qc_issue = ipr_qc_issue,
7178 .qc_fill_rtf = ipr_qc_fill_rtf,
7179 .port_start = ata_sas_port_start,
7180 .port_stop = ata_sas_port_stop
7183 static struct ata_port_info sata_port_info = {
7184 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7186 .pio_mask = ATA_PIO4_ONLY,
7187 .mwdma_mask = ATA_MWDMA2,
7188 .udma_mask = ATA_UDMA6,
7189 .port_ops = &ipr_sata_ops
7192 #ifdef CONFIG_PPC_PSERIES
7193 static const u16 ipr_blocked_processors[] = {
7205 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7206 * @ioa_cfg: ioa cfg struct
7208 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7209 * certain pSeries hardware. This function determines if the given
7210 * adapter is in one of these configurations or not.
7213 * 1 if adapter is not supported / 0 if adapter is supported
7215 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7219 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7220 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7221 if (pvr_version_is(ipr_blocked_processors[i]))
7228 #define ipr_invalid_adapter(ioa_cfg) 0
7232 * ipr_ioa_bringdown_done - IOA bring down completion.
7233 * @ipr_cmd: ipr command struct
7235 * This function processes the completion of an adapter bring down.
7236 * It wakes any reset sleepers.
7241 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7243 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7247 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7249 ioa_cfg->scsi_unblock = 1;
7250 schedule_work(&ioa_cfg->work_q);
7253 ioa_cfg->in_reset_reload = 0;
7254 ioa_cfg->reset_retries = 0;
7255 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7256 spin_lock(&ioa_cfg->hrrq[i]._lock);
7257 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7258 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7262 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7263 wake_up_all(&ioa_cfg->reset_wait_q);
7266 return IPR_RC_JOB_RETURN;
7270 * ipr_ioa_reset_done - IOA reset completion.
7271 * @ipr_cmd: ipr command struct
7273 * This function processes the completion of an adapter reset.
7274 * It schedules any necessary mid-layer add/removes and
7275 * wakes any reset sleepers.
7280 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7283 struct ipr_resource_entry *res;
7287 ioa_cfg->in_reset_reload = 0;
7288 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7289 spin_lock(&ioa_cfg->hrrq[j]._lock);
7290 ioa_cfg->hrrq[j].allow_cmds = 1;
7291 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7294 ioa_cfg->reset_cmd = NULL;
7295 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7297 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7298 if (res->add_to_ml || res->del_from_ml) {
7303 schedule_work(&ioa_cfg->work_q);
7305 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7306 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7307 if (j < IPR_NUM_LOG_HCAMS)
7308 ipr_send_hcam(ioa_cfg,
7309 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7310 ioa_cfg->hostrcb[j]);
7312 ipr_send_hcam(ioa_cfg,
7313 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7314 ioa_cfg->hostrcb[j]);
7317 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7318 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7320 ioa_cfg->reset_retries = 0;
7321 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7322 wake_up_all(&ioa_cfg->reset_wait_q);
7324 ioa_cfg->scsi_unblock = 1;
7325 schedule_work(&ioa_cfg->work_q);
7327 return IPR_RC_JOB_RETURN;
7331 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7332 * @supported_dev: supported device struct
7333 * @vpids: vendor product id struct
7338 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7339 struct ipr_std_inq_vpids *vpids)
7341 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7342 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7343 supported_dev->num_records = 1;
7344 supported_dev->data_length =
7345 cpu_to_be16(sizeof(struct ipr_supported_device));
7346 supported_dev->reserved = 0;
7350 * ipr_set_supported_devs - Send Set Supported Devices for a device
7351 * @ipr_cmd: ipr command struct
7353 * This function sends a Set Supported Devices to the adapter
7356 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7358 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7361 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7362 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7363 struct ipr_resource_entry *res = ipr_cmd->u.res;
7365 ipr_cmd->job_step = ipr_ioa_reset_done;
7367 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7368 if (!ipr_is_scsi_disk(res))
7371 ipr_cmd->u.res = res;
7372 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7374 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7375 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7376 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7378 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7379 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7380 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7381 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7383 ipr_init_ioadl(ipr_cmd,
7384 ioa_cfg->vpd_cbs_dma +
7385 offsetof(struct ipr_misc_cbs, supp_dev),
7386 sizeof(struct ipr_supported_device),
7387 IPR_IOADL_FLAGS_WRITE_LAST);
7389 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7390 IPR_SET_SUP_DEVICE_TIMEOUT);
7392 if (!ioa_cfg->sis64)
7393 ipr_cmd->job_step = ipr_set_supported_devs;
7395 return IPR_RC_JOB_RETURN;
7399 return IPR_RC_JOB_CONTINUE;
7403 * ipr_get_mode_page - Locate specified mode page
7404 * @mode_pages: mode page buffer
7405 * @page_code: page code to find
7406 * @len: minimum required length for mode page
7409 * pointer to mode page / NULL on failure
7411 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7412 u32 page_code, u32 len)
7414 struct ipr_mode_page_hdr *mode_hdr;
7418 if (!mode_pages || (mode_pages->hdr.length == 0))
7421 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7422 mode_hdr = (struct ipr_mode_page_hdr *)
7423 (mode_pages->data + mode_pages->hdr.block_desc_len);
7426 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7427 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7431 page_length = (sizeof(struct ipr_mode_page_hdr) +
7432 mode_hdr->page_length);
7433 length -= page_length;
7434 mode_hdr = (struct ipr_mode_page_hdr *)
7435 ((unsigned long)mode_hdr + page_length);
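/*
 * Illustrative usage (not part of the driver): callers look a page up by
 * code and minimum length and should be prepared for a NULL return when
 * the IOAFP does not report that page.  A hypothetical caller:
 */
static void example_find_page28(struct ipr_mode_pages *mode_pages)
{
	struct ipr_mode_page28 *page;

	page = ipr_get_mode_page(mode_pages, 0x28,
				 sizeof(struct ipr_mode_page28));
	if (!page)
		return;	/* page not reported by the IOAFP */

	/* ... walk page->bus entries here ... */
}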
7442 * ipr_check_term_power - Check for term power errors
7443 * @ioa_cfg: ioa config struct
7444 * @mode_pages: IOAFP mode pages buffer
7446 * Check the IOAFP's mode page 28 for term power errors
7451 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7452 struct ipr_mode_pages *mode_pages)
7456 struct ipr_dev_bus_entry *bus;
7457 struct ipr_mode_page28 *mode_page;
7459 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7460 sizeof(struct ipr_mode_page28));
7462 entry_length = mode_page->entry_length;
7464 bus = mode_page->bus;
7466 for (i = 0; i < mode_page->num_entries; i++) {
7467 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7468 dev_err(&ioa_cfg->pdev->dev,
7469 "Term power is absent on scsi bus %d\n",
7473 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7478 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7479 * @ioa_cfg: ioa config struct
7481 * Looks through the config table checking for SES devices. If
7482 * the SES device is in the SES table indicating a maximum SCSI
7483 * bus speed, the speed is limited for the bus.
7488 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7493 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7494 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7495 ioa_cfg->bus_attr[i].bus_width);
7497 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7498 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7503 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7504 * @ioa_cfg: ioa config struct
7505 * @mode_pages: mode page 28 buffer
7507 * Updates mode page 28 based on driver configuration
7512 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7513 struct ipr_mode_pages *mode_pages)
7515 int i, entry_length;
7516 struct ipr_dev_bus_entry *bus;
7517 struct ipr_bus_attributes *bus_attr;
7518 struct ipr_mode_page28 *mode_page;
7520 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7521 sizeof(struct ipr_mode_page28));
7523 entry_length = mode_page->entry_length;
7525 /* Loop for each device bus entry */
7526 for (i = 0, bus = mode_page->bus;
7527 i < mode_page->num_entries;
7528 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7529 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7530 dev_err(&ioa_cfg->pdev->dev,
7531 "Invalid resource address reported: 0x%08X\n",
7532 IPR_GET_PHYS_LOC(bus->res_addr));
7536 bus_attr = &ioa_cfg->bus_attr[i];
7537 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7538 bus->bus_width = bus_attr->bus_width;
7539 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7540 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7541 if (bus_attr->qas_enabled)
7542 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7544 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7549 * ipr_build_mode_select - Build a mode select command
7550 * @ipr_cmd: ipr command struct
7551 * @res_handle: resource handle to send command to
7552 * @parm: Byte 2 of Mode Sense command
7553 * @dma_addr: DMA buffer address
7554 * @xfer_len: data transfer length
7559 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7560 __be32 res_handle, u8 parm,
7561 dma_addr_t dma_addr, u8 xfer_len)
7563 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7565 ioarcb->res_handle = res_handle;
7566 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7567 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7568 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7569 ioarcb->cmd_pkt.cdb[1] = parm;
7570 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7572 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7576 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7577 * @ipr_cmd: ipr command struct
7579 * This function sets up the SCSI bus attributes and sends
7580 * a Mode Select for Page 28 to activate them.
7585 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7588 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7592 ipr_scsi_bus_speed_limit(ioa_cfg);
7593 ipr_check_term_power(ioa_cfg, mode_pages);
7594 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7595 length = mode_pages->hdr.length + 1;
7596 mode_pages->hdr.length = 0;
7598 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7599 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7602 ipr_cmd->job_step = ipr_set_supported_devs;
7603 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7604 struct ipr_resource_entry, queue);
7605 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7608 return IPR_RC_JOB_RETURN;
7612 * ipr_build_mode_sense - Builds a mode sense command
7613 * @ipr_cmd: ipr command struct
7614 * @res_handle: resource entry struct
7615 * @parm: Byte 2 of mode sense command
7616 * @dma_addr: DMA address of mode sense buffer
7617 * @xfer_len: Size of DMA buffer
7622 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7624 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7626 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7628 ioarcb->res_handle = res_handle;
7629 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7630 ioarcb->cmd_pkt.cdb[2] = parm;
7631 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7632 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7634 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7638 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7639 * @ipr_cmd: ipr command struct
7641 * This function handles the failure of an IOA bringup command.
7646 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7648 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7649 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7651 dev_err(&ioa_cfg->pdev->dev,
7652 "0x%02X failed with IOASC: 0x%08X\n",
7653 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7655 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7656 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7657 return IPR_RC_JOB_RETURN;
7661 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7662 * @ipr_cmd: ipr command struct
7664 * This function handles the failure of a Mode Sense to the IOAFP.
7665 * Some adapters do not handle all mode pages.
7668 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7670 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7673 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7675 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7676 ipr_cmd->job_step = ipr_set_supported_devs;
7677 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7678 struct ipr_resource_entry, queue);
7679 return IPR_RC_JOB_CONTINUE;
7682 return ipr_reset_cmd_failed(ipr_cmd);
7686 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7687 * @ipr_cmd: ipr command struct
7689 * This function sends a Page 28 mode sense to the IOA to
7690 * retrieve SCSI bus attributes.
7695 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7697 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7700 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7701 0x28, ioa_cfg->vpd_cbs_dma +
7702 offsetof(struct ipr_misc_cbs, mode_pages),
7703 sizeof(struct ipr_mode_pages));
7705 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7706 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7708 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7711 return IPR_RC_JOB_RETURN;
7715 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7716 * @ipr_cmd: ipr command struct
7718 * This function enables dual IOA RAID support if possible.
7723 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7725 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7726 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7727 struct ipr_mode_page24 *mode_page;
7731 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7732 sizeof(struct ipr_mode_page24));
7735 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7737 length = mode_pages->hdr.length + 1;
7738 mode_pages->hdr.length = 0;
7740 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7741 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7744 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7745 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7748 return IPR_RC_JOB_RETURN;
7752 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7753 * @ipr_cmd: ipr command struct
7755 * This function handles the failure of a Mode Sense to the IOAFP.
7756 * Some adapters do not handle all mode pages.
7759 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7761 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7763 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7765 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7766 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7767 return IPR_RC_JOB_CONTINUE;
7770 return ipr_reset_cmd_failed(ipr_cmd);
7774 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7775 * @ipr_cmd: ipr command struct
7777 * This function sends a mode sense to the IOA to retrieve
7778 * the IOA Advanced Function Control mode page.
7783 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7785 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7788 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7789 0x24, ioa_cfg->vpd_cbs_dma +
7790 offsetof(struct ipr_misc_cbs, mode_pages),
7791 sizeof(struct ipr_mode_pages));
7793 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7794 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7796 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7799 return IPR_RC_JOB_RETURN;
7803 * ipr_init_res_table - Initialize the resource table
7804 * @ipr_cmd: ipr command struct
7806 * This function looks through the existing resource table, comparing
7807 * it with the config table. This function will take care of old/new
7808 * devices and schedule adding/removing them from the mid-layer
7812 * IPR_RC_JOB_CONTINUE
7814 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7816 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7817 struct ipr_resource_entry *res, *temp;
7818 struct ipr_config_table_entry_wrapper cfgtew;
7819 int entries, found, flag, i;
7824 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7826 flag = ioa_cfg->u.cfg_table->hdr.flags;
7828 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7829 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7831 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7832 list_move_tail(&res->queue, &old_res);
7835 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7837 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7839 for (i = 0; i < entries; i++) {
7841 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7843 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7846 list_for_each_entry_safe(res, temp, &old_res, queue) {
7847 if (ipr_is_same_device(res, &cfgtew)) {
7848 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7855 if (list_empty(&ioa_cfg->free_res_q)) {
7856 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7861 res = list_entry(ioa_cfg->free_res_q.next,
7862 struct ipr_resource_entry, queue);
7863 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7864 ipr_init_res_entry(res, &cfgtew);
7866 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7867 res->sdev->allow_restart = 1;
7870 ipr_update_res_entry(res, &cfgtew);
7873 list_for_each_entry_safe(res, temp, &old_res, queue) {
7875 res->del_from_ml = 1;
7876 res->res_handle = IPR_INVALID_RES_HANDLE;
7877 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7881 list_for_each_entry_safe(res, temp, &old_res, queue) {
7882 ipr_clear_res_target(res);
7883 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7886 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7887 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7889 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7892 return IPR_RC_JOB_CONTINUE;
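/*
 * Illustrative sketch (not part of the driver): ipr_init_res_table()
 * above reconciles the driver's resource list with the freshly read
 * config table by moving every known entry onto a temporary "old" list,
 * matching entries back as they are found, and treating whatever is
 * left over as deleted.  A generic form of the pattern, on a
 * hypothetical structure, looks like this:
 */
struct example_node {
	struct list_head queue;
	u32 handle;
};

static void example_reconcile(struct list_head *known,
			      const u32 *new_handles, int count)
{
	struct example_node *n, *tmp;
	LIST_HEAD(old);
	int i;

	list_splice_init(known, &old);

	for (i = 0; i < count; i++) {
		list_for_each_entry_safe(n, tmp, &old, queue) {
			if (n->handle == new_handles[i]) {
				/* still present: move it back */
				list_move_tail(&n->queue, known);
				break;
			}
		}
	}

	/* anything left on "old" has disappeared from the table */
	list_for_each_entry_safe(n, tmp, &old, queue)
		list_del_init(&n->queue);
}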
7896 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7897 * @ipr_cmd: ipr command struct
7899 * This function sends a Query IOA Configuration command
7900 * to the adapter to retrieve the IOA configuration table.
7905 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7907 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7908 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7909 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7910 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7913 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7914 ioa_cfg->dual_raid = 1;
7915 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7916 ucode_vpd->major_release, ucode_vpd->card_type,
7917 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7918 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7919 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7921 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7922 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7923 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7924 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7926 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7927 IPR_IOADL_FLAGS_READ_LAST);
7929 ipr_cmd->job_step = ipr_init_res_table;
7931 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7934 return IPR_RC_JOB_RETURN;
7937 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7939 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7941 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7942 return IPR_RC_JOB_CONTINUE;
7944 return ipr_reset_cmd_failed(ipr_cmd);
7947 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7948 __be32 res_handle, u8 sa_code)
7950 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7952 ioarcb->res_handle = res_handle;
7953 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7954 ioarcb->cmd_pkt.cdb[1] = sa_code;
7955 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7959 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7961 * @ipr_cmd: ipr command struct
7966 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7968 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7969 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7970 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7974 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7976 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7977 ipr_build_ioa_service_action(ipr_cmd,
7978 cpu_to_be32(IPR_IOA_RES_HANDLE),
7979 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7981 ioarcb->cmd_pkt.cdb[2] = 0x40;
7983 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7984 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7985 IPR_SET_SUP_DEVICE_TIMEOUT);
7988 return IPR_RC_JOB_RETURN;
7992 return IPR_RC_JOB_CONTINUE;
7996 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7997 * @ipr_cmd: ipr command struct
7998 * @flags: flags to send
7999 * @page: page to inquire
8000 * @dma_addr: DMA address
8001 * @xfer_len: transfer data length
8003 * This utility function sends an inquiry to the adapter.
8008 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
8009 dma_addr_t dma_addr, u8 xfer_len)
8011 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8014 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8015 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8017 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8018 ioarcb->cmd_pkt.cdb[1] = flags;
8019 ioarcb->cmd_pkt.cdb[2] = page;
8020 ioarcb->cmd_pkt.cdb[4] = xfer_len;
8022 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
8024 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
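/*
 * Illustrative note (not part of the driver): the helper above builds a
 * standard 6-byte INQUIRY CDB.  When the callers below pass flags = 1
 * they are setting the EVPD bit, so byte 2 selects a VPD page (0x00,
 * 0x03, 0xC4, 0xD0, ...); with flags = 0 a standard inquiry is returned
 * and byte 2 must be 0.  Layout for reference:
 *
 *	cdb[0] = INQUIRY;	0x12
 *	cdb[1] = flags;		bit 0 = EVPD
 *	cdb[2] = page;		VPD page code (only with EVPD set)
 *	cdb[4] = xfer_len;	allocation length
 */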
8029 * ipr_inquiry_page_supported - Is the given inquiry page supported
8030 * @page0: inquiry page 0 buffer
8033 * This function determines if the specified inquiry page is supported.
8036 * 1 if page is supported / 0 if not
8038 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8042 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8043 if (page0->page[i] == page)
8050 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8051 * @ipr_cmd: ipr command struct
8053 * This function sends a Page 0xC4 inquiry to the adapter
8054 * to retrieve software VPD information.
8057 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8059 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8061 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8062 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8063 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8066 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8067 memset(pageC4, 0, sizeof(*pageC4));
8069 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8070 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8071 (ioa_cfg->vpd_cbs_dma
8072 + offsetof(struct ipr_misc_cbs,
8074 sizeof(struct ipr_inquiry_pageC4));
8075 return IPR_RC_JOB_RETURN;
8079 return IPR_RC_JOB_CONTINUE;
8083 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8084 * @ipr_cmd: ipr command struct
8086 * This function sends a Page 0xD0 inquiry to the adapter
8087 * to retrieve adapter capabilities.
8090 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8092 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8095 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8096 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8099 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8100 memset(cap, 0, sizeof(*cap));
8102 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8103 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8104 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8105 sizeof(struct ipr_inquiry_cap));
8106 return IPR_RC_JOB_RETURN;
8110 return IPR_RC_JOB_CONTINUE;
8114 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8115 * @ipr_cmd: ipr command struct
8117 * This function sends a Page 3 inquiry to the adapter
8118 * to retrieve software VPD information.
8121 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8123 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8129 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8131 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8132 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8133 sizeof(struct ipr_inquiry_page3));
8136 return IPR_RC_JOB_RETURN;
8140 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8141 * @ipr_cmd: ipr command struct
8143 * This function sends a Page 0 inquiry to the adapter
8144 * to retrieve supported inquiry pages.
8147 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8149 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8151 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8156 /* Grab the type out of the VPD and store it away */
8157 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8159 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8161 if (ipr_invalid_adapter(ioa_cfg)) {
8162 dev_err(&ioa_cfg->pdev->dev,
8163 "Adapter not supported in this hardware configuration.\n");
8165 if (!ipr_testmode) {
8166 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8167 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8168 list_add_tail(&ipr_cmd->queue,
8169 &ioa_cfg->hrrq->hrrq_free_q);
8170 return IPR_RC_JOB_RETURN;
8174 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8176 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8177 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8178 sizeof(struct ipr_inquiry_page0));
8181 return IPR_RC_JOB_RETURN;
8185 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8186 * @ipr_cmd: ipr command struct
8188 * This function sends a standard inquiry to the adapter.
8193 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8195 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8198 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8200 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8201 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8202 sizeof(struct ipr_ioa_vpd));
8205 return IPR_RC_JOB_RETURN;
8209 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8210 * @ipr_cmd: ipr command struct
8212 * This function sends an Identify Host Request Response Queue
8213 * command to establish the HRRQ with the adapter.
8218 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8220 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8221 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8222 struct ipr_hrr_queue *hrrq;
8225 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8226 if (ioa_cfg->identify_hrrq_index == 0)
8227 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8229 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8230 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8232 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8233 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8235 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8237 ioarcb->cmd_pkt.cdb[1] = 0x1;
8239 if (ioa_cfg->nvectors == 1)
8240 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8242 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8244 ioarcb->cmd_pkt.cdb[2] =
8245 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8246 ioarcb->cmd_pkt.cdb[3] =
8247 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8248 ioarcb->cmd_pkt.cdb[4] =
8249 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8250 ioarcb->cmd_pkt.cdb[5] =
8251 ((u64) hrrq->host_rrq_dma) & 0xff;
8252 ioarcb->cmd_pkt.cdb[7] =
8253 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8254 ioarcb->cmd_pkt.cdb[8] =
8255 (sizeof(u32) * hrrq->size) & 0xff;
8257 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8258 ioarcb->cmd_pkt.cdb[9] =
8259 ioa_cfg->identify_hrrq_index;
8261 if (ioa_cfg->sis64) {
8262 ioarcb->cmd_pkt.cdb[10] =
8263 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8264 ioarcb->cmd_pkt.cdb[11] =
8265 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8266 ioarcb->cmd_pkt.cdb[12] =
8267 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8268 ioarcb->cmd_pkt.cdb[13] =
8269 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8272 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8273 ioarcb->cmd_pkt.cdb[14] =
8274 ioa_cfg->identify_hrrq_index;
8276 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8277 IPR_INTERNAL_TIMEOUT);
8279 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8280 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8283 return IPR_RC_JOB_RETURN;
8287 return IPR_RC_JOB_CONTINUE;
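/*
 * Illustrative sketch (not part of the driver): ipr_ioafp_identify_hrrq()
 * above scatters the 64-bit host RRQ DMA address across cdb[2..5]
 * (bits 31..0) and, on SIS-64 adapters, cdb[10..13] (bits 63..32), most
 * significant byte first within each half.  The equivalent packing with
 * a loop (helper name hypothetical):
 */
static inline void example_pack_hrrq_addr(u8 *cdb, u64 dma)
{
	int i;

	for (i = 0; i < 4; i++) {
		cdb[2 + i] = (dma >> (24 - 8 * i)) & 0xff;	/* bits 31..0  */
		cdb[10 + i] = (dma >> (56 - 8 * i)) & 0xff;	/* bits 63..32 */
	}
}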
8291 * ipr_reset_timer_done - Adapter reset timer function
8292 * @t: Timer context used to fetch ipr command struct
8294 * Description: This function is used in adapter reset processing
8295 * for timing events. If the reset_cmd pointer in the IOA
8296 * config struct is not this adapter's, we are doing nested
8297 * resets and fail_all_ops will take care of freeing the
8303 static void ipr_reset_timer_done(struct timer_list *t)
8305 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8306 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8307 unsigned long lock_flags = 0;
8309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8311 if (ioa_cfg->reset_cmd == ipr_cmd) {
8312 list_del(&ipr_cmd->queue);
8313 ipr_cmd->done(ipr_cmd);
8316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8320 * ipr_reset_start_timer - Start a timer for adapter reset job
8321 * @ipr_cmd: ipr command struct
8322 * @timeout: timeout value
8324 * Description: This function is used in adapter reset processing
8325 * for timing events. If the reset_cmd pointer in the IOA
8326 * config struct is not this adapter's, we are doing nested
8327 * resets and fail_all_ops will take care of freeing the
8333 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8334 unsigned long timeout)
8338 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8339 ipr_cmd->done = ipr_reset_ioa_job;
8341 ipr_cmd->timer.expires = jiffies + timeout;
8342 ipr_cmd->timer.function = ipr_reset_timer_done;
8343 add_timer(&ipr_cmd->timer);
8347 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8348 * @ioa_cfg: ioa cfg struct
8353 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8355 struct ipr_hrr_queue *hrrq;
8357 for_each_hrrq(hrrq, ioa_cfg) {
8358 spin_lock(&hrrq->_lock);
8359 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8361 /* Initialize Host RRQ pointers */
8362 hrrq->hrrq_start = hrrq->host_rrq;
8363 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8364 hrrq->hrrq_curr = hrrq->hrrq_start;
8365 hrrq->toggle_bit = 1;
8366 spin_unlock(&hrrq->_lock);
8370 ioa_cfg->identify_hrrq_index = 0;
8371 if (ioa_cfg->hrrq_num == 1)
8372 atomic_set(&ioa_cfg->hrrq_index, 0);
8374 atomic_set(&ioa_cfg->hrrq_index, 1);
8376 /* Zero out config table */
8377 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8381 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8382 * @ipr_cmd: ipr command struct
8385 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8387 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8389 unsigned long stage, stage_time;
8391 volatile u32 int_reg;
8392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8395 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8396 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8397 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8399 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8401 /* sanity check the stage_time value */
8402 if (stage_time == 0)
8403 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8404 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8405 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8406 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8407 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8409 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8410 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8411 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8412 stage_time = ioa_cfg->transop_timeout;
8413 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8414 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8415 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8416 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8417 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8418 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8419 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8420 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8421 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8422 return IPR_RC_JOB_CONTINUE;
8426 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8427 ipr_cmd->timer.function = ipr_oper_timeout;
8428 ipr_cmd->done = ipr_reset_ioa_job;
8429 add_timer(&ipr_cmd->timer);
8431 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8433 return IPR_RC_JOB_RETURN;
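/*
 * Sketch of the 64-bit mask composition used above (helper name is
 * hypothetical): on SIS64 adapters the interrupt mask register is 64
 * bits wide, so one group of bits is shifted into the upper half and
 * OR-ed with the lower half, letting a single writeq() update both
 * halves at once.
 */
static inline u64 demo_make_mask64(u32 upper_bits, u32 lower_bits)
{
	return ((u64)upper_bits << 32) | lower_bits;
}
/* e.g. demo_make_mask64(IPR_PCII_IPL_STAGE_CHANGE, IPR_PCII_IOA_TRANS_TO_OPER) */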
8437 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8438 * @ipr_cmd: ipr command struct
8440 * This function reinitializes some control blocks and
8441 * enables destructive diagnostics on the adapter.
8446 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8448 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8449 volatile u32 int_reg;
8450 volatile u64 maskval;
8454 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8455 ipr_init_ioa_mem(ioa_cfg);
8457 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8458 spin_lock(&ioa_cfg->hrrq[i]._lock);
8459 ioa_cfg->hrrq[i].allow_interrupts = 1;
8460 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8462 if (ioa_cfg->sis64) {
8463 /* Set the adapter to the correct endian mode. */
8464 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8465 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8468 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8470 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8471 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8472 ioa_cfg->regs.clr_interrupt_mask_reg32);
8473 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8474 return IPR_RC_JOB_CONTINUE;
8477 /* Enable destructive diagnostics on IOA */
8478 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8480 if (ioa_cfg->sis64) {
8481 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8482 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8483 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8485 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8487 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8489 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8491 if (ioa_cfg->sis64) {
8492 ipr_cmd->job_step = ipr_reset_next_stage;
8493 return IPR_RC_JOB_CONTINUE;
8496 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8497 ipr_cmd->timer.function = ipr_oper_timeout;
8498 ipr_cmd->done = ipr_reset_ioa_job;
8499 add_timer(&ipr_cmd->timer);
8500 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8503 return IPR_RC_JOB_RETURN;
8507 * ipr_reset_wait_for_dump - Wait for a dump to time out.

8508 * @ipr_cmd: ipr command struct
8510 * This function is invoked when an adapter dump has run out
8511 * of processing time.
8514 * IPR_RC_JOB_CONTINUE
8516 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8520 if (ioa_cfg->sdt_state == GET_DUMP)
8521 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8522 else if (ioa_cfg->sdt_state == READ_DUMP)
8523 ioa_cfg->sdt_state = ABORT_DUMP;
8525 ioa_cfg->dump_timeout = 1;
8526 ipr_cmd->job_step = ipr_reset_alert;
8528 return IPR_RC_JOB_CONTINUE;
8532 * ipr_unit_check_no_data - Log a unit check/no data error log
8533 * @ioa_cfg: ioa config struct
8535 * Logs an error indicating the adapter unit checked, but for some
8536 * reason, we were unable to fetch the unit check buffer.
8541 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8543 ioa_cfg->errors_logged++;
8544 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8548 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8549 * @ioa_cfg: ioa config struct
8551 * Fetches the unit check buffer from the adapter by clocking the data
8552 * through the mailbox register.
8557 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8559 unsigned long mailbox;
8560 struct ipr_hostrcb *hostrcb;
8561 struct ipr_uc_sdt sdt;
8565 mailbox = readl(ioa_cfg->ioa_mailbox);
8567 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8568 ipr_unit_check_no_data(ioa_cfg);
8572 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8573 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8574 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8576 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8577 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8578 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8579 ipr_unit_check_no_data(ioa_cfg);
8583 /* Find length of the first sdt entry (UC buffer) */
8584 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8585 length = be32_to_cpu(sdt.entry[0].end_token);
8587 length = (be32_to_cpu(sdt.entry[0].end_token) -
8588 be32_to_cpu(sdt.entry[0].start_token)) &
8589 IPR_FMT2_MBX_ADDR_MASK;
8591 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8592 struct ipr_hostrcb, queue);
8593 list_del_init(&hostrcb->queue);
8594 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8596 rc = ipr_get_ldump_data_section(ioa_cfg,
8597 be32_to_cpu(sdt.entry[0].start_token),
8598 (__be32 *)&hostrcb->hcam,
8599 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8602 ipr_handle_log_data(ioa_cfg, hostrcb);
8603 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8604 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8605 ioa_cfg->sdt_state == GET_DUMP)
8606 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8608 ipr_unit_check_no_data(ioa_cfg);
8610 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8614 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8615 * @ipr_cmd: ipr command struct
8617 * Description: This function will call to get the unit check buffer.
8622 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8627 ioa_cfg->ioa_unit_checked = 0;
8628 ipr_get_unit_check_buffer(ioa_cfg);
8629 ipr_cmd->job_step = ipr_reset_alert;
8630 ipr_reset_start_timer(ipr_cmd, 0);
8633 return IPR_RC_JOB_RETURN;
8636 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8642 if (ioa_cfg->sdt_state != GET_DUMP)
8643 return IPR_RC_JOB_RETURN;
8645 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8646 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8647 IPR_PCII_MAILBOX_STABLE)) {
8649 if (!ipr_cmd->u.time_left)
8650 dev_err(&ioa_cfg->pdev->dev,
8651 "Timed out waiting for Mailbox register.\n");
8653 ioa_cfg->sdt_state = READ_DUMP;
8654 ioa_cfg->dump_timeout = 0;
8656 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8658 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8659 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8660 schedule_work(&ioa_cfg->work_q);
8663 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8664 ipr_reset_start_timer(ipr_cmd,
8665 IPR_CHECK_FOR_RESET_TIMEOUT);
8669 return IPR_RC_JOB_RETURN;
8673 * ipr_reset_restore_cfg_space - Restore PCI config space.
8674 * @ipr_cmd: ipr command struct
8676 * Description: This function restores the saved PCI config space of
8677 * the adapter, fails all outstanding ops back to the callers, and
8678 * fetches the dump/unit check if applicable to this reset.
8681 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8683 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8685 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8688 ioa_cfg->pdev->state_saved = true;
8689 pci_restore_state(ioa_cfg->pdev);
8691 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8692 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8693 return IPR_RC_JOB_CONTINUE;
8696 ipr_fail_all_ops(ioa_cfg);
8698 if (ioa_cfg->sis64) {
8699 /* Set the adapter to the correct endian mode. */
8700 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8701 readl(ioa_cfg->regs.endian_swap_reg);
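		/* Reading the register back pushes the posted MMIO write of
		 * the endian swap key out to the adapter before the reset
		 * job continues. */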
8704 if (ioa_cfg->ioa_unit_checked) {
8705 if (ioa_cfg->sis64) {
8706 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8707 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8708 return IPR_RC_JOB_RETURN;
8710 ioa_cfg->ioa_unit_checked = 0;
8711 ipr_get_unit_check_buffer(ioa_cfg);
8712 ipr_cmd->job_step = ipr_reset_alert;
8713 ipr_reset_start_timer(ipr_cmd, 0);
8714 return IPR_RC_JOB_RETURN;
8718 if (ioa_cfg->in_ioa_bringdown) {
8719 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8720 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8721 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8722 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8724 ipr_cmd->job_step = ipr_reset_enable_ioa;
8728 return IPR_RC_JOB_CONTINUE;
8732 * ipr_reset_bist_done - BIST has completed on the adapter.
8733 * @ipr_cmd: ipr command struct
8735 * Description: Unblock config space and resume the reset process.
8738 * IPR_RC_JOB_CONTINUE
8740 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8742 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8745 if (ioa_cfg->cfg_locked)
8746 pci_cfg_access_unlock(ioa_cfg->pdev);
8747 ioa_cfg->cfg_locked = 0;
8748 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8750 return IPR_RC_JOB_CONTINUE;
8754 * ipr_reset_start_bist - Run BIST on the adapter.
8755 * @ipr_cmd: ipr command struct
8757 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8760 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8762 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8764 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8765 int rc = PCIBIOS_SUCCESSFUL;
8768 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8769 writel(IPR_UPROCI_SIS64_START_BIST,
8770 ioa_cfg->regs.set_uproc_interrupt_reg32);
8772 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8774 if (rc == PCIBIOS_SUCCESSFUL) {
8775 ipr_cmd->job_step = ipr_reset_bist_done;
8776 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8777 rc = IPR_RC_JOB_RETURN;
8779 if (ioa_cfg->cfg_locked)
8780 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8781 ioa_cfg->cfg_locked = 0;
8782 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8783 rc = IPR_RC_JOB_CONTINUE;
8791 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8792 * @ipr_cmd: ipr command struct
8794 * Description: This clears PCI reset to the adapter and delays two seconds.
8799 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8802 ipr_cmd->job_step = ipr_reset_bist_done;
8803 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8805 return IPR_RC_JOB_RETURN;
8809 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8810 * @work: work struct
8812 * Description: This pulses a warm reset to the PCI slot.
8815 static void ipr_reset_reset_work(struct work_struct *work)
8817 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8818 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8819 struct pci_dev *pdev = ioa_cfg->pdev;
8820 unsigned long lock_flags = 0;
8823 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8824 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8825 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8827 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8828 if (ioa_cfg->reset_cmd == ipr_cmd)
8829 ipr_reset_ioa_job(ipr_cmd);
8830 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8835 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8836 * @ipr_cmd: ipr command struct
8838 * Description: This asserts PCI reset to the adapter.
8843 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8845 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8848 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8849 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8850 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8852 return IPR_RC_JOB_RETURN;
8856 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8857 * @ipr_cmd: ipr command struct
8859 * Description: This attempts to block config access to the IOA.
8862 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8864 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8866 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8867 int rc = IPR_RC_JOB_CONTINUE;
8869 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8870 ioa_cfg->cfg_locked = 1;
8871 ipr_cmd->job_step = ioa_cfg->reset;
8873 if (ipr_cmd->u.time_left) {
8874 rc = IPR_RC_JOB_RETURN;
8875 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8876 ipr_reset_start_timer(ipr_cmd,
8877 IPR_CHECK_FOR_RESET_TIMEOUT);
8879 ipr_cmd->job_step = ioa_cfg->reset;
8880 dev_err(&ioa_cfg->pdev->dev,
8881 "Timed out waiting to lock config access. Resetting anyway.\n");
8889 * ipr_reset_block_config_access - Block config access to the IOA
8890 * @ipr_cmd: ipr command struct
8892 * Description: This attempts to block config access to the IOA
8895 * IPR_RC_JOB_CONTINUE
8897 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8899 ipr_cmd->ioa_cfg->cfg_locked = 0;
8900 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8901 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8902 return IPR_RC_JOB_CONTINUE;
8906 * ipr_reset_allowed - Query whether or not IOA can be reset
8907 * @ioa_cfg: ioa config struct
8910 * 0 if reset not allowed / non-zero if reset is allowed
8912 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8914 volatile u32 temp_reg;
8916 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8917 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8921 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8922 * @ipr_cmd: ipr command struct
8924 * Description: This function waits for adapter permission to run BIST,
8925 * then runs BIST. If the adapter does not give permission after a
8926 * reasonable time, we will reset the adapter anyway. The impact of
8927 * resetting the adapter without warning it is the risk of
8928 * losing the persistent error log on the adapter. If the adapter is
8929 * reset while it is writing to the flash on the adapter, the flash
8930 * segment will have bad ECC and be zeroed.
8933 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8935 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8937 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8938 int rc = IPR_RC_JOB_RETURN;
8940 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8941 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8942 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8944 ipr_cmd->job_step = ipr_reset_block_config_access;
8945 rc = IPR_RC_JOB_CONTINUE;
8952 * ipr_reset_alert - Alert the adapter of a pending reset
8953 * @ipr_cmd: ipr command struct
8955 * Description: This function alerts the adapter that it will be reset.
8956 * If memory space is not currently enabled, proceed directly
8957 * to running BIST on the adapter. The timer must always be started
8958 * so we guarantee we do not run BIST from ipr_isr.
8963 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8970 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8972 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8973 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8974 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8975 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8977 ipr_cmd->job_step = ipr_reset_block_config_access;
8980 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8981 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8984 return IPR_RC_JOB_RETURN;
8988 * ipr_reset_quiesce_done - Complete IOA disconnect
8989 * @ipr_cmd: ipr command struct
8991 * Description: Freeze the adapter to complete quiesce processing
8994 * IPR_RC_JOB_CONTINUE
8996 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8998 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9001 ipr_cmd->job_step = ipr_ioa_bringdown_done;
9002 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9004 return IPR_RC_JOB_CONTINUE;
9008 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9009 * @ipr_cmd: ipr command struct
9011 * Description: Ensure nothing is outstanding to the IOA and
9012 * proceed with IOA disconnect. Otherwise reset the IOA.
9015 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9017 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9020 struct ipr_cmnd *loop_cmd;
9021 struct ipr_hrr_queue *hrrq;
9022 int rc = IPR_RC_JOB_CONTINUE;
9026 ipr_cmd->job_step = ipr_reset_quiesce_done;
9028 for_each_hrrq(hrrq, ioa_cfg) {
9029 spin_lock(&hrrq->_lock);
9030 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9032 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9033 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9034 rc = IPR_RC_JOB_RETURN;
9037 spin_unlock(&hrrq->_lock);
9048 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9049 * @ipr_cmd: ipr command struct
9051 * Description: Cancel any outstanding HCAMs to the IOA.
9054 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9056 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9058 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9059 int rc = IPR_RC_JOB_CONTINUE;
9060 struct ipr_cmd_pkt *cmd_pkt;
9061 struct ipr_cmnd *hcam_cmd;
9062 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9065 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9067 if (!hrrq->ioa_is_dead) {
9068 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9069 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9070 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9073 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9074 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9075 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9076 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9077 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9078 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9079 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9080 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9081 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9082 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9083 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9084 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9085 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9086 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9088 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9089 IPR_CANCEL_TIMEOUT);
9091 rc = IPR_RC_JOB_RETURN;
9092 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9097 ipr_cmd->job_step = ipr_reset_alert;
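/*
 * Sketch of the CDB layout built above (helper name is hypothetical):
 * the 64-bit IOARCB DMA address of the HCAM being cancelled is carried
 * in the cancel CDB with the low 32 bits in bytes 2-5 and the high 32
 * bits in bytes 10-13, each group most significant byte first.
 */
static void demo_pack_cancel_addr(u8 *cdb, u64 addr)
{
	cdb[2]  = (addr >> 24) & 0xff;
	cdb[3]  = (addr >> 16) & 0xff;
	cdb[4]  = (addr >> 8) & 0xff;
	cdb[5]  = addr & 0xff;
	cdb[10] = (addr >> 56) & 0xff;
	cdb[11] = (addr >> 48) & 0xff;
	cdb[12] = (addr >> 40) & 0xff;
	cdb[13] = (addr >> 32) & 0xff;
}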
9104 * ipr_reset_ucode_download_done - Microcode download completion
9105 * @ipr_cmd: ipr command struct
9107 * Description: This function unmaps the microcode download buffer.
9110 * IPR_RC_JOB_CONTINUE
9112 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9114 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9115 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9117 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9118 sglist->num_sg, DMA_TO_DEVICE);
9120 ipr_cmd->job_step = ipr_reset_alert;
9121 return IPR_RC_JOB_CONTINUE;
9125 * ipr_reset_ucode_download - Download microcode to the adapter
9126 * @ipr_cmd: ipr command struct
9128 * Description: This function checks to see if there is microcode
9129 * to download to the adapter. If there is, a download is performed.
9132 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9134 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9136 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9137 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9140 ipr_cmd->job_step = ipr_reset_alert;
9143 return IPR_RC_JOB_CONTINUE;
9145 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9146 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9147 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9148 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
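	/* Bytes 6-8 of the SCSI WRITE BUFFER CDB carry the parameter list
	 * length, most significant byte first, so a single download request
	 * is limited to 16 MB - 1 of microcode image. */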
9149 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9150 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9151 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9154 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9156 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9157 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9159 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9160 IPR_WRITE_BUFFER_TIMEOUT);
9163 return IPR_RC_JOB_RETURN;
9167 * ipr_reset_shutdown_ioa - Shutdown the adapter
9168 * @ipr_cmd: ipr command struct
9170 * Description: This function issues an adapter shutdown of the
9171 * specified type to the specified adapter as part of the
9172 * adapter reset job.
9175 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9177 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9179 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9180 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9181 unsigned long timeout;
9182 int rc = IPR_RC_JOB_CONTINUE;
9185 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9186 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9187 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9188 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9189 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9190 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9191 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9192 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9194 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9195 timeout = IPR_SHUTDOWN_TIMEOUT;
9196 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9197 timeout = IPR_INTERNAL_TIMEOUT;
9198 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9199 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9201 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9203 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9205 rc = IPR_RC_JOB_RETURN;
9206 ipr_cmd->job_step = ipr_reset_ucode_download;
9208 ipr_cmd->job_step = ipr_reset_alert;
9215 * ipr_reset_ioa_job - Adapter reset job
9216 * @ipr_cmd: ipr command struct
9218 * Description: This function is the job router for the adapter reset job.
9223 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9229 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9231 if (ioa_cfg->reset_cmd != ipr_cmd) {
9233 * We are doing nested adapter resets and this is
9234 * not the current reset job.
9236 list_add_tail(&ipr_cmd->queue,
9237 &ipr_cmd->hrrq->hrrq_free_q);
9241 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9242 rc = ipr_cmd->job_step_failed(ipr_cmd);
9243 if (rc == IPR_RC_JOB_RETURN)
9247 ipr_reinit_ipr_cmnd(ipr_cmd);
9248 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9249 rc = ipr_cmd->job_step(ipr_cmd);
9250 } while (rc == IPR_RC_JOB_CONTINUE);
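/*
 * Minimal sketch of the job-step convention routed above (step names
 * are hypothetical): a step either completes its work synchronously
 * and returns IPR_RC_JOB_CONTINUE so the next chained step runs
 * immediately in the loop, or it arms a timer / issues an adapter
 * command and returns IPR_RC_JOB_RETURN, in which case the completion
 * path calls back into ipr_reset_ioa_job() to resume the chain.
 */
static int demo_step_two(struct ipr_cmnd *ipr_cmd)
{
	/* ...work that must wait on the adapter... */
	return IPR_RC_JOB_RETURN;		/* completion re-enters the router */
}

static int demo_step_one(struct ipr_cmnd *ipr_cmd)
{
	/* ...purely synchronous work... */
	ipr_cmd->job_step = demo_step_two;	/* chain the next step */
	return IPR_RC_JOB_CONTINUE;		/* router invokes it right away */
}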
9254 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9255 * @ioa_cfg: ioa config struct
9256 * @job_step: first job step of reset job
9257 * @shutdown_type: shutdown type
9259 * Description: This function will initiate the reset of the given adapter
9260 * starting at the selected job step.
9261 * If the caller needs to wait on the completion of the reset,
9262 * the caller must sleep on the reset_wait_q.
9267 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9268 int (*job_step) (struct ipr_cmnd *),
9269 enum ipr_shutdown_type shutdown_type)
9271 struct ipr_cmnd *ipr_cmd;
9274 ioa_cfg->in_reset_reload = 1;
9275 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9276 spin_lock(&ioa_cfg->hrrq[i]._lock);
9277 ioa_cfg->hrrq[i].allow_cmds = 0;
9278 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9281 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9282 ioa_cfg->scsi_unblock = 0;
9283 ioa_cfg->scsi_blocked = 1;
9284 scsi_block_requests(ioa_cfg->host);
9287 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9288 ioa_cfg->reset_cmd = ipr_cmd;
9289 ipr_cmd->job_step = job_step;
9290 ipr_cmd->u.shutdown_type = shutdown_type;
9292 ipr_reset_ioa_job(ipr_cmd);
9296 * ipr_initiate_ioa_reset - Initiate an adapter reset
9297 * @ioa_cfg: ioa config struct
9298 * @shutdown_type: shutdown type
9300 * Description: This function will initiate the reset of the given adapter.
9301 * If the caller needs to wait on the completion of the reset,
9302 * the caller must sleep on the reset_wait_q.
9307 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9308 enum ipr_shutdown_type shutdown_type)
9312 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9315 if (ioa_cfg->in_reset_reload) {
9316 if (ioa_cfg->sdt_state == GET_DUMP)
9317 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9318 else if (ioa_cfg->sdt_state == READ_DUMP)
9319 ioa_cfg->sdt_state = ABORT_DUMP;
9322 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9323 dev_err(&ioa_cfg->pdev->dev,
9324 "IOA taken offline - error recovery failed\n");
9326 ioa_cfg->reset_retries = 0;
9327 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9328 spin_lock(&ioa_cfg->hrrq[i]._lock);
9329 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9330 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9334 if (ioa_cfg->in_ioa_bringdown) {
9335 ioa_cfg->reset_cmd = NULL;
9336 ioa_cfg->in_reset_reload = 0;
9337 ipr_fail_all_ops(ioa_cfg);
9338 wake_up_all(&ioa_cfg->reset_wait_q);
9340 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9341 ioa_cfg->scsi_unblock = 1;
9342 schedule_work(&ioa_cfg->work_q);
9346 ioa_cfg->in_ioa_bringdown = 1;
9347 shutdown_type = IPR_SHUTDOWN_NONE;
9351 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9356 * ipr_reset_freeze - Hold off all I/O activity
9357 * @ipr_cmd: ipr command struct
9359 * Description: If the PCI slot is frozen, hold off all I/O
9360 * activity; then, as soon as the slot is available again,
9361 * initiate an adapter reset.
9363 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9368 /* Disallow new interrupts, avoid loop */
9369 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9370 spin_lock(&ioa_cfg->hrrq[i]._lock);
9371 ioa_cfg->hrrq[i].allow_interrupts = 0;
9372 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9375 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9376 ipr_cmd->done = ipr_reset_ioa_job;
9377 return IPR_RC_JOB_RETURN;
9381 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9382 * @pdev: PCI device struct
9384 * Description: This routine is called to tell us that the MMIO
9385 * access to the IOA has been restored
9387 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9389 unsigned long flags = 0;
9390 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9392 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9393 if (!ioa_cfg->probe_done)
9394 pci_save_state(pdev);
9395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9396 return PCI_ERS_RESULT_NEED_RESET;
9400 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9401 * @pdev: PCI device struct
9403 * Description: This routine is called to tell us that the PCI bus
9404 * is down. Can't do anything here, except put the device driver
9405 * into a holding pattern, waiting for the PCI bus to come back.
9407 static void ipr_pci_frozen(struct pci_dev *pdev)
9409 unsigned long flags = 0;
9410 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9412 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9413 if (ioa_cfg->probe_done)
9414 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9419 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9420 * @pdev: PCI device struct
9422 * Description: This routine is called by the pci error recovery
9423 * code after the PCI slot has been reset, just before we
9424 * should resume normal operations.
9426 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9428 unsigned long flags = 0;
9429 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9431 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9432 if (ioa_cfg->probe_done) {
9433 if (ioa_cfg->needs_warm_reset)
9434 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9436 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9439 wake_up_all(&ioa_cfg->eeh_wait_q);
9440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9441 return PCI_ERS_RESULT_RECOVERED;
9445 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9446 * @pdev: PCI device struct
9448 * Description: This routine is called when the PCI bus has
9449 * permanently failed.
9451 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9453 unsigned long flags = 0;
9454 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9457 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9458 if (ioa_cfg->probe_done) {
9459 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9460 ioa_cfg->sdt_state = ABORT_DUMP;
9461 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9462 ioa_cfg->in_ioa_bringdown = 1;
9463 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9464 spin_lock(&ioa_cfg->hrrq[i]._lock);
9465 ioa_cfg->hrrq[i].allow_cmds = 0;
9466 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9469 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9471 wake_up_all(&ioa_cfg->eeh_wait_q);
9472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9476 * ipr_pci_error_detected - Called when a PCI error is detected.
9477 * @pdev: PCI device struct
9478 * @state: PCI channel state
9480 * Description: Called when a PCI error is detected.
9483 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9485 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9486 pci_channel_state_t state)
9489 case pci_channel_io_frozen:
9490 ipr_pci_frozen(pdev);
9491 return PCI_ERS_RESULT_CAN_RECOVER;
9492 case pci_channel_io_perm_failure:
9493 ipr_pci_perm_failure(pdev);
9494 return PCI_ERS_RESULT_DISCONNECT;
9498 return PCI_ERS_RESULT_NEED_RESET;
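/*
 * A sketch of how the EEH callbacks above are typically wired up (the
 * driver's actual table is defined elsewhere in this file): the PCI
 * core reaches them through a struct pci_error_handlers referenced
 * from the err_handler field of the struct pci_driver.
 */
static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= ipr_pci_error_detected,
	.mmio_enabled	= ipr_pci_mmio_enabled,
	.slot_reset	= ipr_pci_slot_reset,
};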
9502 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9503 * @ioa_cfg: ioa cfg struct
9505 * Description: This is the second phase of adapter initialization.
9506 * This function takes care of initializing the adapter to the point
9507 * where it can accept new commands.
9509 * 0 on success / -EIO on failure
9511 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9514 unsigned long host_lock_flags = 0;
9517 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9518 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9519 ioa_cfg->probe_done = 1;
9520 if (ioa_cfg->needs_hard_reset) {
9521 ioa_cfg->needs_hard_reset = 0;
9522 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9524 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9526 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9533 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9534 * @ioa_cfg: ioa config struct
9539 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9543 if (ioa_cfg->ipr_cmnd_list) {
9544 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9545 if (ioa_cfg->ipr_cmnd_list[i])
9546 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9547 ioa_cfg->ipr_cmnd_list[i],
9548 ioa_cfg->ipr_cmnd_list_dma[i]);
9550 ioa_cfg->ipr_cmnd_list[i] = NULL;
9554 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9556 kfree(ioa_cfg->ipr_cmnd_list);
9557 kfree(ioa_cfg->ipr_cmnd_list_dma);
9558 ioa_cfg->ipr_cmnd_list = NULL;
9559 ioa_cfg->ipr_cmnd_list_dma = NULL;
9560 ioa_cfg->ipr_cmd_pool = NULL;
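/*
 * Sketch of the dma_pool lifecycle this function unwinds (hypothetical
 * name and sizes; NULL checks omitted): each command block comes from
 * dma_pool_zalloc(), which returns both a CPU pointer and the DMA
 * address handed to the adapter, and every block must go back through
 * dma_pool_free() before dma_pool_destroy() tears the pool down.
 */
static void demo_dma_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *blk;

	pool = dma_pool_create("demo", dev, 512, 512, 0);
	blk = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
	/* ...give 'dma' to the adapter, access the block through 'blk'... */
	dma_pool_free(pool, blk, dma);
	dma_pool_destroy(pool);
}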
9564 * ipr_free_mem - Frees memory allocated for an adapter
9565 * @ioa_cfg: ioa cfg struct
9570 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9574 kfree(ioa_cfg->res_entries);
9575 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9576 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9577 ipr_free_cmd_blks(ioa_cfg);
9579 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9580 dma_free_coherent(&ioa_cfg->pdev->dev,
9581 sizeof(u32) * ioa_cfg->hrrq[i].size,
9582 ioa_cfg->hrrq[i].host_rrq,
9583 ioa_cfg->hrrq[i].host_rrq_dma);
9585 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9586 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9588 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9589 dma_free_coherent(&ioa_cfg->pdev->dev,
9590 sizeof(struct ipr_hostrcb),
9591 ioa_cfg->hostrcb[i],
9592 ioa_cfg->hostrcb_dma[i]);
9595 ipr_free_dump(ioa_cfg);
9596 kfree(ioa_cfg->trace);
9600 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9601 * @ioa_cfg: ioa config struct
9603 * This function frees all allocated IRQs for the
9604 * specified adapter.
9609 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9611 struct pci_dev *pdev = ioa_cfg->pdev;
9614 for (i = 0; i < ioa_cfg->nvectors; i++)
9615 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9616 pci_free_irq_vectors(pdev);
9620 * ipr_free_all_resources - Free all allocated resources for an adapter.
9621 * @ioa_cfg: ioa config struct
9623 * This function frees all allocated resources for the
9624 * specified adapter.
9629 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9631 struct pci_dev *pdev = ioa_cfg->pdev;
9634 ipr_free_irqs(ioa_cfg);
9635 if (ioa_cfg->reset_work_q)
9636 destroy_workqueue(ioa_cfg->reset_work_q);
9637 iounmap(ioa_cfg->hdw_dma_regs);
9638 pci_release_regions(pdev);
9639 ipr_free_mem(ioa_cfg);
9640 scsi_host_put(ioa_cfg->host);
9641 pci_disable_device(pdev);
9646 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9647 * @ioa_cfg: ioa config struct
9650 * 0 on success / -ENOMEM on allocation failure
9652 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9654 struct ipr_cmnd *ipr_cmd;
9655 struct ipr_ioarcb *ioarcb;
9656 dma_addr_t dma_addr;
9657 int i, entries_each_hrrq, hrrq_id = 0;
9659 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9660 sizeof(struct ipr_cmnd), 512, 0);
9662 if (!ioa_cfg->ipr_cmd_pool)
9665 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9666 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9668 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9669 ipr_free_cmd_blks(ioa_cfg);
9673 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9674 if (ioa_cfg->hrrq_num > 1) {
9676 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9677 ioa_cfg->hrrq[i].min_cmd_id = 0;
9678 ioa_cfg->hrrq[i].max_cmd_id =
9679 (entries_each_hrrq - 1);
9682 IPR_NUM_BASE_CMD_BLKS/
9683 (ioa_cfg->hrrq_num - 1);
9684 ioa_cfg->hrrq[i].min_cmd_id =
9685 IPR_NUM_INTERNAL_CMD_BLKS +
9686 (i - 1) * entries_each_hrrq;
9687 ioa_cfg->hrrq[i].max_cmd_id =
9688 (IPR_NUM_INTERNAL_CMD_BLKS +
9689 i * entries_each_hrrq - 1);
9692 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9693 ioa_cfg->hrrq[i].min_cmd_id = 0;
9694 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9696 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9699 BUG_ON(ioa_cfg->hrrq_num == 0);
9701 i = IPR_NUM_CMD_BLKS -
9702 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9704 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9705 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
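	/*
	 * Worked example of the split above (illustrative numbers, not the
	 * real ipr.h values): with four HRRQs, HRRQ 0 keeps only the
	 * internal command blocks and the remaining base blocks are divided
	 * evenly across HRRQs 1-3; any remainder left by the integer
	 * division is folded into the last HRRQ by the adjustment above.
	 */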
9708 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9709 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9710 GFP_KERNEL, &dma_addr);
9713 ipr_free_cmd_blks(ioa_cfg);
9717 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9718 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9720 ioarcb = &ipr_cmd->ioarcb;
9721 ipr_cmd->dma_addr = dma_addr;
9723 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9725 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9727 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9728 if (ioa_cfg->sis64) {
9729 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9730 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9731 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9732 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9734 ioarcb->write_ioadl_addr =
9735 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9736 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9737 ioarcb->ioasa_host_pci_addr =
9738 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9740 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9741 ipr_cmd->cmd_index = i;
9742 ipr_cmd->ioa_cfg = ioa_cfg;
9743 ipr_cmd->sense_buffer_dma = dma_addr +
9744 offsetof(struct ipr_cmnd, sense_buffer);
9746 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9747 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9748 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9749 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9757 * ipr_alloc_mem - Allocate memory for an adapter
9758 * @ioa_cfg: ioa config struct
9761 * 0 on success / non-zero for error
9763 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9765 struct pci_dev *pdev = ioa_cfg->pdev;
9766 int i, rc = -ENOMEM;
9769 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9770 sizeof(struct ipr_resource_entry),
9773 if (!ioa_cfg->res_entries)
9776 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9777 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9778 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9781 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9782 sizeof(struct ipr_misc_cbs),
9783 &ioa_cfg->vpd_cbs_dma,
9786 if (!ioa_cfg->vpd_cbs)
9787 goto out_free_res_entries;
9789 if (ipr_alloc_cmd_blks(ioa_cfg))
9790 goto out_free_vpd_cbs;
9792 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9793 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9794 sizeof(u32) * ioa_cfg->hrrq[i].size,
9795 &ioa_cfg->hrrq[i].host_rrq_dma,
9798 if (!ioa_cfg->hrrq[i].host_rrq) {
9800 dma_free_coherent(&pdev->dev,
9801 sizeof(u32) * ioa_cfg->hrrq[i].size,
9802 ioa_cfg->hrrq[i].host_rrq,
9803 ioa_cfg->hrrq[i].host_rrq_dma);
9804 goto out_ipr_free_cmd_blocks;
9806 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9809 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9810 ioa_cfg->cfg_table_size,
9811 &ioa_cfg->cfg_table_dma,
9814 if (!ioa_cfg->u.cfg_table)
9815 goto out_free_host_rrq;
9817 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9818 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9819 sizeof(struct ipr_hostrcb),
9820 &ioa_cfg->hostrcb_dma[i],
9823 if (!ioa_cfg->hostrcb[i])
9824 goto out_free_hostrcb_dma;
9826 ioa_cfg->hostrcb[i]->hostrcb_dma =
9827 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9828 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9829 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9832 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9833 sizeof(struct ipr_trace_entry),
9836 if (!ioa_cfg->trace)
9837 goto out_free_hostrcb_dma;
9844 out_free_hostrcb_dma:
9846 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9847 ioa_cfg->hostrcb[i],
9848 ioa_cfg->hostrcb_dma[i]);
9850 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9851 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9853 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9854 dma_free_coherent(&pdev->dev,
9855 sizeof(u32) * ioa_cfg->hrrq[i].size,
9856 ioa_cfg->hrrq[i].host_rrq,
9857 ioa_cfg->hrrq[i].host_rrq_dma);
9859 out_ipr_free_cmd_blocks:
9860 ipr_free_cmd_blks(ioa_cfg);
9862 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9863 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9864 out_free_res_entries:
9865 kfree(ioa_cfg->res_entries);
9870 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9871 * @ioa_cfg: ioa config struct
9876 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9880 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9881 ioa_cfg->bus_attr[i].bus = i;
9882 ioa_cfg->bus_attr[i].qas_enabled = 0;
9883 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9884 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9885 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9887 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9892 * ipr_init_regs - Initialize IOA registers
9893 * @ioa_cfg: ioa config struct
9898 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9900 const struct ipr_interrupt_offsets *p;
9901 struct ipr_interrupts *t;
9904 p = &ioa_cfg->chip_cfg->regs;
9906 base = ioa_cfg->hdw_dma_regs;
9908 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9909 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9910 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9911 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9912 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9913 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9914 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9915 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9916 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9917 t->ioarrin_reg = base + p->ioarrin_reg;
9918 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9919 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9920 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9921 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9922 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9923 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9925 if (ioa_cfg->sis64) {
9926 t->init_feedback_reg = base + p->init_feedback_reg;
9927 t->dump_addr_reg = base + p->dump_addr_reg;
9928 t->dump_data_reg = base + p->dump_data_reg;
9929 t->endian_swap_reg = base + p->endian_swap_reg;
9934 * ipr_init_ioa_cfg - Initialize IOA config struct
9935 * @ioa_cfg: ioa config struct
9936 * @host: scsi host struct
9937 * @pdev: PCI dev struct
9942 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9943 struct Scsi_Host *host, struct pci_dev *pdev)
9947 ioa_cfg->host = host;
9948 ioa_cfg->pdev = pdev;
9949 ioa_cfg->log_level = ipr_log_level;
9950 ioa_cfg->doorbell = IPR_DOORBELL;
9951 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9952 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9953 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9954 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9955 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9956 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9958 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9959 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9960 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9961 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9962 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9963 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9964 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9965 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9966 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9967 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9968 ioa_cfg->sdt_state = INACTIVE;
9970 ipr_initialize_bus_attr(ioa_cfg);
9971 ioa_cfg->max_devs_supported = ipr_max_devs;
9973 if (ioa_cfg->sis64) {
9974 host->max_channel = IPR_MAX_SIS64_BUSES;
9975 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9976 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9977 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9978 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9979 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9980 + ((sizeof(struct ipr_config_table_entry64)
9981 * ioa_cfg->max_devs_supported)));
9983 host->max_channel = IPR_VSET_BUS;
9984 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9985 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9986 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9987 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9988 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9989 + ((sizeof(struct ipr_config_table_entry)
9990 * ioa_cfg->max_devs_supported)));
9993 host->unique_id = host->host_no;
9994 host->max_cmd_len = IPR_MAX_CDB_LEN;
9995 host->can_queue = ioa_cfg->max_cmds;
9996 pci_set_drvdata(pdev, ioa_cfg);
9998 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9999 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
10000 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
10001 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
10003 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
10005 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
10010 * ipr_get_chip_info - Find adapter chip information
10011 * @dev_id: PCI device id struct
10014 * ptr to chip information on success / NULL on failure
10016 static const struct ipr_chip_t *
10017 ipr_get_chip_info(const struct pci_device_id *dev_id)
10021 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10022 if (ipr_chip[i].vendor == dev_id->vendor &&
10023 ipr_chip[i].device == dev_id->device)
10024 return &ipr_chip[i];
10029 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10030 * during probe time
10031 * @ioa_cfg: ioa config struct
10036 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10038 struct pci_dev *pdev = ioa_cfg->pdev;
10040 if (pci_channel_offline(pdev)) {
10041 wait_event_timeout(ioa_cfg->eeh_wait_q,
10042 !pci_channel_offline(pdev),
10043 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10044 pci_restore_state(pdev);
10048 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10050 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10052 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10053 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10054 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10055 ioa_cfg->vectors_info[vec_idx].
10056 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10060 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10061 struct pci_dev *pdev)
10065 for (i = 1; i < ioa_cfg->nvectors; i++) {
10066 rc = request_irq(pci_irq_vector(pdev, i),
10069 ioa_cfg->vectors_info[i].desc,
10070 &ioa_cfg->hrrq[i]);
10073 free_irq(pci_irq_vector(pdev, i),
10074 &ioa_cfg->hrrq[i]);
10082 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10083 * @devp: PCI device struct
10086 * Description: Simply set the msi_received flag to 1 indicating that
10087 * Message Signaled Interrupts are supported.
10090 * IRQ_HANDLED
10092 static irqreturn_t ipr_test_intr(int irq, void *devp)
10094 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10095 unsigned long lock_flags = 0;
10097 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10098 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10100 ioa_cfg->msi_received = 1;
10101 wake_up(&ioa_cfg->msi_wait_q);
10103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10104 return IRQ_HANDLED;
10108 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10109 * @ioa_cfg: ioa config struct
10110 * @pdev: PCI device struct
10112 * Description: This routine sets up and initiates a test interrupt to determine
10113 * if the interrupt is received via the ipr_test_intr() service routine.
10114 * If the test fails, the driver will fall back to LSI.
10117 * 0 on success / non-zero on failure
10119 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10122 unsigned long lock_flags = 0;
10123 int irq = pci_irq_vector(pdev, 0);
10127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10128 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10129 ioa_cfg->msi_received = 0;
10130 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10131 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10132 readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10135 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10137 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10139 } else if (ipr_debug)
10140 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10142 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10143 readl(ioa_cfg->regs.sense_interrupt_reg);
10144 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10145 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10146 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10148 if (!ioa_cfg->msi_received) {
10149 /* MSI test failed */
10150 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10152 } else if (ipr_debug)
10153 dev_info(&pdev->dev, "MSI test succeeded.\n");
10155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10157 free_irq(irq, ioa_cfg);
10164 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
10165 * @pdev: PCI device struct
10166 * @dev_id: PCI device id struct
10169 * 0 on success / non-zero on failure
10171 static int ipr_probe_ioa(struct pci_dev *pdev,
10172 const struct pci_device_id *dev_id)
10174 struct ipr_ioa_cfg *ioa_cfg;
10175 struct Scsi_Host *host;
10176 unsigned long ipr_regs_pci;
10177 void __iomem *ipr_regs;
10178 int rc = PCIBIOS_SUCCESSFUL;
10179 volatile u32 mask, uproc, interrupts;
10180 unsigned long lock_flags, driver_lock_flags;
10181 unsigned int irq_flag;
10185 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10186 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10189 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10194 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10195 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10196 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10198 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10200 if (!ioa_cfg->ipr_chip) {
10201 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10202 dev_id->vendor, dev_id->device);
10203 goto out_scsi_host_put;
10206 /* set SIS 32 or SIS 64 */
10207 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10208 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10209 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10210 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10212 if (ipr_transop_timeout)
10213 ioa_cfg->transop_timeout = ipr_transop_timeout;
10214 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10215 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10217 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10219 ioa_cfg->revid = pdev->revision;
10221 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10223 ipr_regs_pci = pci_resource_start(pdev, 0);
10225 rc = pci_request_regions(pdev, IPR_NAME);
10227 dev_err(&pdev->dev,
10228 "Couldn't register memory range of registers\n");
10229 goto out_scsi_host_put;
10232 rc = pci_enable_device(pdev);
10234 if (rc || pci_channel_offline(pdev)) {
10235 if (pci_channel_offline(pdev)) {
10236 ipr_wait_for_pci_err_recovery(ioa_cfg);
10237 rc = pci_enable_device(pdev);
10241 dev_err(&pdev->dev, "Cannot enable adapter\n");
10242 ipr_wait_for_pci_err_recovery(ioa_cfg);
10243 goto out_release_regions;
10247 ipr_regs = pci_ioremap_bar(pdev, 0);
10250 dev_err(&pdev->dev,
10251 "Couldn't map memory range of registers\n");
10256 ioa_cfg->hdw_dma_regs = ipr_regs;
10257 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10258 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10260 ipr_init_regs(ioa_cfg);
10262 if (ioa_cfg->sis64) {
10263 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10265 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10266 rc = dma_set_mask_and_coherent(&pdev->dev,
10270 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10273 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10274 goto cleanup_nomem;
10277 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10278 ioa_cfg->chip_cfg->cache_line_size);
10280 if (rc != PCIBIOS_SUCCESSFUL) {
10281 dev_err(&pdev->dev, "Write of cache line size failed\n");
10282 ipr_wait_for_pci_err_recovery(ioa_cfg);
10284 goto cleanup_nomem;
10287 /* Issue MMIO read to ensure card is not in EEH */
10288 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10289 ipr_wait_for_pci_err_recovery(ioa_cfg);
10291 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10292 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10293 IPR_MAX_MSIX_VECTORS);
10294 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10297 irq_flag = PCI_IRQ_LEGACY;
10298 if (ioa_cfg->ipr_chip->has_msi)
10299 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10300 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10302 ipr_wait_for_pci_err_recovery(ioa_cfg);
10303 goto cleanup_nomem;
10305 ioa_cfg->nvectors = rc;
10307 if (!pdev->msi_enabled && !pdev->msix_enabled)
10308 ioa_cfg->clear_isr = 1;
10310 pci_set_master(pdev);
10312 if (pci_channel_offline(pdev)) {
10313 ipr_wait_for_pci_err_recovery(ioa_cfg);
10314 pci_set_master(pdev);
10315 if (pci_channel_offline(pdev)) {
10317 goto out_msi_disable;
10321 if (pdev->msi_enabled || pdev->msix_enabled) {
10322 rc = ipr_test_msi(ioa_cfg, pdev);
10325 dev_info(&pdev->dev,
10326 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10327 pdev->msix_enabled ? "-X" : "");
10330 ipr_wait_for_pci_err_recovery(ioa_cfg);
10331 pci_free_irq_vectors(pdev);
10333 ioa_cfg->nvectors = 1;
10334 ioa_cfg->clear_isr = 1;
10337 goto out_msi_disable;
10341 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10342 (unsigned int)num_online_cpus(),
10343 (unsigned int)IPR_MAX_HRRQ_NUM);
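	/*
	 * Example with illustrative numbers: on an 8-CPU system granted 16
	 * MSI-X vectors (and assuming IPR_MAX_HRRQ_NUM is at least 8),
	 * hrrq_num becomes 8, i.e. one response queue per online CPU and
	 * never more than the vectors allocated or the driver maximum.
	 */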
10345 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10346 goto out_msi_disable;
10348 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10349 goto out_msi_disable;
10351 rc = ipr_alloc_mem(ioa_cfg);
10353 dev_err(&pdev->dev,
10354 "Couldn't allocate enough memory for device driver!\n");
10355 goto out_msi_disable;
10358 /* Save away PCI config space for use following IOA reset */
10359 rc = pci_save_state(pdev);
10361 if (rc != PCIBIOS_SUCCESSFUL) {
10362 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10364 goto cleanup_nolog;
10368 * If HRRQ updated interrupt is not masked, or reset alert is set,
10369 * the card is in an unknown state and needs a hard reset
10371 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10372 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10373 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10374 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10375 ioa_cfg->needs_hard_reset = 1;
10376 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10377 ioa_cfg->needs_hard_reset = 1;
10378 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10379 ioa_cfg->ioa_unit_checked = 1;
10381 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10382 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10383 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10385 if (pdev->msi_enabled || pdev->msix_enabled) {
10386 name_msi_vectors(ioa_cfg);
10387 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10388 ioa_cfg->vectors_info[0].desc,
10389 &ioa_cfg->hrrq[0]);
10391 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10393 rc = request_irq(pdev->irq, ipr_isr,
10395 IPR_NAME, &ioa_cfg->hrrq[0]);
10398 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10400 goto cleanup_nolog;
10403 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10404 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10405 ioa_cfg->needs_warm_reset = 1;
10406 ioa_cfg->reset = ipr_reset_slot_reset;
10408 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10409 WQ_MEM_RECLAIM, host->host_no);
10411 if (!ioa_cfg->reset_work_q) {
10412 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10417 ioa_cfg->reset = ipr_reset_start_bist;
10419 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10420 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10421 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10428 ipr_free_irqs(ioa_cfg);
10430 ipr_free_mem(ioa_cfg);
10432 ipr_wait_for_pci_err_recovery(ioa_cfg);
10433 pci_free_irq_vectors(pdev);
10437 pci_disable_device(pdev);
10438 out_release_regions:
10439 pci_release_regions(pdev);
10441 scsi_host_put(host);
10446 * ipr_initiate_ioa_bringdown - Bring down an adapter
10447 * @ioa_cfg: ioa config struct
10448 * @shutdown_type: shutdown type
10450 * Description: This function will initiate bringing down the adapter.
10451 * This consists of issuing an IOA shutdown to the adapter
10452 * to flush the cache, and running BIST.
10453 * If the caller needs to wait on the completion of the reset,
10454 * the caller must sleep on the reset_wait_q.
10459 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10460 enum ipr_shutdown_type shutdown_type)
10463 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10464 ioa_cfg->sdt_state = ABORT_DUMP;
10465 ioa_cfg->reset_retries = 0;
10466 ioa_cfg->in_ioa_bringdown = 1;
10467 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10472 * __ipr_remove - Remove a single adapter
10473 * @pdev: pci device struct
10475 * Adapter hot plug remove entry point.
10480 static void __ipr_remove(struct pci_dev *pdev)
10482 unsigned long host_lock_flags = 0;
10483 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10485 unsigned long driver_lock_flags;
10488 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10489 while (ioa_cfg->in_reset_reload) {
10490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10491 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}
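	/*
	 * Flag every host RRQ as being removed so no new commands are
	 * started on this adapter while it is brought down.
	 */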
10495 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10496 spin_lock(&ioa_cfg->hrrq[i]._lock);
10497 ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
10501 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10503 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10504 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10505 flush_work(&ioa_cfg->work_q);
10506 if (ioa_cfg->reset_work_q)
10507 flush_workqueue(ioa_cfg->reset_work_q);
10508 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10509 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10511 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10512 list_del(&ioa_cfg->queue);
10513 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10515 if (ioa_cfg->sdt_state == ABORT_DUMP)
10516 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_free_all_resources(ioa_cfg);
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
10543 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10544 &ipr_ioa_async_err_log);
10545 scsi_remove_host(ioa_cfg->host);
	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device ID
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);
	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);
	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);
	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);
	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);
	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
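	/*
	 * All sysfs interfaces are in place; enable device scanning and let
	 * the adapter worker thread register attached devices before the
	 * initial host scan.
	 */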
10620 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10621 ioa_cfg->scan_enabled = 1;
10622 schedule_work(&ioa_cfg->work_q);
10623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10625 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
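	/*
	 * On SIS64 adapters with multiple MSI-X vectors, the secondary HRRQs
	 * are serviced through irq_poll; the first queue is handled directly
	 * in the interrupt handler.
	 */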
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;
10656 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10657 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10658 ioa_cfg->iopoll_weight = 0;
10659 for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}
10663 while (ioa_cfg->in_reset_reload) {
10664 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10665 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}
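	/*
	 * When the fast_reboot module parameter is set on a SIS64 adapter,
	 * a restart only needs the lighter quiesce shutdown; the IRQs and
	 * PCI device are then released below to shorten the reboot path.
	 */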
10669 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10670 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10672 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10674 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10675 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10676 ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
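/*
 * PCI IDs are matched on subsystem vendor/device; driver_data carries
 * per-adapter quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET.
 */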
10681 static struct pci_device_id ipr_pci_table[] = {
10682 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10684 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10686 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10688 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10692 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10693 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10694 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10695 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10696 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10697 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10698 IPR_USE_LONG_TRANSOP_TIMEOUT },
10699 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10700 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10701 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10703 IPR_USE_LONG_TRANSOP_TIMEOUT },
10704 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10706 IPR_USE_LONG_TRANSOP_TIMEOUT },
10707 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10708 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10709 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10710 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10711 IPR_USE_LONG_TRANSOP_TIMEOUT},
10712 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10714 IPR_USE_LONG_TRANSOP_TIMEOUT },
10715 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10717 IPR_USE_LONG_TRANSOP_TIMEOUT },
10718 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10720 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10721 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10722 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10723 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10724 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10725 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10726 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10727 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10728 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10729 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10730 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10731 IPR_USE_LONG_TRANSOP_TIMEOUT },
10732 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10733 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10734 IPR_USE_LONG_TRANSOP_TIMEOUT },
10735 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10736 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10737 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10738 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10739 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10740 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10741 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10742 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10743 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10744 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10745 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10746 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10747 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10749 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10751 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10753 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10755 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10757 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10759 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10761 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10762 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10763 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10764 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10765 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10767 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10769 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10771 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10773 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10775 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10776 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10777 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10778 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10779 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10780 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10781 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10782 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10783 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10784 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10785 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10786 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10787 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10788 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10789 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
10793 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
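/* PCI error recovery (EEH/AER) entry points for this driver. */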
10795 static const struct pci_error_handlers ipr_err_handler = {
10796 .error_detected = ipr_pci_error_detected,
10797 .mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
10804 .probe = ipr_probe,
10805 .remove = ipr_remove,
10806 .shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
10833 struct ipr_cmnd *ipr_cmd;
10834 struct ipr_ioa_cfg *ioa_cfg;
10835 unsigned long flags = 0, driver_lock_flags;
10837 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10838 return NOTIFY_DONE;
10840 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
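	/*
	 * Send a "prepare for shutdown (normal)" command to every registered
	 * adapter so write caches are flushed; skip adapters that cannot
	 * accept commands or that already shut down via the fast-reboot path.
	 */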
10842 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10843 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10844 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10845 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}
10850 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10851 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10852 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10853 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10854 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10856 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;
10878 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10879 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
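	/*
	 * The reboot notifier lets ipr_halt() issue shutdown-prepare commands
	 * to all adapters before the system halts, reboots, or powers off.
	 */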
	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc) {
		unregister_reboot_notifier(&ipr_notifier);
		return rc;
	}

	return 0;
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
10905 module_init(ipr_init);
10906 module_exit(ipr_exit);