/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
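
/*
 * Illustrative sketch (not part of the driver): the wait and retry
 * constants above are consumed by simple bounded poll loops, roughly of
 * the shape below, so the effective timeout is the product of the two
 * values (e.g. MAX_MODE_CHANGE_WAIT * MODE_CHANGE_WAIT_INTERVAL ms =
 * 2000 * 10 ms = 20 s).  hpsa_example_poll() and its "done" callback are
 * hypothetical names used only for this sketch; the real doorbell-wait
 * helpers live further down in this file.
 */
static inline int hpsa_example_poll(bool (*done)(void *ctx), void *ctx)
{
	int i;

	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (done(ctx))
			return 0;	/* condition met within the budget */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ETIMEDOUT;		/* board never acknowledged */
}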
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID &amp; Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 *  (how a board_id is composed from PCI config space is sketched just
 *  after this table)
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
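
/*
 * Illustrative sketch (not part of the driver): each board_id in the
 * table above packs the PCI subsystem device ID into the high 16 bits
 * and the subsystem vendor ID into the low 16 bits, e.g. the P212 entry
 * 0x3241103C is subsystem device 0x3241, subsystem vendor 0x103C
 * (Hewlett-Packard).  hpsa_example_board_id() is a hypothetical helper
 * used only for this sketch; the driver's real lookup is
 * hpsa_lookup_board_id(), declared below.
 */
static inline u32 hpsa_example_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}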
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h);
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == -1)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: a state change detected, command retried\n",
			h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
356 static u32 lockup_detected(struct ctlr_info *h);
357 static ssize_t host_show_lockup_detected(struct device *dev,
358 struct device_attribute *attr, char *buf)
362 struct Scsi_Host *shost = class_to_shost(dev);
364 h = shost_to_hba(shost);
365 ld = lockup_detected(h);
367 return sprintf(buf, "ld=%d\n", ld);
370 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
371 struct device_attribute *attr,
372 const char *buf, size_t count)
376 struct Scsi_Host *shost = class_to_shost(dev);
379 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
381 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
382 strncpy(tmpbuf, buf, len);
384 if (sscanf(tmpbuf, "%d", &status) != 1)
386 h = shost_to_hba(shost);
387 h->acciopath_status = !!status;
388 dev_warn(&h->pdev->dev,
389 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
390 h->acciopath_status ? "enabled" : "disabled");
394 static ssize_t host_store_raid_offload_debug(struct device *dev,
395 struct device_attribute *attr,
396 const char *buf, size_t count)
398 int debug_level, len;
400 struct Scsi_Host *shost = class_to_shost(dev);
403 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
405 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
406 strncpy(tmpbuf, buf, len);
408 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
412 h = shost_to_hba(shost);
413 h->raid_offload_debug = debug_level;
414 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
415 h->raid_offload_debug);
419 static ssize_t host_store_rescan(struct device *dev,
420 struct device_attribute *attr,
421 const char *buf, size_t count)
424 struct Scsi_Host *shost = class_to_shost(dev);
425 h = shost_to_hba(shost);
426 hpsa_scan_start(h->scsi_host);
430 static ssize_t host_show_firmware_revision(struct device *dev,
431 struct device_attribute *attr, char *buf)
434 struct Scsi_Host *shost = class_to_shost(dev);
435 unsigned char *fwrev;
437 h = shost_to_hba(shost);
438 if (!h->hba_inquiry_data)
440 fwrev = &h->hba_inquiry_data[32];
441 return snprintf(buf, 20, "%c%c%c%c\n",
442 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
445 static ssize_t host_show_commands_outstanding(struct device *dev,
446 struct device_attribute *attr, char *buf)
448 struct Scsi_Host *shost = class_to_shost(dev);
449 struct ctlr_info *h = shost_to_hba(shost);
451 return snprintf(buf, 20, "%d\n",
452 atomic_read(&h->commands_outstanding));
455 static ssize_t host_show_transport_mode(struct device *dev,
456 struct device_attribute *attr, char *buf)
459 struct Scsi_Host *shost = class_to_shost(dev);
461 h = shost_to_hba(shost);
462 return snprintf(buf, 20, "%s\n",
463 h->transMethod & CFGTBL_Trans_Performant ?
464 "performant" : "simple");
467 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
468 struct device_attribute *attr, char *buf)
471 struct Scsi_Host *shost = class_to_shost(dev);
473 h = shost_to_hba(shost);
474 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
475 (h->acciopath_status == 1) ? "enabled" : "disabled");
478 /* List of controllers which cannot be hard reset on kexec with reset_devices */
479 static u32 unresettable_controller[] = {
480 0x324a103C, /* Smart Array P712m */
481 0x324b103C, /* Smart Array P711m */
482 0x3223103C, /* Smart Array P800 */
483 0x3234103C, /* Smart Array P400 */
484 0x3235103C, /* Smart Array P400i */
485 0x3211103C, /* Smart Array E200i */
486 0x3212103C, /* Smart Array E200 */
487 0x3213103C, /* Smart Array E200i */
488 0x3214103C, /* Smart Array E200i */
489 0x3215103C, /* Smart Array E200i */
490 0x3237103C, /* Smart Array E500 */
491 0x323D103C, /* Smart Array P700m */
492 0x40800E11, /* Smart Array 5i */
493 0x409C0E11, /* Smart Array 6400 */
494 0x409D0E11, /* Smart Array 6400 EM */
495 0x40700E11, /* Smart Array 5300 */
496 0x40820E11, /* Smart Array 532 */
497 0x40830E11, /* Smart Array 5312 */
498 0x409A0E11, /* Smart Array 641 */
499 0x409B0E11, /* Smart Array 642 */
500 0x40910E11, /* Smart Array 6i */
503 /* List of controllers which cannot even be soft reset */
504 static u32 soft_unresettable_controller[] = {
505 0x40800E11, /* Smart Array 5i */
506 0x40700E11, /* Smart Array 5300 */
507 0x40820E11, /* Smart Array 532 */
508 0x40830E11, /* Smart Array 5312 */
509 0x409A0E11, /* Smart Array 641 */
510 0x409B0E11, /* Smart Array 642 */
511 0x40910E11, /* Smart Array 6i */
512 /* Exclude 640x boards. These are two pci devices in one slot
513 * which share a battery backed cache module. One controls the
514 * cache, the other accesses the cache through the one that controls
515 * it. If we reset the one controlling the cache, the other will
516 * likely not be happy. Just forbid resetting this conjoined mess.
517 * The 640x isn't really supported by hpsa anyway.
519 0x409C0E11, /* Smart Array 6400 */
520 0x409D0E11, /* Smart Array 6400 EM */
523 static u32 needs_abort_tags_swizzled[] = {
524 0x323D103C, /* Smart Array P700m */
525 0x324a103C, /* Smart Array P712m */
526 0x324b103C, /* SmartArray P711m */
529 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
533 for (i = 0; i < nelems; i++)
534 if (a[i] == board_id)
539 static int ctlr_is_hard_resettable(u32 board_id)
541 return !board_id_in_array(unresettable_controller,
542 ARRAY_SIZE(unresettable_controller), board_id);
545 static int ctlr_is_soft_resettable(u32 board_id)
547 return !board_id_in_array(soft_unresettable_controller,
548 ARRAY_SIZE(soft_unresettable_controller), board_id);
551 static int ctlr_is_resettable(u32 board_id)
553 return ctlr_is_hard_resettable(board_id) ||
554 ctlr_is_soft_resettable(board_id);
557 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
559 return board_id_in_array(needs_abort_tags_swizzled,
560 ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
563 static ssize_t host_show_resettable(struct device *dev,
564 struct device_attribute *attr, char *buf)
567 struct Scsi_Host *shost = class_to_shost(dev);
569 h = shost_to_hba(shost);
570 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
573 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
575 return (scsi3addr[3] & 0xC0) == 0x40;
578 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
579 "1(+0)ADM", "UNKNOWN"
581 #define HPSA_RAID_0 0
582 #define HPSA_RAID_4 1
583 #define HPSA_RAID_1 2 /* also used for RAID 10 */
584 #define HPSA_RAID_5 3 /* also used for RAID 50 */
585 #define HPSA_RAID_51 4
586 #define HPSA_RAID_6 5 /* also used for RAID 60 */
587 #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
588 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
590 static ssize_t raid_level_show(struct device *dev,
591 struct device_attribute *attr, char *buf)
594 unsigned char rlevel;
596 struct scsi_device *sdev;
597 struct hpsa_scsi_dev_t *hdev;
600 sdev = to_scsi_device(dev);
601 h = sdev_to_hba(sdev);
602 spin_lock_irqsave(&h->lock, flags);
603 hdev = sdev->hostdata;
605 spin_unlock_irqrestore(&h->lock, flags);
609 /* Is this even a logical drive? */
610 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
611 spin_unlock_irqrestore(&h->lock, flags);
612 l = snprintf(buf, PAGE_SIZE, "N/A\n");
616 rlevel = hdev->raid_level;
617 spin_unlock_irqrestore(&h->lock, flags);
618 if (rlevel > RAID_UNKNOWN)
619 rlevel = RAID_UNKNOWN;
620 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
624 static ssize_t lunid_show(struct device *dev,
625 struct device_attribute *attr, char *buf)
628 struct scsi_device *sdev;
629 struct hpsa_scsi_dev_t *hdev;
631 unsigned char lunid[8];
633 sdev = to_scsi_device(dev);
634 h = sdev_to_hba(sdev);
635 spin_lock_irqsave(&h->lock, flags);
636 hdev = sdev->hostdata;
638 spin_unlock_irqrestore(&h->lock, flags);
641 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
642 spin_unlock_irqrestore(&h->lock, flags);
643 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
644 lunid[0], lunid[1], lunid[2], lunid[3],
645 lunid[4], lunid[5], lunid[6], lunid[7]);
648 static ssize_t unique_id_show(struct device *dev,
649 struct device_attribute *attr, char *buf)
652 struct scsi_device *sdev;
653 struct hpsa_scsi_dev_t *hdev;
655 unsigned char sn[16];
657 sdev = to_scsi_device(dev);
658 h = sdev_to_hba(sdev);
659 spin_lock_irqsave(&h->lock, flags);
660 hdev = sdev->hostdata;
662 spin_unlock_irqrestore(&h->lock, flags);
665 memcpy(sn, hdev->device_id, sizeof(sn));
666 spin_unlock_irqrestore(&h->lock, flags);
667 return snprintf(buf, 16 * 2 + 2,
668 "%02X%02X%02X%02X%02X%02X%02X%02X"
669 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
670 sn[0], sn[1], sn[2], sn[3],
671 sn[4], sn[5], sn[6], sn[7],
672 sn[8], sn[9], sn[10], sn[11],
673 sn[12], sn[13], sn[14], sn[15]);
676 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
677 struct device_attribute *attr, char *buf)
680 struct scsi_device *sdev;
681 struct hpsa_scsi_dev_t *hdev;
685 sdev = to_scsi_device(dev);
686 h = sdev_to_hba(sdev);
687 spin_lock_irqsave(&h->lock, flags);
688 hdev = sdev->hostdata;
690 spin_unlock_irqrestore(&h->lock, flags);
693 offload_enabled = hdev->offload_enabled;
694 spin_unlock_irqrestore(&h->lock, flags);
695 return snprintf(buf, 20, "%d\n", offload_enabled);
698 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
699 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
700 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
701 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
702 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
703 host_show_hp_ssd_smart_path_enabled, NULL);
704 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
705 host_show_hp_ssd_smart_path_status,
706 host_store_hp_ssd_smart_path_status);
707 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
708 host_store_raid_offload_debug);
709 static DEVICE_ATTR(firmware_revision, S_IRUGO,
710 host_show_firmware_revision, NULL);
711 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
712 host_show_commands_outstanding, NULL);
713 static DEVICE_ATTR(transport_mode, S_IRUGO,
714 host_show_transport_mode, NULL);
715 static DEVICE_ATTR(resettable, S_IRUGO,
716 host_show_resettable, NULL);
717 static DEVICE_ATTR(lockup_detected, S_IRUGO,
718 host_show_lockup_detected, NULL);
720 static struct device_attribute *hpsa_sdev_attrs[] = {
721 &dev_attr_raid_level,
724 &dev_attr_hp_ssd_smart_path_enabled,
725 &dev_attr_lockup_detected,
729 static struct device_attribute *hpsa_shost_attrs[] = {
731 &dev_attr_firmware_revision,
732 &dev_attr_commands_outstanding,
733 &dev_attr_transport_mode,
734 &dev_attr_resettable,
735 &dev_attr_hp_ssd_smart_path_status,
736 &dev_attr_raid_offload_debug,
740 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
741 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
743 static struct scsi_host_template hpsa_driver_template = {
744 .module = THIS_MODULE,
747 .queuecommand = hpsa_scsi_queue_command,
748 .scan_start = hpsa_scan_start,
749 .scan_finished = hpsa_scan_finished,
750 .change_queue_depth = hpsa_change_queue_depth,
752 .use_clustering = ENABLE_CLUSTERING,
753 .eh_abort_handler = hpsa_eh_abort_handler,
754 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
756 .slave_alloc = hpsa_slave_alloc,
757 .slave_configure = hpsa_slave_configure,
758 .slave_destroy = hpsa_slave_destroy,
760 .compat_ioctl = hpsa_compat_ioctl,
762 .sdev_attrs = hpsa_sdev_attrs,
763 .shost_attrs = hpsa_shost_attrs,
768 static inline u32 next_command(struct ctlr_info *h, u8 q)
771 struct reply_queue_buffer *rq = &h->reply_queue[q];
773 if (h->transMethod & CFGTBL_Trans_io_accel1)
774 return h->access.command_completed(h, q);
776 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
777 return h->access.command_completed(h, q);
779 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
780 a = rq->head[rq->current_entry];
782 atomic_dec(&h->commands_outstanding);
786 /* Check for wraparound */
787 if (rq->current_entry == h->max_commands) {
788 rq->current_entry = 0;
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
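
/*
 * Worked example of the normal performant mode encoding described above
 * (the numbers are illustrative only): a command whose SG count selects
 * block fetch table entry 3 is tagged as
 *
 *	busaddr |= 1 | (3 << 1)		== busaddr |= 0x7
 *
 * i.e. bit 0 set for performant mode, bits 1-3 holding the value 3, and
 * bits 4-6 left at zero for a normal mode command.  set_performant_mode()
 * below applies exactly this pattern using h->blockFetchTable[].
 */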
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
#define DEFAULT_REPLY_QUEUE (-1)
826 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
829 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
830 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
831 if (unlikely(!h->msix_vector))
833 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
834 c->Header.ReplyQueue =
835 raw_smp_processor_id() % h->nreply_queues;
837 c->Header.ReplyQueue = reply_queue % h->nreply_queues;
841 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
842 struct CommandList *c,
845 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
848 * Tell the controller to post the reply to the queue for this
849 * processor. This seems to give the best I/O throughput.
851 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
852 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
854 cp->ReplyQueue = reply_queue % h->nreply_queues;
856 * Set the bits in the address sent down to include:
857 * - performant mode bit (bit 0)
858 * - pull count (bits 1-3)
859 * - command type (bits 4-6)
861 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
862 IOACCEL1_BUSADDR_CMDTYPE;
865 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
866 struct CommandList *c,
869 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
872 * Tell the controller to post the reply to the queue for this
873 * processor. This seems to give the best I/O throughput.
875 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
876 cp->reply_queue = smp_processor_id() % h->nreply_queues;
878 cp->reply_queue = reply_queue % h->nreply_queues;
880 * Set the bits in the address sent down to include:
881 * - performant mode bit not used in ioaccel mode 2
882 * - pull count (bits 0-3)
883 * - command type isn't needed for ioaccel2
885 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
888 static int is_firmware_flash_cmd(u8 *cdb)
890 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash
 * and dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
900 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
901 struct CommandList *c)
903 if (!is_firmware_flash_cmd(c->Request.CDB))
905 atomic_inc(&h->firmware_flash_in_progress);
906 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
909 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
910 struct CommandList *c)
912 if (is_firmware_flash_cmd(c->Request.CDB) &&
913 atomic_dec_and_test(&h->firmware_flash_in_progress))
914 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
917 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
918 struct CommandList *c, int reply_queue)
920 dial_down_lockup_detection_during_fw_flash(h, c);
921 atomic_inc(&h->commands_outstanding);
922 switch (c->cmd_type) {
924 set_ioaccel1_performant_mode(h, c, reply_queue);
925 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
928 set_ioaccel2_performant_mode(h, c, reply_queue);
929 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
932 set_performant_mode(h, c, reply_queue);
933 h->access.submit_command(h, c);
937 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
938 struct CommandList *c)
940 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
943 static inline int is_hba_lunid(unsigned char scsi3addr[])
945 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
948 static inline int is_scsi_rev_5(struct ctlr_info *h)
950 if (!h->hba_inquiry_data)
952 if ((h->hba_inquiry_data[2] & 0x07) == 5)
957 static int hpsa_find_target_lun(struct ctlr_info *h,
958 unsigned char scsi3addr[], int bus, int *target, int *lun)
960 /* finds an unused bus, target, lun for a new physical device
961 * assumes h->devlock is held
964 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
966 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
968 for (i = 0; i < h->ndevices; i++) {
969 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
970 __set_bit(h->dev[i]->target, lun_taken);
973 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
974 if (i < HPSA_MAX_DEVICES) {
983 static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
984 struct hpsa_scsi_dev_t *dev, char *description)
986 dev_printk(level, &h->pdev->dev,
987 "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
988 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
990 scsi_device_type(dev->devtype),
993 dev->raid_level > RAID_UNKNOWN ?
994 "RAID-?" : raid_label[dev->raid_level],
995 dev->offload_config ? '+' : '-',
996 dev->offload_enabled ? '+' : '-',
1000 /* Add an entry into h->dev[] array. */
1001 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
1002 struct hpsa_scsi_dev_t *device,
1003 struct hpsa_scsi_dev_t *added[], int *nadded)
1005 /* assumes h->devlock is held */
1006 int n = h->ndevices;
1008 unsigned char addr1[8], addr2[8];
1009 struct hpsa_scsi_dev_t *sd;
1011 if (n >= HPSA_MAX_DEVICES) {
1012 dev_err(&h->pdev->dev, "too many devices, some will be "
1017 /* physical devices do not have lun or target assigned until now. */
1018 if (device->lun != -1)
1019 /* Logical device, lun is already assigned. */
	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.  (An illustration of this addressing
	 * convention follows this function.)
	 */
1026 if (device->scsi3addr[4] == 0) {
1027 /* This is not a non-zero lun of a multi-lun device */
1028 if (hpsa_find_target_lun(h, device->scsi3addr,
1029 device->bus, &device->target, &device->lun) != 0)
1034 /* This is a non-zero lun of a multi-lun device.
1035 * Search through our list and find the device which
1036 * has the same 8 byte LUN address, excepting byte 4.
1037 * Assign the same bus and target for this new LUN.
1038 * Use the logical unit number from the firmware.
1040 memcpy(addr1, device->scsi3addr, 8);
1042 for (i = 0; i < n; i++) {
1044 memcpy(addr2, sd->scsi3addr, 8);
1046 /* differ only in byte 4? */
1047 if (memcmp(addr1, addr2, 8) == 0) {
1048 device->bus = sd->bus;
1049 device->target = sd->target;
1050 device->lun = device->scsi3addr[4];
1054 if (device->lun == -1) {
1055 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1056 " suspect firmware bug or unsupported hardware "
1057 "configuration.\n");
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
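
/*
 * Illustration of the multi-lun addressing convention handled in
 * hpsa_scsi_add_entry() above (the byte values are made up): two LUNs of
 * the same multi-lun physical device differ only in byte 4 of their
 * 8-byte addresses,
 *
 *	LUN 0:	00 00 00 00 00 00 00 00
 *	LUN 2:	00 00 00 00 02 00 00 00
 *
 * so the driver reuses the bus/target of the entry whose other seven
 * bytes match and takes the logical unit number from scsi3addr[4].
 */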
1074 /* Update an entry in h->dev[] array. */
1075 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1076 int entry, struct hpsa_scsi_dev_t *new_entry)
1078 int offload_enabled;
1079 /* assumes h->devlock is held */
1080 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1082 /* Raid level changed. */
1083 h->dev[entry]->raid_level = new_entry->raid_level;
1085 /* Raid offload parameters changed. Careful about the ordering. */
1086 if (new_entry->offload_config && new_entry->offload_enabled) {
1088 * if drive is newly offload_enabled, we want to copy the
1089 * raid map data first. If previously offload_enabled and
1090 * offload_config were set, raid map data had better be
1091 * the same as it was before. if raid map data is changed
1092 * then it had better be the case that
1093 * h->dev[entry]->offload_enabled is currently 0.
1095 h->dev[entry]->raid_map = new_entry->raid_map;
1096 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1098 if (new_entry->hba_ioaccel_enabled) {
1099 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1100 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1102 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1103 h->dev[entry]->offload_config = new_entry->offload_config;
1104 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1105 h->dev[entry]->queue_depth = new_entry->queue_depth;
1108 * We can turn off ioaccel offload now, but need to delay turning
1109 * it on until we can update h->dev[entry]->phys_disk[], but we
1110 * can't do that until all the devices are updated.
1112 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1113 if (!new_entry->offload_enabled)
1114 h->dev[entry]->offload_enabled = 0;
1116 offload_enabled = h->dev[entry]->offload_enabled;
1117 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1118 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1119 h->dev[entry]->offload_enabled = offload_enabled;
1122 /* Replace an entry from h->dev[] array. */
1123 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1124 int entry, struct hpsa_scsi_dev_t *new_entry,
1125 struct hpsa_scsi_dev_t *added[], int *nadded,
1126 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1128 /* assumes h->devlock is held */
1129 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1130 removed[*nremoved] = h->dev[entry];
1134 * New physical devices won't have target/lun assigned yet
1135 * so we need to preserve the values in the slot we are replacing.
1137 if (new_entry->target == -1) {
1138 new_entry->target = h->dev[entry]->target;
1139 new_entry->lun = h->dev[entry]->lun;
1142 h->dev[entry] = new_entry;
1143 added[*nadded] = new_entry;
1145 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1146 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1147 new_entry->offload_enabled = 0;
1150 /* Remove an entry from h->dev[] array. */
1151 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1152 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1154 /* assumes h->devlock is held */
1156 struct hpsa_scsi_dev_t *sd;
1158 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1161 removed[*nremoved] = h->dev[entry];
1164 for (i = entry; i < h->ndevices-1; i++)
1165 h->dev[i] = h->dev[i+1];
1167 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
1180 static void fixup_botched_add(struct ctlr_info *h,
1181 struct hpsa_scsi_dev_t *added)
1183 /* called when scsi_add_device fails in order to re-adjust
1184 * h->dev[] to match the mid layer's view.
1186 unsigned long flags;
1189 spin_lock_irqsave(&h->lock, flags);
1190 for (i = 0; i < h->ndevices; i++) {
1191 if (h->dev[i] == added) {
1192 for (j = i; j < h->ndevices-1; j++)
1193 h->dev[j] = h->dev[j+1];
1198 spin_unlock_irqrestore(&h->lock, flags);
1202 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1203 struct hpsa_scsi_dev_t *dev2)
1205 /* we compare everything except lun and target as these
1206 * are not yet assigned. Compare parts likely
1209 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1210 sizeof(dev1->scsi3addr)) != 0)
1212 if (memcmp(dev1->device_id, dev2->device_id,
1213 sizeof(dev1->device_id)) != 0)
1215 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1217 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1219 if (dev1->devtype != dev2->devtype)
1221 if (dev1->bus != dev2->bus)
1226 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1227 struct hpsa_scsi_dev_t *dev2)
1229 /* Device attributes that can change, but don't mean
1230 * that the device is a different device, nor that the OS
1231 * needs to be told anything about the change.
1233 if (dev1->raid_level != dev2->raid_level)
1235 if (dev1->offload_config != dev2->offload_config)
1237 if (dev1->offload_enabled != dev2->offload_enabled)
1239 if (dev1->queue_depth != dev2->queue_depth)
1244 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
1245 * and return needle location in *index. If scsi3addr matches, but not
1246 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1247 * location in *index.
1248 * In the case of a minor device attribute change, such as RAID level, just
1249 * return DEVICE_UPDATED, along with the updated device's location in index.
1250 * If needle not found, return DEVICE_NOT_FOUND.
1252 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1253 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1257 #define DEVICE_NOT_FOUND 0
1258 #define DEVICE_CHANGED 1
1259 #define DEVICE_SAME 2
1260 #define DEVICE_UPDATED 3
1261 for (i = 0; i < haystack_size; i++) {
1262 if (haystack[i] == NULL) /* previously removed. */
1264 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1266 if (device_is_the_same(needle, haystack[i])) {
1267 if (device_updated(needle, haystack[i]))
1268 return DEVICE_UPDATED;
1271 /* Keep offline devices offline */
1272 if (needle->volume_offline)
1273 return DEVICE_NOT_FOUND;
1274 return DEVICE_CHANGED;
1279 return DEVICE_NOT_FOUND;
1282 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1283 unsigned char scsi3addr[])
1285 struct offline_device_entry *device;
1286 unsigned long flags;
1288 /* Check to see if device is already on the list */
1289 spin_lock_irqsave(&h->offline_device_lock, flags);
1290 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1291 if (memcmp(device->scsi3addr, scsi3addr,
1292 sizeof(device->scsi3addr)) == 0) {
1293 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1297 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1299 /* Device is not on the list, add it. */
1300 device = kmalloc(sizeof(*device), GFP_KERNEL);
1302 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1305 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1306 spin_lock_irqsave(&h->offline_device_lock, flags);
1307 list_add_tail(&device->offline_list, &h->offline_device_list);
1308 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1311 /* Print a message explaining various offline volume states */
1312 static void hpsa_show_volume_status(struct ctlr_info *h,
1313 struct hpsa_scsi_dev_t *sd)
1315 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1316 dev_info(&h->pdev->dev,
1317 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1318 h->scsi_host->host_no,
1319 sd->bus, sd->target, sd->lun);
1320 switch (sd->volume_offline) {
1323 case HPSA_LV_UNDERGOING_ERASE:
1324 dev_info(&h->pdev->dev,
1325 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1326 h->scsi_host->host_no,
1327 sd->bus, sd->target, sd->lun);
1329 case HPSA_LV_UNDERGOING_RPI:
1330 dev_info(&h->pdev->dev,
1331 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1332 h->scsi_host->host_no,
1333 sd->bus, sd->target, sd->lun);
1335 case HPSA_LV_PENDING_RPI:
1336 dev_info(&h->pdev->dev,
1337 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1338 h->scsi_host->host_no,
1339 sd->bus, sd->target, sd->lun);
1341 case HPSA_LV_ENCRYPTED_NO_KEY:
1342 dev_info(&h->pdev->dev,
1343 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1344 h->scsi_host->host_no,
1345 sd->bus, sd->target, sd->lun);
1347 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1348 dev_info(&h->pdev->dev,
1349 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1350 h->scsi_host->host_no,
1351 sd->bus, sd->target, sd->lun);
1353 case HPSA_LV_UNDERGOING_ENCRYPTION:
1354 dev_info(&h->pdev->dev,
1355 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1356 h->scsi_host->host_no,
1357 sd->bus, sd->target, sd->lun);
1359 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1360 dev_info(&h->pdev->dev,
1361 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1362 h->scsi_host->host_no,
1363 sd->bus, sd->target, sd->lun);
1365 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1366 dev_info(&h->pdev->dev,
1367 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1368 h->scsi_host->host_no,
1369 sd->bus, sd->target, sd->lun);
1371 case HPSA_LV_PENDING_ENCRYPTION:
1372 dev_info(&h->pdev->dev,
1373 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1374 h->scsi_host->host_no,
1375 sd->bus, sd->target, sd->lun);
1377 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1378 dev_info(&h->pdev->dev,
1379 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1380 h->scsi_host->host_no,
1381 sd->bus, sd->target, sd->lun);
1387 * Figure the list of physical drive pointers for a logical drive with
1388 * raid offload configured.
1390 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1391 struct hpsa_scsi_dev_t *dev[], int ndevices,
1392 struct hpsa_scsi_dev_t *logical_drive)
1394 struct raid_map_data *map = &logical_drive->raid_map;
1395 struct raid_map_disk_data *dd = &map->data[0];
1397 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1398 le16_to_cpu(map->metadata_disks_per_row);
1399 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1400 le16_to_cpu(map->layout_map_count) *
1401 total_disks_per_row;
1402 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1403 total_disks_per_row;
1406 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1407 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1410 for (i = 0; i < nraid_map_entries; i++) {
1411 logical_drive->phys_disk[i] = NULL;
1412 if (!logical_drive->offload_config)
1414 for (j = 0; j < ndevices; j++) {
1415 if (dev[j]->devtype != TYPE_DISK)
1417 if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1419 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1422 logical_drive->phys_disk[i] = dev[j];
1424 qdepth = min(h->nr_cmds, qdepth +
1425 logical_drive->phys_disk[i]->queue_depth);
1430 * This can happen if a physical drive is removed and
1431 * the logical drive is degraded. In that case, the RAID
1432 * map data will refer to a physical disk which isn't actually
1433 * present. And in that case offload_enabled should already
1434 * be 0, but we'll turn it off here just in case
1436 if (!logical_drive->phys_disk[i]) {
1437 logical_drive->offload_enabled = 0;
1438 logical_drive->offload_to_be_enabled = 0;
1439 logical_drive->queue_depth = 8;
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes.  (A worked example
		 * follows this function.)
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
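
/*
 * Worked example for the queue depth computed above (illustrative numbers
 * only): a logical drive whose RAID map references four physical disks,
 * each advertising queue_depth 30, accumulates
 *
 *	qdepth = min(h->nr_cmds, 30 + 30 + 30 + 30) = min(h->nr_cmds, 120)
 *
 * so the volume is limited to 120 outstanding commands unless the
 * controller's command pool (h->nr_cmds) is smaller than that.
 */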
1452 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1453 struct hpsa_scsi_dev_t *dev[], int ndevices)
1457 for (i = 0; i < ndevices; i++) {
1458 if (dev[i]->devtype != TYPE_DISK)
1460 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1464 * If offload is currently enabled, the RAID map and
1465 * phys_disk[] assignment *better* not be changing
1466 * and since it isn't changing, we do not need to
1469 if (dev[i]->offload_enabled)
1472 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1476 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1477 struct hpsa_scsi_dev_t *sd[], int nsds)
1479 /* sd contains scsi3 addresses and devtypes, and inquiry
1480 * data. This function takes what's in sd to be the current
1481 * reality and updates h->dev[] to reflect that reality.
1483 int i, entry, device_change, changes = 0;
1484 struct hpsa_scsi_dev_t *csd;
1485 unsigned long flags;
1486 struct hpsa_scsi_dev_t **added, **removed;
1487 int nadded, nremoved;
1488 struct Scsi_Host *sh = NULL;
1490 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1491 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1493 if (!added || !removed) {
1494 dev_warn(&h->pdev->dev, "out of memory in "
1495 "adjust_hpsa_scsi_table\n");
1499 spin_lock_irqsave(&h->devlock, flags);
1501 /* find any devices in h->dev[] that are not in
1502 * sd[] and remove them from h->dev[], and for any
1503 * devices which have changed, remove the old device
1504 * info and add the new device info.
1505 * If minor device attributes change, just update
1506 * the existing device structure.
1511 while (i < h->ndevices) {
1513 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1514 if (device_change == DEVICE_NOT_FOUND) {
1516 hpsa_scsi_remove_entry(h, hostno, i,
1517 removed, &nremoved);
1518 continue; /* remove ^^^, hence i not incremented */
1519 } else if (device_change == DEVICE_CHANGED) {
1521 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1522 added, &nadded, removed, &nremoved);
1523 /* Set it to NULL to prevent it from being freed
1524 * at the bottom of hpsa_update_scsi_devices()
1527 } else if (device_change == DEVICE_UPDATED) {
1528 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1533 /* Now, make sure every device listed in sd[] is also
1534 * listed in h->dev[], adding them if they aren't found
1537 for (i = 0; i < nsds; i++) {
1538 if (!sd[i]) /* if already added above. */
1541 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1542 * as the SCSI mid-layer does not handle such devices well.
1543 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1544 * at 160Hz, and prevents the system from coming up.
1546 if (sd[i]->volume_offline) {
1547 hpsa_show_volume_status(h, sd[i]);
1548 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1552 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1553 h->ndevices, &entry);
1554 if (device_change == DEVICE_NOT_FOUND) {
1556 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1557 added, &nadded) != 0)
1559 sd[i] = NULL; /* prevent from being freed later. */
1560 } else if (device_change == DEVICE_CHANGED) {
1561 /* should never happen... */
1563 dev_warn(&h->pdev->dev,
1564 "device unexpectedly changed.\n");
1565 /* but if it does happen, we just ignore that device */
1568 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1570 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1571 * any logical drives that need it enabled.
1573 for (i = 0; i < h->ndevices; i++)
1574 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1576 spin_unlock_irqrestore(&h->devlock, flags);
1578 /* Monitor devices which are in one of several NOT READY states to be
1579 * brought online later. This must be done without holding h->devlock,
1580 * so don't touch h->dev[]
1582 for (i = 0; i < nsds; i++) {
1583 if (!sd[i]) /* if already added above. */
1585 if (sd[i]->volume_offline)
1586 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1589 /* Don't notify scsi mid layer of any changes the first time through
1590 * (or if there are no changes) scsi_scan_host will do it later the
1591 * first time through.
1593 if (hostno == -1 || !changes)
1597 /* Notify scsi mid layer of any removed devices */
1598 for (i = 0; i < nremoved; i++) {
1599 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1600 struct scsi_device *sdev =
1601 scsi_device_lookup(sh, removed[i]->bus,
1602 removed[i]->target, removed[i]->lun);
1604 scsi_remove_device(sdev);
1605 scsi_device_put(sdev);
1608 * We don't expect to get here.
1609 * future cmds to this device will get selection
1610 * timeout as if the device was gone.
1612 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1613 "didn't find device for removal.");
1620 /* Notify scsi mid layer of any added devices */
1621 for (i = 0; i < nadded; i++) {
1622 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1624 if (scsi_add_device(sh, added[i]->bus,
1625 added[i]->target, added[i]->lun) == 0)
1627 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1628 "addition failed, device not added.");
1629 /* now we have to remove it from h->dev,
1630 * since it didn't get added to scsi mid layer
1632 fixup_botched_add(h, added[i]);
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
1644 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1645 int bus, int target, int lun)
1648 struct hpsa_scsi_dev_t *sd;
1650 for (i = 0; i < h->ndevices; i++) {
1652 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1658 static int hpsa_slave_alloc(struct scsi_device *sdev)
1660 struct hpsa_scsi_dev_t *sd;
1661 unsigned long flags;
1662 struct ctlr_info *h;
1664 h = sdev_to_hba(sdev);
1665 spin_lock_irqsave(&h->devlock, flags);
1666 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1667 sdev_id(sdev), sdev->lun);
1669 atomic_set(&sd->ioaccel_cmds_out, 0);
1670 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1672 sdev->hostdata = NULL;
1673 spin_unlock_irqrestore(&h->devlock, flags);
1677 /* configure scsi device based on internal per-device structure */
1678 static int hpsa_slave_configure(struct scsi_device *sdev)
1680 struct hpsa_scsi_dev_t *sd;
1683 sd = sdev->hostdata;
1684 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1687 queue_depth = sd->queue_depth != 0 ?
1688 sd->queue_depth : sdev->host->can_queue;
1690 queue_depth = sdev->host->can_queue;
1692 scsi_change_queue_depth(sdev, queue_depth);
1697 static void hpsa_slave_destroy(struct scsi_device *sdev)
1699 /* nothing to do. */
1702 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1706 if (!h->ioaccel2_cmd_sg_list)
1708 for (i = 0; i < h->nr_cmds; i++) {
1709 kfree(h->ioaccel2_cmd_sg_list[i]);
1710 h->ioaccel2_cmd_sg_list[i] = NULL;
1712 kfree(h->ioaccel2_cmd_sg_list);
1713 h->ioaccel2_cmd_sg_list = NULL;
1716 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1720 if (h->chainsize <= 0)
1723 h->ioaccel2_cmd_sg_list =
1724 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1726 if (!h->ioaccel2_cmd_sg_list)
1728 for (i = 0; i < h->nr_cmds; i++) {
1729 h->ioaccel2_cmd_sg_list[i] =
1730 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1731 h->maxsgentries, GFP_KERNEL);
1732 if (!h->ioaccel2_cmd_sg_list[i])
1738 hpsa_free_ioaccel2_sg_chain_blocks(h);
1742 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1746 if (!h->cmd_sg_list)
1748 for (i = 0; i < h->nr_cmds; i++) {
1749 kfree(h->cmd_sg_list[i]);
1750 h->cmd_sg_list[i] = NULL;
1752 kfree(h->cmd_sg_list);
1753 h->cmd_sg_list = NULL;
1756 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1760 if (h->chainsize <= 0)
1763 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1765 if (!h->cmd_sg_list) {
1766 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1769 for (i = 0; i < h->nr_cmds; i++) {
1770 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1771 h->chainsize, GFP_KERNEL);
1772 if (!h->cmd_sg_list[i]) {
1773 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1780 hpsa_free_sg_chain_blocks(h);
1784 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1785 struct io_accel2_cmd *cp, struct CommandList *c)
1787 struct ioaccel2_sg_element *chain_block;
1791 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1792 chain_size = le32_to_cpu(cp->data_len);
1793 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1795 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1796 /* prevent subsequent unmapping */
1797 cp->sg->address = 0;
1800 cp->sg->address = cpu_to_le64(temp64);
1804 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1805 struct io_accel2_cmd *cp)
1807 struct ioaccel2_sg_element *chain_sg;
1812 temp64 = le64_to_cpu(chain_sg->address);
1813 chain_size = le32_to_cpu(cp->data_len);
1814 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1817 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1818 struct CommandList *c)
1820 struct SGDescriptor *chain_sg, *chain_block;
1824 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1825 chain_block = h->cmd_sg_list[c->cmdindex];
1826 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1827 chain_len = sizeof(*chain_sg) *
1828 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1829 chain_sg->Len = cpu_to_le32(chain_len);
1830 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1832 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1833 /* prevent subsequent unmapping */
1834 chain_sg->Addr = cpu_to_le64(0);
1837 chain_sg->Addr = cpu_to_le64(temp64);
1841 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1842 struct CommandList *c)
1844 struct SGDescriptor *chain_sg;
1846 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1849 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1850 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1851 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1855 /* Decode the various types of errors on ioaccel2 path.
1856 * Return 1 for any error that should generate a RAID path retry.
1857 * Return 0 for errors that don't require a RAID path retry.
1859 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1860 struct CommandList *c,
1861 struct scsi_cmnd *cmd,
1862 struct io_accel2_cmd *c2)
1866 u32 ioaccel2_resid = 0;
1868 switch (c2->error_data.serv_response) {
1869 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1870 switch (c2->error_data.status) {
1871 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1873 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1874 dev_warn(&h->pdev->dev,
1875 "%s: task complete with check condition.\n",
1876 "HP SSD Smart Path");
1877 cmd->result |= SAM_STAT_CHECK_CONDITION;
1878 if (c2->error_data.data_present !=
1879 IOACCEL2_SENSE_DATA_PRESENT) {
1880 memset(cmd->sense_buffer, 0,
1881 SCSI_SENSE_BUFFERSIZE);
1884 /* copy the sense data */
1885 data_len = c2->error_data.sense_data_len;
1886 if (data_len > SCSI_SENSE_BUFFERSIZE)
1887 data_len = SCSI_SENSE_BUFFERSIZE;
1888 if (data_len > sizeof(c2->error_data.sense_data_buff))
1890 sizeof(c2->error_data.sense_data_buff);
1891 memcpy(cmd->sense_buffer,
1892 c2->error_data.sense_data_buff, data_len);
1895 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1896 dev_warn(&h->pdev->dev,
1897 "%s: task complete with BUSY status.\n",
1898 "HP SSD Smart Path");
1901 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1902 dev_warn(&h->pdev->dev,
1903 "%s: task complete with reservation conflict.\n",
1904 "HP SSD Smart Path");
1907 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1910 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1911 dev_warn(&h->pdev->dev,
1912 "%s: task complete with aborted status.\n",
1913 "HP SSD Smart Path");
1917 dev_warn(&h->pdev->dev,
1918 "%s: task complete with unrecognized status: 0x%02x\n",
1919 "HP SSD Smart Path", c2->error_data.status);
1924 case IOACCEL2_SERV_RESPONSE_FAILURE:
1925 switch (c2->error_data.status) {
1926 case IOACCEL2_STATUS_SR_IO_ERROR:
1927 case IOACCEL2_STATUS_SR_IO_ABORTED:
1928 case IOACCEL2_STATUS_SR_OVERRUN:
1931 case IOACCEL2_STATUS_SR_UNDERRUN:
1932 cmd->result = (DID_OK << 16); /* host byte */
1933 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1934 ioaccel2_resid = get_unaligned_le32(
1935 &c2->error_data.resid_cnt[0]);
1936 scsi_set_resid(cmd, ioaccel2_resid);
1938 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1939 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1940 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1941 /* We will get an event from ctlr to trigger rescan */
1946 dev_warn(&h->pdev->dev,
1947 "unexpected delivery or target failure, status = 0x%02x\n",
1948 c2->error_data.status);
1951 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1953 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1955 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1956 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1959 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1960 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1963 dev_warn(&h->pdev->dev,
1964 "%s: Unrecognized server response: 0x%02x\n",
1965 "HP SSD Smart Path",
1966 c2->error_data.serv_response);
1971 return retry; /* retry on raid path? */
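/*
 * Illustrative sketch (not part of the driver): what the
 * get_unaligned_le32() call in the underrun case above does -- assemble a
 * 32-bit little-endian value from a byte buffer that may not be naturally
 * aligned.  Plain C equivalent.
 */
#include <stdint.h>

static uint32_t example_get_unaligned_le32(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}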
1974 static void process_ioaccel2_completion(struct ctlr_info *h,
1975 struct CommandList *c, struct scsi_cmnd *cmd,
1976 struct hpsa_scsi_dev_t *dev)
1978 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1980 /* check for good status */
1981 if (likely(c2->error_data.serv_response == 0 &&
1982 c2->error_data.status == 0)) {
1984 cmd->scsi_done(cmd);
1988 /* Any RAID offload error results in retry which will use
1989 * the normal I/O path so the controller can handle whatever's
1992 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1993 c2->error_data.serv_response ==
1994 IOACCEL2_SERV_RESPONSE_FAILURE) {
1995 if (c2->error_data.status ==
1996 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1997 dev->offload_enabled = 0;
2001 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2005 cmd->scsi_done(cmd);
2009 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2010 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
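/*
 * Illustrative sketch (not part of the driver): the deferred-resubmit
 * pattern used above.  A work item embedded in the command structure is
 * queued on the CPU that handled the completion, and the worker re-issues
 * the request down the normal (non-accelerated) path.  The example_*
 * names and command layout are hypothetical; only the workqueue calls
 * themselves are standard kernel API.
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct example_cmd {
	struct work_struct work;
	/* ...whatever state is needed to re-drive the request... */
};

static void example_resubmit_worker(struct work_struct *work)
{
	struct example_cmd *c = container_of(work, struct example_cmd, work);

	/* re-issue *c down the RAID path here */
	(void)c;
}

static void example_defer_resubmit(struct workqueue_struct *wq,
				   struct example_cmd *c)
{
	INIT_WORK(&c->work, example_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), wq, &c->work);
}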
2013 /* Returns 0 on success, < 0 otherwise. */
2014 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2015 struct CommandList *cp)
2017 u8 tmf_status = cp->err_info->ScsiStatus;
2019 switch (tmf_status) {
2020 case CISS_TMF_COMPLETE:
2022 * CISS_TMF_COMPLETE never happens, instead,
2023 * ei->CommandStatus == 0 for this case.
2025 case CISS_TMF_SUCCESS:
2027 case CISS_TMF_INVALID_FRAME:
2028 case CISS_TMF_NOT_SUPPORTED:
2029 case CISS_TMF_FAILED:
2030 case CISS_TMF_WRONG_LUN:
2031 case CISS_TMF_OVERLAPPED_TAG:
2034 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2041 static void complete_scsi_command(struct CommandList *cp)
2043 struct scsi_cmnd *cmd;
2044 struct ctlr_info *h;
2045 struct ErrorInfo *ei;
2046 struct hpsa_scsi_dev_t *dev;
2047 struct io_accel2_cmd *c2;
2050 u8 asc; /* additional sense code */
2051 u8 ascq; /* additional sense code qualifier */
2052 unsigned long sense_data_size;
2057 dev = cmd->device->hostdata;
2058 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2060 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2061 if ((cp->cmd_type == CMD_SCSI) &&
2062 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2063 hpsa_unmap_sg_chain_block(h, cp);
2065 if ((cp->cmd_type == CMD_IOACCEL2) &&
2066 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2067 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2069 cmd->result = (DID_OK << 16); /* host byte */
2070 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2072 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2073 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2076 * We check for lockup status here as it may be set for
2077 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2078 * fail_all_outstanding_cmds()
2080 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2081 /* DID_NO_CONNECT will prevent a retry */
2082 cmd->result = DID_NO_CONNECT << 16;
2084 cmd->scsi_done(cmd);
2088 if (cp->cmd_type == CMD_IOACCEL2)
2089 return process_ioaccel2_completion(h, cp, cmd, dev);
2091 scsi_set_resid(cmd, ei->ResidualCnt);
2092 if (ei->CommandStatus == 0) {
2093 if (cp->cmd_type == CMD_IOACCEL1)
2094 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2096 cmd->scsi_done(cmd);
2100 /* For I/O accelerator commands, copy over some fields to the normal
2101 * CISS header used below for error handling.
2103 if (cp->cmd_type == CMD_IOACCEL1) {
2104 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2105 cp->Header.SGList = scsi_sg_count(cmd);
2106 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2107 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2108 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2109 cp->Header.tag = c->tag;
2110 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2111 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2113 /* Any RAID offload error results in retry which will use
2114 * the normal I/O path so the controller can handle whatever's
2117 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2118 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2119 dev->offload_enabled = 0;
2120 INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
2121 queue_work_on(raw_smp_processor_id(),
2122 h->resubmit_wq, &cp->work);
2127 /* an error has occurred */
2128 switch (ei->CommandStatus) {
2130 case CMD_TARGET_STATUS:
2131 cmd->result |= ei->ScsiStatus;
2132 /* copy the sense data */
2133 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2134 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2136 sense_data_size = sizeof(ei->SenseInfo);
2137 if (ei->SenseLen < sense_data_size)
2138 sense_data_size = ei->SenseLen;
2139 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2141 decode_sense_data(ei->SenseInfo, sense_data_size,
2142 &sense_key, &asc, &ascq);
2143 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2144 if (sense_key == ABORTED_COMMAND) {
2145 cmd->result |= DID_SOFT_ERROR << 16;
2150 /* Problem was not a check condition
2151 * Pass it up to the upper layers...
2153 if (ei->ScsiStatus) {
2154 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2155 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2156 "Returning result: 0x%x\n",
2158 sense_key, asc, ascq,
2160 } else { /* scsi status is zero??? How??? */
2161 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2162 "Returning no connection.\n", cp),
2164 /* Ordinarily, this case should never happen,
2165 * but there is a bug in some released firmware
2166 * revisions that allows it to happen if, for
2167 * example, a 4100 backplane loses power and
2168 * the tape drive is in it. We assume that
2169 * it's a fatal error of some kind because we
2170 * can't show that it wasn't. We will make it
2171 * look like selection timeout since that is
2172 * the most common reason for this to occur,
2173 * and it's severe enough.
2176 cmd->result = DID_NO_CONNECT << 16;
2180 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2182 case CMD_DATA_OVERRUN:
2183 dev_warn(&h->pdev->dev,
2184 "CDB %16phN data overrun\n", cp->Request.CDB);
2187 /* print_bytes(cp, sizeof(*cp), 1, 0);
2189 /* We get CMD_INVALID if you address a non-existent device
2190 * instead of a selection timeout (no response). You will
2191 * see this if you yank out a drive, then try to access it.
2192 * This is kind of a shame because it means that any other
2193 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2194 * missing target. */
2195 cmd->result = DID_NO_CONNECT << 16;
2198 case CMD_PROTOCOL_ERR:
2199 cmd->result = DID_ERROR << 16;
2200 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2203 case CMD_HARDWARE_ERR:
2204 cmd->result = DID_ERROR << 16;
2205 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2208 case CMD_CONNECTION_LOST:
2209 cmd->result = DID_ERROR << 16;
2210 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2214 cmd->result = DID_ABORT << 16;
2215 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2216 cp->Request.CDB, ei->ScsiStatus);
2218 case CMD_ABORT_FAILED:
2219 cmd->result = DID_ERROR << 16;
2220 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2223 case CMD_UNSOLICITED_ABORT:
2224 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2225 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2229 cmd->result = DID_TIME_OUT << 16;
2230 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2233 case CMD_UNABORTABLE:
2234 cmd->result = DID_ERROR << 16;
2235 dev_warn(&h->pdev->dev, "Command unabortable\n");
2237 case CMD_TMF_STATUS:
2238 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2239 cmd->result = DID_ERROR << 16;
2241 case CMD_IOACCEL_DISABLED:
2242 /* This only handles the direct pass-through case since RAID
2243 * offload is handled above. Just attempt a retry.
2245 cmd->result = DID_SOFT_ERROR << 16;
2246 dev_warn(&h->pdev->dev,
2247 "cp %p had HP SSD Smart Path error\n", cp);
2250 cmd->result = DID_ERROR << 16;
2251 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2252 cp, ei->CommandStatus);
2255 cmd->scsi_done(cmd);
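/*
 * Illustrative sketch (not part of the driver): the sense-data copy in
 * the CMD_TARGET_STATUS case above clamps the length to the destination
 * buffer, to the controller's sense area, and to the amount actually
 * returned.  Plain C equivalent.
 */
#include <stddef.h>
#include <string.h>

static size_t example_copy_sense(unsigned char *dst, size_t dst_size,
				 const unsigned char *src, size_t src_area_size,
				 size_t returned_len)
{
	size_t n = dst_size < src_area_size ? dst_size : src_area_size;

	if (returned_len < n)
		n = returned_len;	/* never copy more than was returned */
	memcpy(dst, src, n);
	return n;
}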
2258 static void hpsa_pci_unmap(struct pci_dev *pdev,
2259 struct CommandList *c, int sg_used, int data_direction)
2263 for (i = 0; i < sg_used; i++)
2264 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2265 le32_to_cpu(c->SG[i].Len),
2269 static int hpsa_map_one(struct pci_dev *pdev,
2270 struct CommandList *cp,
2277 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2278 cp->Header.SGList = 0;
2279 cp->Header.SGTotal = cpu_to_le16(0);
2283 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2284 if (dma_mapping_error(&pdev->dev, addr64)) {
2285 /* Prevent subsequent unmap of something never mapped */
2286 cp->Header.SGList = 0;
2287 cp->Header.SGTotal = cpu_to_le16(0);
2290 cp->SG[0].Addr = cpu_to_le64(addr64);
2291 cp->SG[0].Len = cpu_to_le32(buflen);
2292 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2293 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2294 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2298 #define NO_TIMEOUT ((unsigned long) -1)
2299 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2300 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2301 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2303 DECLARE_COMPLETION_ONSTACK(wait);
2306 __enqueue_cmd_and_start_io(h, c, reply_queue);
2307 if (timeout_msecs == NO_TIMEOUT) {
2308 /* TODO: get rid of this no-timeout thing */
2309 wait_for_completion_io(&wait);
2312 if (!wait_for_completion_io_timeout(&wait,
2313 msecs_to_jiffies(timeout_msecs))) {
2314 dev_warn(&h->pdev->dev, "Command timed out.\n");
2320 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2321 int reply_queue, unsigned long timeout_msecs)
2323 if (unlikely(lockup_detected(h))) {
2324 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2327 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2330 static u32 lockup_detected(struct ctlr_info *h)
2333 u32 rc, *lockup_detected;
2336 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2337 rc = *lockup_detected;
2342 #define MAX_DRIVER_CMD_RETRIES 25
2343 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2344 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2346 int backoff_time = 10, retry_count = 0;
2350 memset(c->err_info, 0, sizeof(*c->err_info));
2351 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2356 if (retry_count > 3) {
2357 msleep(backoff_time);
2358 if (backoff_time < 1000)
2361 } while ((check_for_unit_attention(h, c) ||
2362 check_for_busy(h, c)) &&
2363 retry_count <= MAX_DRIVER_CMD_RETRIES);
2364 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2365 if (retry_count > MAX_DRIVER_CMD_RETRIES)
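/*
 * Illustrative sketch (not part of the driver): the shape of the retry
 * loop above -- re-issue while the command keeps completing with a
 * transient condition (unit attention or busy), sleeping with a doubling
 * backoff from 10 ms up to about a second once the first few quick
 * retries have failed.  The issue/transient callbacks are hypothetical
 * stand-ins for sending the command and inspecting its error info.
 */
#include <linux/delay.h>
#include <linux/errno.h>

static int example_retry_with_backoff(int (*issue)(void *ctx),
				      int (*transient)(void *ctx),
				      void *ctx, int max_retries)
{
	int backoff_ms = 10, retry_count = 0, rc;

	do {
		rc = issue(ctx);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_ms);
			if (backoff_ms < 1000)
				backoff_ms *= 2;
		}
	} while (transient(ctx) && retry_count <= max_retries);

	return retry_count > max_retries ? -EIO : rc;
}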
2370 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2371 struct CommandList *c)
2373 const u8 *cdb = c->Request.CDB;
2374 const u8 *lun = c->Header.LUN.LunAddrBytes;
2376 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2377 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2378 txt, lun[0], lun[1], lun[2], lun[3],
2379 lun[4], lun[5], lun[6], lun[7],
2380 cdb[0], cdb[1], cdb[2], cdb[3],
2381 cdb[4], cdb[5], cdb[6], cdb[7],
2382 cdb[8], cdb[9], cdb[10], cdb[11],
2383 cdb[12], cdb[13], cdb[14], cdb[15]);
2386 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2387 struct CommandList *cp)
2389 const struct ErrorInfo *ei = cp->err_info;
2390 struct device *d = &cp->h->pdev->dev;
2391 u8 sense_key, asc, ascq;
2394 switch (ei->CommandStatus) {
2395 case CMD_TARGET_STATUS:
2396 if (ei->SenseLen > sizeof(ei->SenseInfo))
2397 sense_len = sizeof(ei->SenseInfo);
2399 sense_len = ei->SenseLen;
2400 decode_sense_data(ei->SenseInfo, sense_len,
2401 &sense_key, &asc, &ascq);
2402 hpsa_print_cmd(h, "SCSI status", cp);
2403 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2404 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2405 sense_key, asc, ascq);
2407 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2408 if (ei->ScsiStatus == 0)
2409 dev_warn(d, "SCSI status is abnormally zero. "
2410 "(probably indicates selection timeout "
2411 "reported incorrectly due to a known "
2412 "firmware bug, circa July, 2001.)\n");
2414 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2416 case CMD_DATA_OVERRUN:
2417 hpsa_print_cmd(h, "overrun condition", cp);
2420 /* controller unfortunately reports SCSI passthru's
2421 * to non-existent targets as invalid commands.
2423 hpsa_print_cmd(h, "invalid command", cp);
2424 dev_warn(d, "probably means device no longer present\n");
2427 case CMD_PROTOCOL_ERR:
2428 hpsa_print_cmd(h, "protocol error", cp);
2430 case CMD_HARDWARE_ERR:
2431 hpsa_print_cmd(h, "hardware error", cp);
2433 case CMD_CONNECTION_LOST:
2434 hpsa_print_cmd(h, "connection lost", cp);
2437 hpsa_print_cmd(h, "aborted", cp);
2439 case CMD_ABORT_FAILED:
2440 hpsa_print_cmd(h, "abort failed", cp);
2442 case CMD_UNSOLICITED_ABORT:
2443 hpsa_print_cmd(h, "unsolicited abort", cp);
2446 hpsa_print_cmd(h, "timed out", cp);
2448 case CMD_UNABORTABLE:
2449 hpsa_print_cmd(h, "unabortable", cp);
2451 case CMD_CTLR_LOCKUP:
2452 hpsa_print_cmd(h, "controller lockup detected", cp);
2455 hpsa_print_cmd(h, "unknown status", cp);
2456 dev_warn(d, "Unknown command status %x\n",
2461 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2462 u16 page, unsigned char *buf,
2463 unsigned char bufsize)
2466 struct CommandList *c;
2467 struct ErrorInfo *ei;
2471 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2472 page, scsi3addr, TYPE_CMD)) {
2476 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2477 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2481 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2482 hpsa_scsi_interpret_error(h, c);
2490 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2491 unsigned char *scsi3addr, unsigned char page,
2492 struct bmic_controller_parameters *buf, size_t bufsize)
2495 struct CommandList *c;
2496 struct ErrorInfo *ei;
2499 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2500 page, scsi3addr, TYPE_CMD)) {
2504 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2505 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2509 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2510 hpsa_scsi_interpret_error(h, c);
2518 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2519 u8 reset_type, int reply_queue)
2522 struct CommandList *c;
2523 struct ErrorInfo *ei;
2528 /* fill_cmd can't fail here, no data buffer to map. */
2529 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2530 scsi3addr, TYPE_MSG);
2531 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2532 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2534 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2537 /* no unmap needed here because no data xfer. */
2540 if (ei->CommandStatus != 0) {
2541 hpsa_scsi_interpret_error(h, c);
2549 static void hpsa_get_raid_level(struct ctlr_info *h,
2550 unsigned char *scsi3addr, unsigned char *raid_level)
2555 *raid_level = RAID_UNKNOWN;
2556 buf = kzalloc(64, GFP_KERNEL);
2559 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2561 *raid_level = buf[8];
2562 if (*raid_level > RAID_UNKNOWN)
2563 *raid_level = RAID_UNKNOWN;
2568 #define HPSA_MAP_DEBUG
2569 #ifdef HPSA_MAP_DEBUG
2570 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2571 struct raid_map_data *map_buff)
2573 struct raid_map_disk_data *dd = &map_buff->data[0];
2575 u16 map_cnt, row_cnt, disks_per_row;
2580 /* Show details only if debugging has been activated. */
2581 if (h->raid_offload_debug < 2)
2584 dev_info(&h->pdev->dev, "structure_size = %u\n",
2585 le32_to_cpu(map_buff->structure_size));
2586 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2587 le32_to_cpu(map_buff->volume_blk_size));
2588 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2589 le64_to_cpu(map_buff->volume_blk_cnt));
2590 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2591 map_buff->phys_blk_shift);
2592 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2593 map_buff->parity_rotation_shift);
2594 dev_info(&h->pdev->dev, "strip_size = %u\n",
2595 le16_to_cpu(map_buff->strip_size));
2596 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2597 le64_to_cpu(map_buff->disk_starting_blk));
2598 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2599 le64_to_cpu(map_buff->disk_blk_cnt));
2600 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2601 le16_to_cpu(map_buff->data_disks_per_row));
2602 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2603 le16_to_cpu(map_buff->metadata_disks_per_row));
2604 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2605 le16_to_cpu(map_buff->row_cnt));
2606 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2607 le16_to_cpu(map_buff->layout_map_count));
2608 dev_info(&h->pdev->dev, "flags = 0x%x\n",
2609 le16_to_cpu(map_buff->flags));
2610 dev_info(&h->pdev->dev, "encryption = %s\n",
2611 le16_to_cpu(map_buff->flags) &
2612 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2613 dev_info(&h->pdev->dev, "dekindex = %u\n",
2614 le16_to_cpu(map_buff->dekindex));
2615 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2616 for (map = 0; map < map_cnt; map++) {
2617 dev_info(&h->pdev->dev, "Map%u:\n", map);
2618 row_cnt = le16_to_cpu(map_buff->row_cnt);
2619 for (row = 0; row < row_cnt; row++) {
2620 dev_info(&h->pdev->dev, " Row%u:\n", row);
2622 le16_to_cpu(map_buff->data_disks_per_row);
2623 for (col = 0; col < disks_per_row; col++, dd++)
2624 dev_info(&h->pdev->dev,
2625 " D%02u: h=0x%04x xor=%u,%u\n",
2626 col, dd->ioaccel_handle,
2627 dd->xor_mult[0], dd->xor_mult[1]);
2629 le16_to_cpu(map_buff->metadata_disks_per_row);
2630 for (col = 0; col < disks_per_row; col++, dd++)
2631 dev_info(&h->pdev->dev,
2632 " M%02u: h=0x%04x xor=%u,%u\n",
2633 col, dd->ioaccel_handle,
2634 dd->xor_mult[0], dd->xor_mult[1]);
2639 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2640 __attribute__((unused)) int rc,
2641 __attribute__((unused)) struct raid_map_data *map_buff)
2646 static int hpsa_get_raid_map(struct ctlr_info *h,
2647 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2650 struct CommandList *c;
2651 struct ErrorInfo *ei;
2655 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2656 sizeof(this_device->raid_map), 0,
2657 scsi3addr, TYPE_CMD)) {
2658 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2662 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2663 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2667 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2668 hpsa_scsi_interpret_error(h, c);
2674 /* @todo in the future, dynamically allocate RAID map memory */
2675 if (le32_to_cpu(this_device->raid_map.structure_size) >
2676 sizeof(this_device->raid_map)) {
2677 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2680 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2687 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2688 unsigned char scsi3addr[], u16 bmic_device_index,
2689 struct bmic_identify_physical_device *buf, size_t bufsize)
2692 struct CommandList *c;
2693 struct ErrorInfo *ei;
2696 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2697 0, RAID_CTLR_LUNID, TYPE_CMD);
2701 c->Request.CDB[2] = bmic_device_index & 0xff;
2702 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2704 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2707 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2708 hpsa_scsi_interpret_error(h, c);
2716 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2717 unsigned char scsi3addr[], u8 page)
2722 unsigned char *buf, bufsize;
2724 buf = kzalloc(256, GFP_KERNEL);
2728 /* Get the size of the page list first */
2729 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2730 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2731 buf, HPSA_VPD_HEADER_SZ);
2733 goto exit_unsupported;
2735 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2736 bufsize = pages + HPSA_VPD_HEADER_SZ;
2740 /* Get the whole VPD page list */
2741 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2742 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2745 goto exit_unsupported;
2748 for (i = 1; i <= pages; i++)
2749 if (buf[3 + i] == page)
2750 goto exit_supported;
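/*
 * Illustrative sketch (not part of the driver): scanning an INQUIRY
 * "Supported VPD Pages" response (page 0x00) as done above.  Byte 3
 * carries the number of page codes and the codes follow the 4-byte
 * header, hence the buf[3 + i] indexing.  Plain C; the buffer is assumed
 * to hold at least 4 + buf[3] bytes.
 */
#include <stdint.h>

static int example_vpd_page_supported(const uint8_t *buf, uint8_t page)
{
	uint8_t i, npages = buf[3];

	for (i = 1; i <= npages; i++)
		if (buf[3 + i] == page)
			return 1;	/* page code is listed */
	return 0;
}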
2759 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2760 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2766 this_device->offload_config = 0;
2767 this_device->offload_enabled = 0;
2768 this_device->offload_to_be_enabled = 0;
2770 buf = kzalloc(64, GFP_KERNEL);
2773 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2775 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2776 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2780 #define IOACCEL_STATUS_BYTE 4
2781 #define OFFLOAD_CONFIGURED_BIT 0x01
2782 #define OFFLOAD_ENABLED_BIT 0x02
2783 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2784 this_device->offload_config =
2785 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2786 if (this_device->offload_config) {
2787 this_device->offload_enabled =
2788 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2789 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2790 this_device->offload_enabled = 0;
2792 this_device->offload_to_be_enabled = this_device->offload_enabled;
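/*
 * Illustrative sketch (not part of the driver): decoding the ioaccel
 * status byte read above.  Bit 0 reports that offload is configured and
 * bit 1 that it is enabled; enabled only counts when configured, and the
 * driver additionally clears it if the RAID map cannot be fetched.
 * Plain C.
 */
#include <stdint.h>

static void example_decode_ioaccel_status(uint8_t status_byte,
					  int *configured, int *enabled)
{
	*configured = !!(status_byte & 0x01);		/* OFFLOAD_CONFIGURED_BIT */
	*enabled = *configured && !!(status_byte & 0x02);	/* OFFLOAD_ENABLED_BIT */
}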
2798 /* Get the device id from inquiry page 0x83 */
2799 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2800 unsigned char *device_id, int buflen)
2807 buf = kzalloc(64, GFP_KERNEL);
2810 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2812 memcpy(device_id, &buf[8], buflen);
2817 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2818 void *buf, int bufsize,
2819 int extended_response)
2822 struct CommandList *c;
2823 unsigned char scsi3addr[8];
2824 struct ErrorInfo *ei;
2828 /* address the controller */
2829 memset(scsi3addr, 0, sizeof(scsi3addr));
2830 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2831 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2835 if (extended_response)
2836 c->Request.CDB[1] = extended_response;
2837 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2838 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2842 if (ei->CommandStatus != 0 &&
2843 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2844 hpsa_scsi_interpret_error(h, c);
2847 struct ReportLUNdata *rld = buf;
2849 if (rld->extended_response_flag != extended_response) {
2850 dev_err(&h->pdev->dev,
2851 "report luns requested format %u, got %u\n",
2853 rld->extended_response_flag);
2862 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2863 struct ReportExtendedLUNdata *buf, int bufsize)
2865 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2866 HPSA_REPORT_PHYS_EXTENDED);
2869 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2870 struct ReportLUNdata *buf, int bufsize)
2872 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2875 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2876 int bus, int target, int lun)
2879 device->target = target;
2883 /* Use VPD inquiry to get details of volume status */
2884 static int hpsa_get_volume_status(struct ctlr_info *h,
2885 unsigned char scsi3addr[])
2892 buf = kzalloc(64, GFP_KERNEL);
2894 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2896 /* Does controller have VPD for logical volume status? */
2897 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2900 /* Get the size of the VPD return buffer */
2901 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2902 buf, HPSA_VPD_HEADER_SZ);
2907 /* Now get the whole VPD buffer */
2908 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2909 buf, size + HPSA_VPD_HEADER_SZ);
2912 status = buf[4]; /* status byte */
2918 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2921 /* Determine offline status of a volume.  Returns 0 (not offline), or:
2924 * 0xff (offline for unknown reasons)
2925 * # (integer code indicating one of several NOT READY states
2926 * describing why a volume is to be kept offline)
2928 static int hpsa_volume_offline(struct ctlr_info *h,
2929 unsigned char scsi3addr[])
2931 struct CommandList *c;
2932 unsigned char *sense;
2933 u8 sense_key, asc, ascq;
2938 #define ASC_LUN_NOT_READY 0x04
2939 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2940 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2944 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2945 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
2950 sense = c->err_info->SenseInfo;
2951 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
2952 sense_len = sizeof(c->err_info->SenseInfo);
2954 sense_len = c->err_info->SenseLen;
2955 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
2956 cmd_status = c->err_info->CommandStatus;
2957 scsi_status = c->err_info->ScsiStatus;
2959 /* Is the volume 'not ready'? */
2960 if (cmd_status != CMD_TARGET_STATUS ||
2961 scsi_status != SAM_STAT_CHECK_CONDITION ||
2962 sense_key != NOT_READY ||
2963 asc != ASC_LUN_NOT_READY) {
2967 /* Determine the reason for not ready state */
2968 ldstat = hpsa_get_volume_status(h, scsi3addr);
2970 /* Keep volume offline in certain cases: */
2972 case HPSA_LV_UNDERGOING_ERASE:
2973 case HPSA_LV_UNDERGOING_RPI:
2974 case HPSA_LV_PENDING_RPI:
2975 case HPSA_LV_ENCRYPTED_NO_KEY:
2976 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2977 case HPSA_LV_UNDERGOING_ENCRYPTION:
2978 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2979 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2981 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2982 /* If VPD status page isn't available,
2983 * use ASC/ASCQ to determine state
2985 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2986 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2996 * Find out if a logical device supports aborts by simply trying one.
2997 * Smart Array may claim not to support aborts on logical drives, but
2998 * if an MSA2000 is connected, the drives on that will be presented
2999 * by the Smart Array as logical drives, and aborts may be sent to
3000 * those devices successfully. So the simplest way to find out is
3001 * to simply try an abort and see how the device responds.
3003 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3004 unsigned char *scsi3addr)
3006 struct CommandList *c;
3007 struct ErrorInfo *ei;
3010 u64 tag = (u64) -1; /* bogus tag */
3012 /* Assume that physical devices support aborts */
3013 if (!is_logical_dev_addr_mode(scsi3addr))
3018 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3019 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3020 /* no unmap needed here because no data xfer. */
3022 switch (ei->CommandStatus) {
3026 case CMD_UNABORTABLE:
3027 case CMD_ABORT_FAILED:
3030 case CMD_TMF_STATUS:
3031 rc = hpsa_evaluate_tmf_status(h, c);
3041 static int hpsa_update_device_info(struct ctlr_info *h,
3042 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3043 unsigned char *is_OBDR_device)
3046 #define OBDR_SIG_OFFSET 43
3047 #define OBDR_TAPE_SIG "$DR-10"
3048 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3049 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3051 unsigned char *inq_buff;
3052 unsigned char *obdr_sig;
3054 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3058 /* Do an inquiry to the device to see what it is. */
3059 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3060 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3061 /* Inquiry failed (msg printed already) */
3062 dev_err(&h->pdev->dev,
3063 "hpsa_update_device_info: inquiry failed\n");
3067 this_device->devtype = (inq_buff[0] & 0x1f);
3068 memcpy(this_device->scsi3addr, scsi3addr, 8);
3069 memcpy(this_device->vendor, &inq_buff[8],
3070 sizeof(this_device->vendor));
3071 memcpy(this_device->model, &inq_buff[16],
3072 sizeof(this_device->model));
3073 memset(this_device->device_id, 0,
3074 sizeof(this_device->device_id));
3075 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3076 sizeof(this_device->device_id));
3078 if (this_device->devtype == TYPE_DISK &&
3079 is_logical_dev_addr_mode(scsi3addr)) {
3082 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3083 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3084 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3085 volume_offline = hpsa_volume_offline(h, scsi3addr);
3086 if (volume_offline < 0 || volume_offline > 0xff)
3087 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3088 this_device->volume_offline = volume_offline & 0xff;
3090 this_device->raid_level = RAID_UNKNOWN;
3091 this_device->offload_config = 0;
3092 this_device->offload_enabled = 0;
3093 this_device->offload_to_be_enabled = 0;
3094 this_device->hba_ioaccel_enabled = 0;
3095 this_device->volume_offline = 0;
3096 this_device->queue_depth = h->nr_cmds;
3099 if (is_OBDR_device) {
3100 /* See if this is a One-Button-Disaster-Recovery device
3101 * by looking for "$DR-10" at offset 43 in inquiry data.
3103 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3104 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3105 strncmp(obdr_sig, OBDR_TAPE_SIG,
3106 OBDR_SIG_LEN) == 0);
3116 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3117 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3119 unsigned long flags;
3122 * See if this device supports aborts. If we already know
3123 * the device, we already know if it supports aborts, otherwise
3124 * we have to find out if it supports aborts by trying one.
3126 spin_lock_irqsave(&h->devlock, flags);
3127 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3128 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3129 entry >= 0 && entry < h->ndevices) {
3130 dev->supports_aborts = h->dev[entry]->supports_aborts;
3131 spin_unlock_irqrestore(&h->devlock, flags);
3133 spin_unlock_irqrestore(&h->devlock, flags);
3134 dev->supports_aborts =
3135 hpsa_device_supports_aborts(h, scsi3addr);
3136 if (dev->supports_aborts < 0)
3137 dev->supports_aborts = 0;
3141 static unsigned char *ext_target_model[] = {
3151 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
3155 for (i = 0; ext_target_model[i]; i++)
3156 if (strncmp(device->model, ext_target_model[i],
3157 strlen(ext_target_model[i])) == 0)
3162 /* Helper function to assign bus, target, lun mapping of devices.
3163 * Puts non-external target logical volumes on bus 0, external target logical
3164 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
3165 * Logical drive target and lun are assigned at this time, but
3166 * physical device lun and target assignment are deferred (assigned
3167 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3169 static void figure_bus_target_lun(struct ctlr_info *h,
3170 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3172 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
3174 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3175 /* physical device, target and lun filled in later */
3176 if (is_hba_lunid(lunaddrbytes))
3177 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
3179 /* defer target, lun assignment for physical devices */
3180 hpsa_set_bus_target_lun(device, 2, -1, -1);
3183 /* It's a logical device */
3184 if (is_ext_target(h, device)) {
3185 /* external target way, put logicals on bus 1
3186 * and match the target/lun numbers the external box
3187 * reports; other smart arrays go on bus 0, target 0, matching the lunid
3189 hpsa_set_bus_target_lun(device,
3190 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3193 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
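/*
 * Illustrative sketch (not part of the driver): how the 32-bit lunid used
 * above decomposes for an external target logical volume -- bits 16-29
 * carry the target number the external box reports and the low byte its
 * lun, which is what lands on bus 1.  Plain C; lunid is assumed already
 * converted to CPU byte order.
 */
#include <stdint.h>

static void example_split_external_lunid(uint32_t lunid,
					 unsigned int *target, unsigned int *lun)
{
	*target = (lunid >> 16) & 0x3fff;	/* target reported by the external box */
	*lun = lunid & 0x00ff;			/* lun reported by the external box */
}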
3197 * If there is no lun 0 on a target, linux won't find any devices.
3198 * For the external targets (arrays), we have to manually detect the enclosure
3199 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3200 * it for some reason. *tmpdevice is the target we're adding,
3201 * this_device is a pointer into the current element of currentsd[]
3202 * that we're building up in update_scsi_devices(), below.
3203 * lunzerobits is a bitmap that tracks which targets already have a
3205 * Returns 1 if an enclosure was added, 0 if not.
3207 static int add_ext_target_dev(struct ctlr_info *h,
3208 struct hpsa_scsi_dev_t *tmpdevice,
3209 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3210 unsigned long lunzerobits[], int *n_ext_target_devs)
3212 unsigned char scsi3addr[8];
3214 if (test_bit(tmpdevice->target, lunzerobits))
3215 return 0; /* There is already a lun 0 on this target. */
3217 if (!is_logical_dev_addr_mode(lunaddrbytes))
3218 return 0; /* It's the logical targets that may lack lun 0. */
3220 if (!is_ext_target(h, tmpdevice))
3221 return 0; /* Only external target devices have this problem. */
3223 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3226 memset(scsi3addr, 0, 8);
3227 scsi3addr[3] = tmpdevice->target;
3228 if (is_hba_lunid(scsi3addr))
3229 return 0; /* Don't add the RAID controller here. */
3231 if (is_scsi_rev_5(h))
3232 return 0; /* p1210m doesn't need to do this. */
3234 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3235 dev_warn(&h->pdev->dev, "Maximum number of external "
3236 "target devices exceeded. Check your hardware "
3241 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3243 (*n_ext_target_devs)++;
3244 hpsa_set_bus_target_lun(this_device,
3245 tmpdevice->bus, tmpdevice->target, 0);
3246 hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3247 set_bit(tmpdevice->target, lunzerobits);
3252 * Get address of physical disk used for an ioaccel2 mode command:
3253 * 1. Extract ioaccel2 handle from the command.
3254 * 2. Find a matching ioaccel2 handle from list of physical disks.
3256 * Returns 1 and sets scsi3addr to the address of the matching physical
3257 * disk, or 0 if no matching physical disk was found.
3259 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3260 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3262 struct io_accel2_cmd *c2 =
3263 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3264 unsigned long flags;
3267 spin_lock_irqsave(&h->devlock, flags);
3268 for (i = 0; i < h->ndevices; i++)
3269 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3270 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3271 sizeof(h->dev[i]->scsi3addr));
3272 spin_unlock_irqrestore(&h->devlock, flags);
3275 spin_unlock_irqrestore(&h->devlock, flags);
3280 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3281 * logdev. The number of luns in physdev and logdev are returned in
3282 * *nphysicals and *nlogicals, respectively.
3283 * Returns 0 on success, -1 otherwise.
3285 static int hpsa_gather_lun_info(struct ctlr_info *h,
3286 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3287 struct ReportLUNdata *logdev, u32 *nlogicals)
3289 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3290 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3293 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3294 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3295 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3296 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3297 *nphysicals = HPSA_MAX_PHYS_LUN;
3299 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3300 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3303 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3304 /* Reject Logicals in excess of our max capability. */
3305 if (*nlogicals > HPSA_MAX_LUN) {
3306 dev_warn(&h->pdev->dev,
3307 "maximum logical LUNs (%d) exceeded. "
3308 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3309 *nlogicals - HPSA_MAX_LUN);
3310 *nlogicals = HPSA_MAX_LUN;
3312 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3313 dev_warn(&h->pdev->dev,
3314 "maximum logical + physical LUNs (%d) exceeded. "
3315 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3316 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3317 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
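/*
 * Illustrative sketch (not part of the driver): the report-LUNs replies
 * parsed above start with a big-endian byte count; each extended physical
 * entry is 24 bytes and each logical entry is 8 bytes, which is where the
 * divisions by 24 and 8 come from.  Plain C.
 */
#include <stdint.h>

static uint32_t example_be32(const uint8_t p[4])
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint32_t example_lun_count(const uint8_t lun_list_length[4],
				  uint32_t entry_size)
{
	/* entry_size: 24 for extended physical entries, 8 for logical */
	return example_be32(lun_list_length) / entry_size;
}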
3322 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3323 int i, int nphysicals, int nlogicals,
3324 struct ReportExtendedLUNdata *physdev_list,
3325 struct ReportLUNdata *logdev_list)
3327 /* Helper function, figure out where the LUN ID info is coming from
3328 * given index i, lists of physical and logical devices, where in
3329 * the list the raid controller is supposed to appear (first or last)
3332 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3333 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3335 if (i == raid_ctlr_position)
3336 return RAID_CTLR_LUNID;
3338 if (i < logicals_start)
3339 return &physdev_list->LUN[i -
3340 (raid_ctlr_position == 0)].lunid[0];
3342 if (i < last_device)
3343 return &logdev_list->LUN[i - nphysicals -
3344 (raid_ctlr_position == 0)][0];
3349 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3352 int hba_mode_enabled;
3353 struct bmic_controller_parameters *ctlr_params;
3354 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3359 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3360 sizeof(struct bmic_controller_parameters));
3367 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3369 return hba_mode_enabled;
3372 /* get physical drive ioaccel handle and queue depth */
3373 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3374 struct hpsa_scsi_dev_t *dev,
3376 struct bmic_identify_physical_device *id_phys)
3379 struct ext_report_lun_entry *rle =
3380 (struct ext_report_lun_entry *) lunaddrbytes;
3382 dev->ioaccel_handle = rle->ioaccel_handle;
3383 if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3384 dev->hba_ioaccel_enabled = 1;
3385 memset(id_phys, 0, sizeof(*id_phys));
3386 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3387 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3390 /* Reserve space for FW operations */
3391 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3392 #define DRIVE_QUEUE_DEPTH 7
3394 le16_to_cpu(id_phys->current_queue_depth_limit) -
3395 DRIVE_CMDS_RESERVED_FOR_FW;
3397 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3398 atomic_set(&dev->ioaccel_cmds_out, 0);
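/*
 * Illustrative sketch (not part of the driver): the queue-depth choice
 * made above -- take the firmware-reported limit and hold back a couple
 * of slots for firmware-internal commands, or fall back to a small
 * conservative depth when the BMIC identify failed.  The guard against a
 * limit smaller than the reservation is an added safety assumption.
 */
static int example_drive_queue_depth(int identify_ok, unsigned int fw_limit)
{
	const unsigned int reserved_for_fw = 2;	/* DRIVE_CMDS_RESERVED_FOR_FW */
	const int conservative_depth = 7;	/* DRIVE_QUEUE_DEPTH */

	if (!identify_ok || fw_limit <= reserved_for_fw)
		return conservative_depth;
	return (int)(fw_limit - reserved_for_fw);
}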
3401 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3403 /* the idea here is we could get notified
3404 * that some devices have changed, so we do a report
3405 * physical luns and report logical luns cmd, and adjust
3406 * our list of devices accordingly.
3408 * The scsi3addr's of devices won't change so long as the
3409 * adapter is not reset. That means we can rescan and
3410 * tell which devices we already know about, vs. new
3411 * devices, vs. disappearing devices.
3413 struct ReportExtendedLUNdata *physdev_list = NULL;
3414 struct ReportLUNdata *logdev_list = NULL;
3415 struct bmic_identify_physical_device *id_phys = NULL;
3418 u32 ndev_allocated = 0;
3419 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3421 int i, n_ext_target_devs, ndevs_to_allocate;
3422 int raid_ctlr_position;
3423 int rescan_hba_mode;
3424 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3426 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3427 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3428 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3429 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3430 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3432 if (!currentsd || !physdev_list || !logdev_list ||
3433 !tmpdevice || !id_phys) {
3434 dev_err(&h->pdev->dev, "out of memory\n");
3437 memset(lunzerobits, 0, sizeof(lunzerobits));
3439 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3440 if (rescan_hba_mode < 0)
3443 if (!h->hba_mode_enabled && rescan_hba_mode)
3444 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3445 else if (h->hba_mode_enabled && !rescan_hba_mode)
3446 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3448 h->hba_mode_enabled = rescan_hba_mode;
3450 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3451 logdev_list, &nlogicals))
3454 /* We might see up to the maximum number of logical and physical disks
3455 * plus external target devices, and a device for the local RAID
3458 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3460 /* Allocate the per device structures */
3461 for (i = 0; i < ndevs_to_allocate; i++) {
3462 if (i >= HPSA_MAX_DEVICES) {
3463 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3464 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3465 ndevs_to_allocate - HPSA_MAX_DEVICES);
3469 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3470 if (!currentsd[i]) {
3471 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3472 __FILE__, __LINE__);
3478 if (is_scsi_rev_5(h))
3479 raid_ctlr_position = 0;
3481 raid_ctlr_position = nphysicals + nlogicals;
3483 /* adjust our table of devices */
3484 n_ext_target_devs = 0;
3485 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3486 u8 *lunaddrbytes, is_OBDR = 0;
3488 /* Figure out where the LUN ID info is coming from */
3489 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3490 i, nphysicals, nlogicals, physdev_list, logdev_list);
3492 /* skip masked non-disk devices */
3493 if (MASKED_DEVICE(lunaddrbytes))
3494 if (i < nphysicals + (raid_ctlr_position == 0) &&
3495 NON_DISK_PHYS_DEV(lunaddrbytes))
3498 /* Get device type, vendor, model, device id */
3499 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3501 continue; /* skip it if we can't talk to it. */
3502 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3503 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3504 this_device = currentsd[ncurrent];
3507 * For external target devices, we have to insert a LUN 0 which
3508 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3509 * is nonetheless an enclosure device there. We have to
3510 * present that otherwise linux won't find anything if
3511 * there is no lun 0.
3513 if (add_ext_target_dev(h, tmpdevice, this_device,
3514 lunaddrbytes, lunzerobits,
3515 &n_ext_target_devs)) {
3517 this_device = currentsd[ncurrent];
3520 *this_device = *tmpdevice;
3522 /* do not expose masked devices */
3523 if (MASKED_DEVICE(lunaddrbytes) &&
3524 i < nphysicals + (raid_ctlr_position == 0)) {
3525 if (h->hba_mode_enabled)
3526 dev_warn(&h->pdev->dev,
3527 "Masked physical device detected\n");
3528 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3530 this_device->expose_state =
3531 HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3534 switch (this_device->devtype) {
3536 /* We don't *really* support actual CD-ROM devices,
3537 * just "One Button Disaster Recovery" tape drive
3538 * which temporarily pretends to be a CD-ROM drive.
3539 * So we check that the device is really an OBDR tape
3540 * device by checking for "$DR-10" in bytes 43-48 of
3547 if (i >= nphysicals) {
3552 if (h->hba_mode_enabled)
3553 /* never use raid mapper in HBA mode */
3554 this_device->offload_enabled = 0;
3555 else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
3556 h->transMethod & CFGTBL_Trans_io_accel2))
3559 hpsa_get_ioaccel_drive_info(h, this_device,
3560 lunaddrbytes, id_phys);
3561 atomic_set(&this_device->ioaccel_cmds_out, 0);
3565 case TYPE_MEDIUM_CHANGER:
3568 case TYPE_ENCLOSURE:
3569 if (h->hba_mode_enabled)
3573 /* Only present the Smartarray HBA as a RAID controller.
3574 * If it's a RAID controller other than the HBA itself
3575 * (an external RAID controller, MSA500 or similar)
3578 if (!is_hba_lunid(lunaddrbytes))
3585 if (ncurrent >= HPSA_MAX_DEVICES)
3588 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3591 for (i = 0; i < ndev_allocated; i++)
3592 kfree(currentsd[i]);
3594 kfree(physdev_list);
3599 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3600 struct scatterlist *sg)
3602 u64 addr64 = (u64) sg_dma_address(sg);
3603 unsigned int len = sg_dma_len(sg);
3605 desc->Addr = cpu_to_le64(addr64);
3606 desc->Len = cpu_to_le32(len);
3611 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3612 * dma mapping and fills in the scatter gather entries of the
3615 static int hpsa_scatter_gather(struct ctlr_info *h,
3616 struct CommandList *cp,
3617 struct scsi_cmnd *cmd)
3619 struct scatterlist *sg;
3620 int use_sg, i, sg_index, chained;
3621 struct SGDescriptor *curr_sg;
3623 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3625 use_sg = scsi_dma_map(cmd);
3630 goto sglist_finished;
3635 scsi_for_each_sg(cmd, sg, use_sg, i) {
3636 if (i == h->max_cmd_sg_entries - 1 &&
3637 use_sg > h->max_cmd_sg_entries) {
3639 curr_sg = h->cmd_sg_list[cp->cmdindex];
3642 hpsa_set_sg_descriptor(curr_sg, sg);
3646 /* Back the pointer up to the last entry and mark it as "last". */
3647 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3649 if (use_sg + chained > h->maxSG)
3650 h->maxSG = use_sg + chained;
3653 cp->Header.SGList = h->max_cmd_sg_entries;
3654 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3655 if (hpsa_map_sg_chain_block(h, cp)) {
3656 scsi_dma_unmap(cmd);
3664 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3665 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
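/*
 * Illustrative sketch (not part of the driver): how the SG header fields
 * set above encode chaining.  With N mapped segments and M on-board
 * descriptor slots, an unchained command reports SGList = SGTotal = N;
 * a chained command reports SGList = M (the last slot becomes the chain
 * pointer) and SGTotal = N + 1 to account for that extra descriptor.
 * Plain C.
 */
static void example_sg_header(unsigned int use_sg, unsigned int max_onboard,
			      unsigned int *sg_list, unsigned int *sg_total,
			      int *chained)
{
	*chained = use_sg > max_onboard;
	if (*chained) {
		*sg_list = max_onboard;
		*sg_total = use_sg + 1;
	} else {
		*sg_list = use_sg;
		*sg_total = use_sg;
	}
}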
3669 #define IO_ACCEL_INELIGIBLE (1)
3670 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3676 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3683 if (*cdb_len == 6) {
3684 block = (((u32) cdb[2]) << 8) | cdb[3];
3687 BUG_ON(*cdb_len != 12);
3688 block = (((u32) cdb[2]) << 24) |
3689 (((u32) cdb[3]) << 16) |
3690 (((u32) cdb[4]) << 8) |
3693 (((u32) cdb[6]) << 24) |
3694 (((u32) cdb[7]) << 16) |
3695 (((u32) cdb[8]) << 8) |
3698 if (block_cnt > 0xffff)
3699 return IO_ACCEL_INELIGIBLE;
3701 cdb[0] = is_write ? WRITE_10 : READ_10;
3703 cdb[2] = (u8) (block >> 24);
3704 cdb[3] = (u8) (block >> 16);
3705 cdb[4] = (u8) (block >> 8);
3706 cdb[5] = (u8) (block);
3708 cdb[7] = (u8) (block_cnt >> 8);
3709 cdb[8] = (u8) (block_cnt);
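/*
 * Illustrative sketch (not part of the driver): what the CDB fixup above
 * produces.  The ioaccel path only accepts 10-byte read/write CDBs, so a
 * 6- or 12-byte request is rewritten with the LBA in bytes 2-5 and the
 * block count in bytes 7-8.  Plain C; 0x28 is the standard READ(10)
 * opcode.
 */
#include <stdint.h>

static void example_build_read10(uint32_t lba, uint16_t nblocks, uint8_t cdb10[10])
{
	cdb10[0] = 0x28;		/* READ_10 */
	cdb10[1] = 0;
	cdb10[2] = (uint8_t)(lba >> 24);
	cdb10[3] = (uint8_t)(lba >> 16);
	cdb10[4] = (uint8_t)(lba >> 8);
	cdb10[5] = (uint8_t)lba;
	cdb10[6] = 0;
	cdb10[7] = (uint8_t)(nblocks >> 8);
	cdb10[8] = (uint8_t)nblocks;
	cdb10[9] = 0;
}
/*
 * For example, a 6-byte READ of 8 blocks at LBA 0x1234 becomes
 * 28 00 00 00 12 34 00 00 08 00.
 */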
3717 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3718 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3719 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3721 struct scsi_cmnd *cmd = c->scsi_cmd;
3722 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3724 unsigned int total_len = 0;
3725 struct scatterlist *sg;
3728 struct SGDescriptor *curr_sg;
3729 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3731 /* TODO: implement chaining support */
3732 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3733 atomic_dec(&phys_disk->ioaccel_cmds_out);
3734 return IO_ACCEL_INELIGIBLE;
3737 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3739 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3740 atomic_dec(&phys_disk->ioaccel_cmds_out);
3741 return IO_ACCEL_INELIGIBLE;
3744 c->cmd_type = CMD_IOACCEL1;
3746 /* Adjust the DMA address to point to the accelerated command buffer */
3747 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3748 (c->cmdindex * sizeof(*cp));
3749 BUG_ON(c->busaddr & 0x0000007F);
3751 use_sg = scsi_dma_map(cmd);
3753 atomic_dec(&phys_disk->ioaccel_cmds_out);
3759 scsi_for_each_sg(cmd, sg, use_sg, i) {
3760 addr64 = (u64) sg_dma_address(sg);
3761 len = sg_dma_len(sg);
3763 curr_sg->Addr = cpu_to_le64(addr64);
3764 curr_sg->Len = cpu_to_le32(len);
3765 curr_sg->Ext = cpu_to_le32(0);
3768 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3770 switch (cmd->sc_data_direction) {
3772 control |= IOACCEL1_CONTROL_DATA_OUT;
3774 case DMA_FROM_DEVICE:
3775 control |= IOACCEL1_CONTROL_DATA_IN;
3778 control |= IOACCEL1_CONTROL_NODATAXFER;
3781 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3782 cmd->sc_data_direction);
3787 control |= IOACCEL1_CONTROL_NODATAXFER;
3790 c->Header.SGList = use_sg;
3791 /* Fill out the command structure to submit */
3792 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3793 cp->transfer_len = cpu_to_le32(total_len);
3794 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3795 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3796 cp->control = cpu_to_le32(control);
3797 memcpy(cp->CDB, cdb, cdb_len);
3798 memcpy(cp->CISS_LUN, scsi3addr, 8);
3799 /* Tag was already set at init time. */
3800 enqueue_cmd_and_start_io(h, c);
3805 * Queue a command directly to a device behind the controller using the
3806 * I/O accelerator path.
3808 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3809 struct CommandList *c)
3811 struct scsi_cmnd *cmd = c->scsi_cmd;
3812 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3816 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3817 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3821 * Set encryption parameters for the ioaccel2 request
3823 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3824 struct CommandList *c, struct io_accel2_cmd *cp)
3826 struct scsi_cmnd *cmd = c->scsi_cmd;
3827 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3828 struct raid_map_data *map = &dev->raid_map;
3831 /* Are we doing encryption on this device */
3832 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3834 /* Set the data encryption key index. */
3835 cp->dekindex = map->dekindex;
3837 /* Set the encryption enable flag, encoded into direction field. */
3838 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3840 /* Set encryption tweak values based on logical block address
3841 * If block size is 512, tweak value is LBA.
3842 * For other block sizes, tweak is (LBA * block size)/ 512)
3844 switch (cmd->cmnd[0]) {
3845 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3848 first_block = get_unaligned_be16(&cmd->cmnd[2]);
3852 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3855 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3859 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3862 dev_err(&h->pdev->dev,
3863 "ERROR: %s: size (0x%x) not supported for encryption\n",
3864 __func__, cmd->cmnd[0]);
3869 if (le32_to_cpu(map->volume_blk_size) != 512)
3870 first_block = first_block *
3871 le32_to_cpu(map->volume_blk_size)/512;
3873 cp->tweak_lower = cpu_to_le32(first_block);
3874 cp->tweak_upper = cpu_to_le32(first_block >> 32);
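/*
 * Illustrative sketch (not part of the driver): the tweak computed above
 * is the starting LBA expressed in 512-byte units, so on a volume with
 * 4096-byte blocks LBA 10 yields tweak 80; the result is then split into
 * 32-bit lower and upper halves.  Plain C.
 */
#include <stdint.h>

static void example_encrypt_tweak(uint64_t first_block, uint32_t volume_blk_size,
				  uint32_t *tweak_lower, uint32_t *tweak_upper)
{
	if (volume_blk_size != 512)
		first_block = first_block * volume_blk_size / 512;
	*tweak_lower = (uint32_t)first_block;
	*tweak_upper = (uint32_t)(first_block >> 32);
}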
3877 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3878 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3879 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3881 struct scsi_cmnd *cmd = c->scsi_cmd;
3882 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3883 struct ioaccel2_sg_element *curr_sg;
3885 struct scatterlist *sg;
3890 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3892 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3893 atomic_dec(&phys_disk->ioaccel_cmds_out);
3894 return IO_ACCEL_INELIGIBLE;
3897 c->cmd_type = CMD_IOACCEL2;
3898 /* Adjust the DMA address to point to the accelerated command buffer */
3899 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3900 (c->cmdindex * sizeof(*cp));
3901 BUG_ON(c->busaddr & 0x0000007F);
3903 memset(cp, 0, sizeof(*cp));
3904 cp->IU_type = IOACCEL2_IU_TYPE;
3906 use_sg = scsi_dma_map(cmd);
3908 atomic_dec(&phys_disk->ioaccel_cmds_out);
3914 if (use_sg > h->ioaccel_maxsg) {
3915 addr64 = le64_to_cpu(
3916 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
3917 curr_sg->address = cpu_to_le64(addr64);
3918 curr_sg->length = 0;
3919 curr_sg->reserved[0] = 0;
3920 curr_sg->reserved[1] = 0;
3921 curr_sg->reserved[2] = 0;
3922 curr_sg->chain_indicator = 0x80;
3924 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
3926 scsi_for_each_sg(cmd, sg, use_sg, i) {
3927 addr64 = (u64) sg_dma_address(sg);
3928 len = sg_dma_len(sg);
3930 curr_sg->address = cpu_to_le64(addr64);
3931 curr_sg->length = cpu_to_le32(len);
3932 curr_sg->reserved[0] = 0;
3933 curr_sg->reserved[1] = 0;
3934 curr_sg->reserved[2] = 0;
3935 curr_sg->chain_indicator = 0;
3939 switch (cmd->sc_data_direction) {
3941 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3942 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3944 case DMA_FROM_DEVICE:
3945 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3946 cp->direction |= IOACCEL2_DIR_DATA_IN;
3949 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3950 cp->direction |= IOACCEL2_DIR_NO_DATA;
3953 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3954 cmd->sc_data_direction);
3959 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3960 cp->direction |= IOACCEL2_DIR_NO_DATA;
3963 /* Set encryption parameters, if necessary */
3964 set_encrypt_ioaccel2(h, c, cp);
3966 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3967 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3968 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3970 cp->data_len = cpu_to_le32(total_len);
3971 cp->err_ptr = cpu_to_le64(c->busaddr +
3972 offsetof(struct io_accel2_cmd, error_data));
3973 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3975 /* fill in sg elements */
3976 if (use_sg > h->ioaccel_maxsg) {
3978 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
3979 atomic_dec(&phys_disk->ioaccel_cmds_out);
3980 scsi_dma_unmap(cmd);
3984 cp->sg_count = (u8) use_sg;
3986 enqueue_cmd_and_start_io(h, c);
3991 * Queue a command to the correct I/O accelerator path.
3993 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3994 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3995 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3997 /* Try to honor the device's queue depth */
3998 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3999 phys_disk->queue_depth) {
4000 atomic_dec(&phys_disk->ioaccel_cmds_out);
4001 return IO_ACCEL_INELIGIBLE;
4003 if (h->transMethod & CFGTBL_Trans_io_accel1)
4004 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4005 cdb, cdb_len, scsi3addr,
4008 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4009 cdb, cdb_len, scsi3addr,
4013 static void raid_map_helper(struct raid_map_data *map,
4014 int offload_to_mirror, u32 *map_index, u32 *current_group)
4016 if (offload_to_mirror == 0) {
4017 /* use physical disk in the first mirrored group. */
4018 *map_index %= le16_to_cpu(map->data_disks_per_row);
4022 /* determine mirror group that *map_index indicates */
4023 *current_group = *map_index /
4024 le16_to_cpu(map->data_disks_per_row);
4025 if (offload_to_mirror == *current_group)
4027 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4028 /* select map index from next group */
4029 *map_index += le16_to_cpu(map->data_disks_per_row);
4032 /* select map index from first group */
4033 *map_index %= le16_to_cpu(map->data_disks_per_row);
4036 } while (offload_to_mirror != *current_group);
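/*
 * Illustrative sketch (not part of the driver): walking the map index
 * from one mirror group to the next, as the loop above does, is
 * equivalent to the closed form below for any group number less than
 * layout_map_count -- keep the column within a group, then offset by the
 * requested group.  Plain C.
 */
#include <stdint.h>

static uint32_t example_mirror_map_index(uint32_t map_index,
					 uint16_t data_disks_per_row,
					 uint16_t mirror_group)
{
	return (map_index % data_disks_per_row) +
	       (uint32_t)mirror_group * data_disks_per_row;
}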
4040 * Attempt to perform offload RAID mapping for a logical volume I/O.
4042 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4043 struct CommandList *c)
4045 struct scsi_cmnd *cmd = c->scsi_cmd;
4046 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4047 struct raid_map_data *map = &dev->raid_map;
4048 struct raid_map_disk_data *dd = &map->data[0];
4051 u64 first_block, last_block;
4054 u64 first_row, last_row;
4055 u32 first_row_offset, last_row_offset;
4056 u32 first_column, last_column;
4057 u64 r0_first_row, r0_last_row;
4058 u32 r5or6_blocks_per_row;
4059 u64 r5or6_first_row, r5or6_last_row;
4060 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4061 u32 r5or6_first_column, r5or6_last_column;
4062 u32 total_disks_per_row;
4064 u32 first_group, last_group, current_group;
4072 #if BITS_PER_LONG == 32
4073 u64 tmpdiv;
4074 #endif
4075 int offload_to_mirror;
4077 /* check for valid opcode, get LBA and block count */
4078 switch (cmd->cmnd[0]) {
4083 (((u64) cmd->cmnd[2]) << 8) |
4085 block_cnt = cmd->cmnd[4];
4093 (((u64) cmd->cmnd[2]) << 24) |
4094 (((u64) cmd->cmnd[3]) << 16) |
4095 (((u64) cmd->cmnd[4]) << 8) |
4098 (((u32) cmd->cmnd[7]) << 8) |
4105 (((u64) cmd->cmnd[2]) << 24) |
4106 (((u64) cmd->cmnd[3]) << 16) |
4107 (((u64) cmd->cmnd[4]) << 8) |
4110 (((u32) cmd->cmnd[6]) << 24) |
4111 (((u32) cmd->cmnd[7]) << 16) |
4112 (((u32) cmd->cmnd[8]) << 8) |
4119 (((u64) cmd->cmnd[2]) << 56) |
4120 (((u64) cmd->cmnd[3]) << 48) |
4121 (((u64) cmd->cmnd[4]) << 40) |
4122 (((u64) cmd->cmnd[5]) << 32) |
4123 (((u64) cmd->cmnd[6]) << 24) |
4124 (((u64) cmd->cmnd[7]) << 16) |
4125 (((u64) cmd->cmnd[8]) << 8) |
4128 (((u32) cmd->cmnd[10]) << 24) |
4129 (((u32) cmd->cmnd[11]) << 16) |
4130 (((u32) cmd->cmnd[12]) << 8) |
4134 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4136 last_block = first_block + block_cnt - 1;
4138 /* check for write to non-RAID-0 */
4139 if (is_write && dev->raid_level != 0)
4140 return IO_ACCEL_INELIGIBLE;
4142 /* check for invalid block or wraparound */
4143 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4144 last_block < first_block)
4145 return IO_ACCEL_INELIGIBLE;
4147 /* calculate stripe information for the request */
4148 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4149 le16_to_cpu(map->strip_size);
4150 strip_size = le16_to_cpu(map->strip_size);
4151 #if BITS_PER_LONG == 32
4152 tmpdiv = first_block;
4153 (void) do_div(tmpdiv, blocks_per_row);
4154 first_row = tmpdiv;
4155 tmpdiv = last_block;
4156 (void) do_div(tmpdiv, blocks_per_row);
4157 last_row = tmpdiv;
4158 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4159 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4160 tmpdiv = first_row_offset;
4161 (void) do_div(tmpdiv, strip_size);
4162 first_column = tmpdiv;
4163 tmpdiv = last_row_offset;
4164 (void) do_div(tmpdiv, strip_size);
4165 last_column = tmpdiv;
4166 #else
4167 first_row = first_block / blocks_per_row;
4168 last_row = last_block / blocks_per_row;
4169 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4170 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4171 first_column = first_row_offset / strip_size;
4172 last_column = last_row_offset / strip_size;
4173 #endif
4175 /* if this isn't a single row/column then give to the controller */
4176 if ((first_row != last_row) || (first_column != last_column))
4177 return IO_ACCEL_INELIGIBLE;
4179 /* proceeding with driver mapping */
4180 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4181 le16_to_cpu(map->metadata_disks_per_row);
4182 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4183 le16_to_cpu(map->row_cnt);
4184 map_index = (map_row * total_disks_per_row) + first_column;
4186 switch (dev->raid_level) {
4188 break; /* nothing special to do */
4190 /* Handles load balance across RAID 1 members.
4191 * (2-drive R1 and R10 with even # of drives.)
4192 * Appropriate for SSDs, not optimal for HDDs
4194 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4195 if (dev->offload_to_mirror)
4196 map_index += le16_to_cpu(map->data_disks_per_row);
4197 dev->offload_to_mirror = !dev->offload_to_mirror;
4200 /* Handles N-way mirrors (R1-ADM)
4201 * and R10 with # of drives divisible by 3.
4203 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4205 offload_to_mirror = dev->offload_to_mirror;
4206 raid_map_helper(map, offload_to_mirror,
4207 &map_index, &current_group);
4208 /* set mirror group to use next time */
4210 (offload_to_mirror >=
4211 le16_to_cpu(map->layout_map_count) - 1)
4212 ? 0 : offload_to_mirror + 1;
4213 dev->offload_to_mirror = offload_to_mirror;
4214 /* Avoid direct use of dev->offload_to_mirror within this
4215 * function since multiple threads might simultaneously
4216 * increment it beyond the range of dev->layout_map_count -1.
4221 if (le16_to_cpu(map->layout_map_count) <= 1)
4224 /* Verify first and last block are in same RAID group */
4225 r5or6_blocks_per_row =
4226 le16_to_cpu(map->strip_size) *
4227 le16_to_cpu(map->data_disks_per_row);
4228 BUG_ON(r5or6_blocks_per_row == 0);
4229 stripesize = r5or6_blocks_per_row *
4230 le16_to_cpu(map->layout_map_count);
4231 #if BITS_PER_LONG == 32
4232 tmpdiv = first_block;
4233 first_group = do_div(tmpdiv, stripesize);
4234 tmpdiv = first_group;
4235 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4236 first_group = tmpdiv;
4237 tmpdiv = last_block;
4238 last_group = do_div(tmpdiv, stripesize);
4239 tmpdiv = last_group;
4240 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4241 last_group = tmpdiv;
4242 #else
4243 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4244 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4245 #endif
4246 if (first_group != last_group)
4247 return IO_ACCEL_INELIGIBLE;
4249 /* Verify request is in a single row of RAID 5/6 */
4250 #if BITS_PER_LONG == 32
4251 tmpdiv = first_block;
4252 (void) do_div(tmpdiv, stripesize);
4253 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4254 tmpdiv = last_block;
4255 (void) do_div(tmpdiv, stripesize);
4256 r5or6_last_row = r0_last_row = tmpdiv;
4257 #else
4258 first_row = r5or6_first_row = r0_first_row =
4259 first_block / stripesize;
4260 r5or6_last_row = r0_last_row = last_block / stripesize;
4261 #endif
4262 if (r5or6_first_row != r5or6_last_row)
4263 return IO_ACCEL_INELIGIBLE;
4266 /* Verify request is in a single column */
4267 #if BITS_PER_LONG == 32
4268 tmpdiv = first_block;
4269 first_row_offset = do_div(tmpdiv, stripesize);
4270 tmpdiv = first_row_offset;
4271 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4272 r5or6_first_row_offset = first_row_offset;
4273 tmpdiv = last_block;
4274 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4275 tmpdiv = r5or6_last_row_offset;
4276 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4277 tmpdiv = r5or6_first_row_offset;
4278 (void) do_div(tmpdiv, map->strip_size);
4279 first_column = r5or6_first_column = tmpdiv;
4280 tmpdiv = r5or6_last_row_offset;
4281 (void) do_div(tmpdiv, map->strip_size);
4282 r5or6_last_column = tmpdiv;
4283 #else
4284 first_row_offset = r5or6_first_row_offset =
4285 (u32)((first_block % stripesize) %
4286 r5or6_blocks_per_row);
4288 r5or6_last_row_offset =
4289 (u32)((last_block % stripesize) %
4290 r5or6_blocks_per_row);
4292 first_column = r5or6_first_column =
4293 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4294 r5or6_last_column =
4295 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4296 #endif
4297 if (r5or6_first_column != r5or6_last_column)
4298 return IO_ACCEL_INELIGIBLE;
4300 /* Request is eligible */
4301 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4302 le16_to_cpu(map->row_cnt);
4304 map_index = (first_group *
4305 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4306 (map_row * total_disks_per_row) + first_column;
4309 return IO_ACCEL_INELIGIBLE;
4312 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4313 return IO_ACCEL_INELIGIBLE;
4315 c->phys_disk = dev->phys_disk[map_index];
4317 disk_handle = dd[map_index].ioaccel_handle;
4318 disk_block = le64_to_cpu(map->disk_starting_blk) +
4319 first_row * le16_to_cpu(map->strip_size) +
4320 (first_row_offset - first_column *
4321 le16_to_cpu(map->strip_size));
4322 disk_block_cnt = block_cnt;
4324 /* handle differing logical/physical block sizes */
4325 if (map->phys_blk_shift) {
4326 disk_block <<= map->phys_blk_shift;
4327 disk_block_cnt <<= map->phys_blk_shift;
4329 BUG_ON(disk_block_cnt > 0xffff);
4331 /* build the new CDB for the physical disk I/O */
4332 if (disk_block > 0xffffffff) {
4333 cdb[0] = is_write ? WRITE_16 : READ_16;
4335 cdb[2] = (u8) (disk_block >> 56);
4336 cdb[3] = (u8) (disk_block >> 48);
4337 cdb[4] = (u8) (disk_block >> 40);
4338 cdb[5] = (u8) (disk_block >> 32);
4339 cdb[6] = (u8) (disk_block >> 24);
4340 cdb[7] = (u8) (disk_block >> 16);
4341 cdb[8] = (u8) (disk_block >> 8);
4342 cdb[9] = (u8) (disk_block);
4343 cdb[10] = (u8) (disk_block_cnt >> 24);
4344 cdb[11] = (u8) (disk_block_cnt >> 16);
4345 cdb[12] = (u8) (disk_block_cnt >> 8);
4346 cdb[13] = (u8) (disk_block_cnt);
4351 cdb[0] = is_write ? WRITE_10 : READ_10;
4353 cdb[2] = (u8) (disk_block >> 24);
4354 cdb[3] = (u8) (disk_block >> 16);
4355 cdb[4] = (u8) (disk_block >> 8);
4356 cdb[5] = (u8) (disk_block);
4358 cdb[7] = (u8) (disk_block_cnt >> 8);
4359 cdb[8] = (u8) (disk_block_cnt);
4363 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4364 dev->scsi3addr,
4365 dev->phys_disk[map_index]);
4366 }
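/*
 * Editor's note (worked example, not from the original source), showing the
 * stripe arithmetic used above.  Assuming strip_size == 128 and
 * data_disks_per_row == 3, so blocks_per_row == 384, a request starting at
 * first_block == 1000 maps as:
 *
 *	first_row        = 1000 / 384     = 2
 *	first_row_offset = 1000 - 2 * 384 = 232
 *	first_column     = 232 / 128      = 1
 *	disk_block       = disk_starting_blk + 2 * 128 + (232 - 1 * 128)
 *	                 = disk_starting_blk + 360
 *
 * Requests that span more than one row or column are returned to the
 * controller as IO_ACCEL_INELIGIBLE rather than being split here.
 */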
4369 * Submit commands down the "normal" RAID stack path
4370 * All callers to hpsa_ciss_submit must check lockup_detected
4371 * beforehand, both before (optionally) and after calling cmd_alloc
4373 static int hpsa_ciss_submit(struct ctlr_info *h,
4374 struct CommandList *c, struct scsi_cmnd *cmd,
4375 unsigned char scsi3addr[])
4377 cmd->host_scribble = (unsigned char *) c;
4378 c->cmd_type = CMD_SCSI;
4380 c->Header.ReplyQueue = 0; /* unused in simple mode */
4381 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4382 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4384 /* Fill in the request block... */
4386 c->Request.Timeout = 0;
4387 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4388 c->Request.CDBLen = cmd->cmd_len;
4389 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4390 switch (cmd->sc_data_direction) {
4391 case DMA_TO_DEVICE:
4392 c->Request.type_attr_dir =
4393 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4394 break;
4395 case DMA_FROM_DEVICE:
4396 c->Request.type_attr_dir =
4397 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4398 break;
4399 case DMA_NONE:
4400 c->Request.type_attr_dir =
4401 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4402 break;
4403 case DMA_BIDIRECTIONAL:
4404 /* This can happen if a buggy application does a scsi passthru
4405 * and sets both inlen and outlen to non-zero. ( see
4406 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4407 */
4409 c->Request.type_attr_dir =
4410 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4411 /* This is technically wrong, and hpsa controllers should
4412 * reject it with CMD_INVALID, which is the most correct
4413 * response, but non-fibre backends appear to let it
4414 * slide by, and give the same results as if this field
4415 * were set correctly. Either way is acceptable for
4416 * our purposes here.
4417 */
4419 break;
4421 default:
4422 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4423 cmd->sc_data_direction);
4424 BUG();
4425 break;
4426 }
4428 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4430 return SCSI_MLQUEUE_HOST_BUSY;
4432 enqueue_cmd_and_start_io(h, c);
4433 /* the cmd'll come back via intr handler in complete_scsi_command() */
4437 static void hpsa_cmd_init(struct ctlr_info *h, int index,
4438 struct CommandList *c)
4440 dma_addr_t cmd_dma_handle, err_dma_handle;
4442 /* Zero out all of commandlist except the last field, refcount */
4443 memset(c, 0, offsetof(struct CommandList, refcount));
4444 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4445 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4446 c->err_info = h->errinfo_pool + index;
4447 memset(c->err_info, 0, sizeof(*c->err_info));
4448 err_dma_handle = h->errinfo_pool_dhandle
4449 + index * sizeof(*c->err_info);
4450 c->cmdindex = index;
4451 c->busaddr = (u32) cmd_dma_handle;
4452 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4453 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
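/*
 * Editor's note (not from the original source): each pre-allocated command
 * gets a tag of (index << DIRECT_LOOKUP_SHIFT), so the completion handlers
 * can recover the pool index by shifting the raw tag back down, while the
 * low-order bits stay free for the controller's error/status flags.  The
 * busaddr and ErrDesc fields are simply the DMA addresses of this slot
 * within the cmd_pool and errinfo_pool allocations.
 */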
4457 static void hpsa_preinitialize_commands(struct ctlr_info *h)
4461 for (i = 0; i < h->nr_cmds; i++) {
4462 struct CommandList *c = h->cmd_pool + i;
4464 hpsa_cmd_init(h, i, c);
4465 atomic_set(&c->refcount, 0);
4469 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4470 struct CommandList *c)
4472 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4474 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4475 memset(c->err_info, 0, sizeof(*c->err_info));
4476 c->busaddr = (u32) cmd_dma_handle;
4479 static int hpsa_ioaccel_submit(struct ctlr_info *h,
4480 struct CommandList *c, struct scsi_cmnd *cmd,
4481 unsigned char *scsi3addr)
4483 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4484 int rc = IO_ACCEL_INELIGIBLE;
4486 cmd->host_scribble = (unsigned char *) c;
4488 if (dev->offload_enabled) {
4489 hpsa_cmd_init(h, c->cmdindex, c);
4490 c->cmd_type = CMD_SCSI;
4492 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4493 if (rc < 0) /* scsi_dma_map failed. */
4494 rc = SCSI_MLQUEUE_HOST_BUSY;
4495 } else if (dev->hba_ioaccel_enabled) {
4496 hpsa_cmd_init(h, c->cmdindex, c);
4497 c->cmd_type = CMD_SCSI;
4499 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4500 if (rc < 0) /* scsi_dma_map failed. */
4501 rc = SCSI_MLQUEUE_HOST_BUSY;
4506 static void hpsa_command_resubmit_worker(struct work_struct *work)
4508 struct scsi_cmnd *cmd;
4509 struct hpsa_scsi_dev_t *dev;
4510 struct CommandList *c =
4511 container_of(work, struct CommandList, work);
4514 dev = cmd->device->hostdata;
4516 cmd->result = DID_NO_CONNECT << 16;
4518 cmd->scsi_done(cmd);
4521 if (c->cmd_type == CMD_IOACCEL2) {
4522 struct ctlr_info *h = c->h;
4523 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4526 if (c2->error_data.serv_response ==
4527 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4528 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4531 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4533 * If we get here, it means dma mapping failed.
4534 * Try again via scsi mid layer, which will
4535 * then get SCSI_MLQUEUE_HOST_BUSY.
4537 cmd->result = DID_IMM_RETRY << 16;
4538 cmd->scsi_done(cmd);
4539 cmd_free(h, c); /* FIX-ME: on merge, change
4540 * to cmd_tagged_free() and
4541 * ultimately to
4542 * hpsa_cmd_free_and_done(). */
4545 /* else, fall thru and resubmit down CISS path */
4548 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4549 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4551 * If we get here, it means dma mapping failed. Try
4552 * again via scsi mid layer, which will then get
4553 * SCSI_MLQUEUE_HOST_BUSY.
4555 * hpsa_ciss_submit will have already freed c
4556 * if it encountered a dma mapping failure.
4558 cmd->result = DID_IMM_RETRY << 16;
4559 cmd->scsi_done(cmd);
4563 /* Running in struct Scsi_Host->host_lock less mode */
4564 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4566 struct ctlr_info *h;
4567 struct hpsa_scsi_dev_t *dev;
4568 unsigned char scsi3addr[8];
4569 struct CommandList *c;
4572 /* Get the ptr to our adapter structure out of cmd->host. */
4573 h = sdev_to_hba(cmd->device);
4574 dev = cmd->device->hostdata;
4576 cmd->result = DID_NO_CONNECT << 16;
4577 cmd->scsi_done(cmd);
4580 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4582 if (unlikely(lockup_detected(h))) {
4583 cmd->result = DID_NO_CONNECT << 16;
4584 cmd->scsi_done(cmd);
4589 if (unlikely(lockup_detected(h))) {
4590 cmd->result = DID_NO_CONNECT << 16;
4592 cmd->scsi_done(cmd);
4597 * Call alternate submit routine for I/O accelerated commands.
4598 * Retries always go down the normal I/O path.
4600 if (likely(cmd->retries == 0 &&
4601 cmd->request->cmd_type == REQ_TYPE_FS &&
4602 h->acciopath_status)) {
4603 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4606 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4607 cmd_free(h, c); /* FIX-ME: on merge, change to
4608 * cmd_tagged_free(), and ultimately
4609 * to hpsa_cmd_resolve_and_free(). */
4610 return SCSI_MLQUEUE_HOST_BUSY;
4613 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4616 static void hpsa_scan_complete(struct ctlr_info *h)
4618 unsigned long flags;
4620 spin_lock_irqsave(&h->scan_lock, flags);
4621 h->scan_finished = 1;
4622 wake_up_all(&h->scan_wait_queue);
4623 spin_unlock_irqrestore(&h->scan_lock, flags);
4626 static void hpsa_scan_start(struct Scsi_Host *sh)
4628 struct ctlr_info *h = shost_to_hba(sh);
4629 unsigned long flags;
4632 * Don't let rescans be initiated on a controller known to be locked
4633 * up. If the controller locks up *during* a rescan, that thread is
4634 * probably hosed, but at least we can prevent new rescan threads from
4635 * piling up on a locked up controller.
4637 if (unlikely(lockup_detected(h)))
4638 return hpsa_scan_complete(h);
4640 /* wait until any scan already in progress is finished. */
4642 spin_lock_irqsave(&h->scan_lock, flags);
4643 if (h->scan_finished)
4645 spin_unlock_irqrestore(&h->scan_lock, flags);
4646 wait_event(h->scan_wait_queue, h->scan_finished);
4647 /* Note: We don't need to worry about a race between this
4648 * thread and driver unload because the midlayer will
4649 * have incremented the reference count, so unload won't
4650 * happen if we're in here.
4653 h->scan_finished = 0; /* mark scan as in progress */
4654 spin_unlock_irqrestore(&h->scan_lock, flags);
4656 if (unlikely(lockup_detected(h)))
4657 return hpsa_scan_complete(h);
4659 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4661 hpsa_scan_complete(h);
4664 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4666 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4673 else if (qdepth > logical_drive->queue_depth)
4674 qdepth = logical_drive->queue_depth;
4676 return scsi_change_queue_depth(sdev, qdepth);
4679 static int hpsa_scan_finished(struct Scsi_Host *sh,
4680 unsigned long elapsed_time)
4682 struct ctlr_info *h = shost_to_hba(sh);
4683 unsigned long flags;
4686 spin_lock_irqsave(&h->scan_lock, flags);
4687 finished = h->scan_finished;
4688 spin_unlock_irqrestore(&h->scan_lock, flags);
4692 static void hpsa_unregister_scsi(struct ctlr_info *h)
4694 /* we are being forcibly unloaded, and may not refuse. */
4695 scsi_remove_host(h->scsi_host);
4696 scsi_host_put(h->scsi_host);
4697 h->scsi_host = NULL;
4700 static int hpsa_register_scsi(struct ctlr_info *h)
4702 struct Scsi_Host *sh;
4705 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4712 sh->max_channel = 3;
4713 sh->max_cmd_len = MAX_COMMAND_SIZE;
4714 sh->max_lun = HPSA_MAX_LUN;
4715 sh->max_id = HPSA_MAX_LUN;
4716 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
4717 sh->cmd_per_lun = sh->can_queue;
4718 sh->sg_tablesize = h->maxsgentries;
4720 sh->hostdata[0] = (unsigned long) h;
4721 sh->irq = h->intr[h->intr_mode];
4722 sh->unique_id = sh->irq;
4723 error = scsi_add_host(sh, &h->pdev->dev);
4730 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4731 " failed for controller %d\n", __func__, h->ctlr);
4735 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4736 " failed for controller %d\n", __func__, h->ctlr);
4740 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4741 unsigned char lunaddr[])
4745 int waittime = 1; /* seconds */
4746 struct CommandList *c;
4750 /* Send test unit ready until device ready, or give up. */
4751 while (count < HPSA_TUR_RETRY_LIMIT) {
4753 /* Wait for a bit. do this first, because if we send
4754 * the TUR right away, the reset will just abort it.
4756 msleep(1000 * waittime);
4758 rc = 0; /* Device ready. */
4760 /* Increase wait time with each try, up to a point. */
4761 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4762 waittime = waittime * 2;
4764 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4765 (void) fill_cmd(c, TEST_UNIT_READY, h,
4766 NULL, 0, 0, lunaddr, TYPE_CMD);
4767 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
4771 /* no unmap needed here because no data xfer. */
4773 if (c->err_info->CommandStatus == CMD_SUCCESS)
4776 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4777 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4778 (c->err_info->SenseInfo[2] == NO_SENSE ||
4779 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4782 dev_warn(&h->pdev->dev, "waiting %d secs "
4783 "for device to become ready.\n", waittime);
4784 rc = 1; /* device not ready. */
4788 dev_warn(&h->pdev->dev, "giving up on device.\n");
4790 dev_warn(&h->pdev->dev, "device is ready.\n");
4796 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4797 * complaining. Doing a host- or bus-reset can't do anything good here.
4799 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4802 struct ctlr_info *h;
4803 struct hpsa_scsi_dev_t *dev;
4805 /* find the controller to which the command to be aborted was sent */
4806 h = sdev_to_hba(scsicmd->device);
4807 if (h == NULL) /* paranoia */
4810 if (lockup_detected(h))
4813 dev = scsicmd->device->hostdata;
4815 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4816 "device lookup failed.\n");
4820 /* if controller locked up, we can guarantee command won't complete */
4821 if (lockup_detected(h)) {
4822 dev_warn(&h->pdev->dev,
4823 "scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
4824 h->scsi_host->host_no, dev->bus, dev->target,
4829 /* this reset request might be the result of a lockup; check */
4830 if (detect_controller_lockup(h)) {
4831 dev_warn(&h->pdev->dev,
4832 "scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
4833 h->scsi_host->host_no, dev->bus, dev->target,
4838 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4840 /* send a reset to the SCSI LUN which the command was sent to */
4841 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4842 DEFAULT_REPLY_QUEUE);
4843 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4846 dev_warn(&h->pdev->dev,
4847 "scsi %d:%d:%d:%d reset failed\n",
4848 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4852 static void swizzle_abort_tag(u8 *tag)
4856 memcpy(original_tag, tag, 8);
4857 tag[0] = original_tag[3];
4858 tag[1] = original_tag[2];
4859 tag[2] = original_tag[1];
4860 tag[3] = original_tag[0];
4861 tag[4] = original_tag[7];
4862 tag[5] = original_tag[6];
4863 tag[6] = original_tag[5];
4864 tag[7] = original_tag[4];
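/*
 * Editor's note (worked example, not from the original source): the swizzle
 * byte-reverses each 32-bit half of the 8-byte tag, e.g.
 *
 *	01 02 03 04 05 06 07 08  ->  04 03 02 01 08 07 06 05
 *
 * It is applied only when h->needs_abort_tags_swizzled is set, for
 * controllers that expect the abort tag in the opposite byte order.
 */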
4867 static void hpsa_get_tag(struct ctlr_info *h,
4868 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4871 if (c->cmd_type == CMD_IOACCEL1) {
4872 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4873 &h->ioaccel_cmd_pool[c->cmdindex];
4874 tag = le64_to_cpu(cm1->tag);
4875 *tagupper = cpu_to_le32(tag >> 32);
4876 *taglower = cpu_to_le32(tag);
4879 if (c->cmd_type == CMD_IOACCEL2) {
4880 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4881 &h->ioaccel2_cmd_pool[c->cmdindex];
4882 /* upper tag not used in ioaccel2 mode */
4883 memset(tagupper, 0, sizeof(*tagupper));
4884 *taglower = cm2->Tag;
4887 tag = le64_to_cpu(c->Header.tag);
4888 *tagupper = cpu_to_le32(tag >> 32);
4889 *taglower = cpu_to_le32(tag);
4892 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4893 struct CommandList *abort, int reply_queue)
4896 struct CommandList *c;
4897 struct ErrorInfo *ei;
4898 __le32 tagupper, taglower;
4902 /* fill_cmd can't fail here, no buffer to map */
4903 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
4904 0, 0, scsi3addr, TYPE_MSG);
4905 if (h->needs_abort_tags_swizzled)
4906 swizzle_abort_tag(&c->Request.CDB[4]);
4907 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4908 hpsa_get_tag(h, abort, &taglower, &tagupper);
4909 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
4910 __func__, tagupper, taglower);
4911 /* no unmap needed here because no data xfer. */
4914 switch (ei->CommandStatus) {
4917 case CMD_TMF_STATUS:
4918 rc = hpsa_evaluate_tmf_status(h, c);
4920 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4924 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4925 __func__, tagupper, taglower);
4926 hpsa_scsi_interpret_error(h, c);
4931 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4932 __func__, tagupper, taglower);
4936 /* ioaccel2 path firmware cannot handle abort task requests.
4937 * Change abort requests to physical target reset, and send to the
4938 * address of the physical disk used for the ioaccel 2 command.
4939 * Return 0 on success (IO_OK)
4943 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4944 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4947 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4948 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4949 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4950 unsigned char *psa = &phys_scsi3addr[0];
4952 /* Get a pointer to the hpsa logical device. */
4953 scmd = abort->scsi_cmd;
4954 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4956 dev_warn(&h->pdev->dev,
4957 "Cannot abort: no device pointer for command.\n");
4958 return -1; /* not abortable */
4961 if (h->raid_offload_debug > 0)
4962 dev_info(&h->pdev->dev,
4963 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4964 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4966 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4967 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4969 if (!dev->offload_enabled) {
4970 dev_warn(&h->pdev->dev,
4971 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4972 return -1; /* not abortable */
4975 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4976 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4977 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4978 return -1; /* not abortable */
4981 /* send the reset */
4982 if (h->raid_offload_debug > 0)
4983 dev_info(&h->pdev->dev,
4984 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4985 psa[0], psa[1], psa[2], psa[3],
4986 psa[4], psa[5], psa[6], psa[7]);
4987 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
4989 dev_warn(&h->pdev->dev,
4990 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4991 psa[0], psa[1], psa[2], psa[3],
4992 psa[4], psa[5], psa[6], psa[7]);
4993 return rc; /* failed to reset */
4996 /* wait for device to recover */
4997 if (wait_for_device_to_become_ready(h, psa) != 0) {
4998 dev_warn(&h->pdev->dev,
4999 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5000 psa[0], psa[1], psa[2], psa[3],
5001 psa[4], psa[5], psa[6], psa[7]);
5002 return -1; /* failed to recover */
5005 /* device recovered */
5006 dev_info(&h->pdev->dev,
5007 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5008 psa[0], psa[1], psa[2], psa[3],
5009 psa[4], psa[5], psa[6], psa[7]);
5011 return rc; /* success */
5014 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5015 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5017 /* I/O accelerator mode 2 commands should be aborted via the
5018 * accelerated path, since RAID path is unaware of these commands,
5019 * but underlying firmware can't handle abort TMF.
5020 * Change abort to physical device reset.
5022 if (abort->cmd_type == CMD_IOACCEL2)
5023 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5024 abort, reply_queue);
5025 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5028 /* Find out which reply queue a command was meant to return on */
5029 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5030 struct CommandList *c)
5032 if (c->cmd_type == CMD_IOACCEL2)
5033 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5034 return c->Header.ReplyQueue;
5038 * Limit concurrency of abort commands to prevent
5039 * over-subscription of commands
5041 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5043 #define ABORT_CMD_WAIT_MSECS 5000
5044 return !wait_event_timeout(h->abort_cmd_wait_queue,
5045 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5046 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
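/*
 * Editor's note (not from the original source): aborts are throttled through
 * h->abort_cmds_available.  The wait above returns non-zero if no slot frees
 * up within ABORT_CMD_WAIT_MSECS, in which case the error handler gives up
 * on the abort rather than over-subscribing the controller with TMF
 * requests.
 */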
5049 /* Send an abort for the specified command.
5050 * If the device and controller support it,
5051 * send a task abort request.
5053 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5057 struct ctlr_info *h;
5058 struct hpsa_scsi_dev_t *dev;
5059 struct CommandList *abort; /* pointer to command to be aborted */
5060 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5061 char msg[256]; /* For debug messaging. */
5063 __le32 tagupper, taglower;
5064 int refcount, reply_queue;
5069 if (sc->device == NULL)
5072 /* Find the controller of the command to be aborted */
5073 h = sdev_to_hba(sc->device);
5077 /* Find the device of the command to be aborted */
5078 dev = sc->device->hostdata;
5080 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5085 /* If controller locked up, we can guarantee command won't complete */
5086 if (lockup_detected(h)) {
5087 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5088 "ABORT FAILED, lockup detected");
5092 /* This is a good time to check if controller lockup has occurred */
5093 if (detect_controller_lockup(h)) {
5094 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5095 "ABORT FAILED, new lockup detected");
5099 /* Check that controller supports some kind of task abort */
5100 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5101 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5104 memset(msg, 0, sizeof(msg));
5105 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s",
5106 h->scsi_host->host_no, sc->device->channel,
5107 sc->device->id, sc->device->lun,
5108 "Aborting command");
5110 /* Get SCSI command to be aborted */
5111 abort = (struct CommandList *) sc->host_scribble;
5112 if (abort == NULL) {
5113 /* This can happen if the command already completed. */
5116 refcount = atomic_inc_return(&abort->refcount);
5117 if (refcount == 1) { /* Command is done already. */
5122 /* Don't bother trying the abort if we know it won't work. */
5123 if (abort->cmd_type != CMD_IOACCEL2 &&
5124 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5129 hpsa_get_tag(h, abort, &taglower, &tagupper);
5130 reply_queue = hpsa_extract_reply_queue(h, abort);
5131 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5132 as = abort->scsi_cmd;
5134 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
5135 as->cmnd[0], as->serial_number);
5136 dev_dbg(&h->pdev->dev, "%s\n", msg);
5137 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5139 * Command is in flight, or possibly already completed
5140 * by the firmware (but not to the scsi mid layer) but we can't
5141 * distinguish which. Send the abort down.
5143 if (wait_for_available_abort_cmd(h)) {
5144 dev_warn(&h->pdev->dev,
5145 "Timed out waiting for an abort command to become available.\n");
5149 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5150 atomic_inc(&h->abort_cmds_available);
5151 wake_up_all(&h->abort_cmd_wait_queue);
5153 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5154 "FAILED to abort command");
5158 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
5160 /* If the abort(s) above completed and actually aborted the
5161 * command, then the command to be aborted should already be
5162 * completed. If not, wait around a bit more to see if they
5163 * manage to complete normally.
5165 #define ABORT_COMPLETE_WAIT_SECS 30
5166 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
5167 refcount = atomic_read(&abort->refcount);
5175 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
5176 msg, ABORT_COMPLETE_WAIT_SECS);
5182 * For operations that cannot sleep, a command block is allocated at init,
5183 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5184 * which ones are free or in use. Lock must be held when calling this.
5185 * cmd_free() is the complement.
5186 * This function never gives up and never returns NULL. If it hangs,
5187 * another thread must call cmd_free() to free some tags.
5190 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5192 struct CommandList *c;
5194 unsigned long offset;
5197 * There is some *extremely* small but non-zero chance that
5198 * multiple threads could get in here, and one thread could
5199 * be scanning through the list of bits looking for a free
5200 * one, but the free ones are always behind him, and other
5201 * threads sneak in behind him and eat them before he can
5202 * get to them, so that while there is always a free one, a
5203 * very unlucky thread might be starved anyway, never able to
5204 * beat the other threads. In reality, this happens so
5205 * infrequently as to be indistinguishable from never.
5208 offset = h->last_allocation; /* benignly racy */
5210 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
5211 if (unlikely(i == h->nr_cmds)) {
5215 c = h->cmd_pool + i;
5216 refcount = atomic_inc_return(&c->refcount);
5217 if (unlikely(refcount > 1)) {
5218 cmd_free(h, c); /* already in use */
5219 offset = (i + 1) % h->nr_cmds;
5222 set_bit(i & (BITS_PER_LONG - 1),
5223 h->cmd_pool_bits + (i / BITS_PER_LONG));
5224 break; /* it's ours now. */
5226 h->last_allocation = i; /* benignly racy */
5227 hpsa_cmd_partial_init(h, i, c);
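/*
 * Editor's note (illustrative, not from the original source): the free/used
 * state of each command lives in h->cmd_pool_bits, one bit per slot.  Slot i
 * is bit (i & (BITS_PER_LONG - 1)) of word (i / BITS_PER_LONG); on a 64-bit
 * kernel, for example, slot 70 is bit 6 of word 1.  The refcount taken with
 * atomic_inc_return() is what actually claims the slot, so a lost race is
 * resolved by cmd_free() and another scan rather than by locking.
 */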
5231 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5233 if (atomic_dec_and_test(&c->refcount)) {
5236 i = c - h->cmd_pool;
5237 clear_bit(i & (BITS_PER_LONG - 1),
5238 h->cmd_pool_bits + (i / BITS_PER_LONG));
5242 #ifdef CONFIG_COMPAT
5244 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5247 IOCTL32_Command_struct __user *arg32 =
5248 (IOCTL32_Command_struct __user *) arg;
5249 IOCTL_Command_struct arg64;
5250 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5254 memset(&arg64, 0, sizeof(arg64));
5256 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5257 sizeof(arg64.LUN_info));
5258 err |= copy_from_user(&arg64.Request, &arg32->Request,
5259 sizeof(arg64.Request));
5260 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5261 sizeof(arg64.error_info));
5262 err |= get_user(arg64.buf_size, &arg32->buf_size);
5263 err |= get_user(cp, &arg32->buf);
5264 arg64.buf = compat_ptr(cp);
5265 err |= copy_to_user(p, &arg64, sizeof(arg64));
5270 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5273 err |= copy_in_user(&arg32->error_info, &p->error_info,
5274 sizeof(arg32->error_info));
5280 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5281 int cmd, void __user *arg)
5283 BIG_IOCTL32_Command_struct __user *arg32 =
5284 (BIG_IOCTL32_Command_struct __user *) arg;
5285 BIG_IOCTL_Command_struct arg64;
5286 BIG_IOCTL_Command_struct __user *p =
5287 compat_alloc_user_space(sizeof(arg64));
5291 memset(&arg64, 0, sizeof(arg64));
5293 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5294 sizeof(arg64.LUN_info));
5295 err |= copy_from_user(&arg64.Request, &arg32->Request,
5296 sizeof(arg64.Request));
5297 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5298 sizeof(arg64.error_info));
5299 err |= get_user(arg64.buf_size, &arg32->buf_size);
5300 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5301 err |= get_user(cp, &arg32->buf);
5302 arg64.buf = compat_ptr(cp);
5303 err |= copy_to_user(p, &arg64, sizeof(arg64));
5308 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
5311 err |= copy_in_user(&arg32->error_info, &p->error_info,
5312 sizeof(arg32->error_info));
5318 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5321 case CCISS_GETPCIINFO:
5322 case CCISS_GETINTINFO:
5323 case CCISS_SETINTINFO:
5324 case CCISS_GETNODENAME:
5325 case CCISS_SETNODENAME:
5326 case CCISS_GETHEARTBEAT:
5327 case CCISS_GETBUSTYPES:
5328 case CCISS_GETFIRMVER:
5329 case CCISS_GETDRIVVER:
5330 case CCISS_REVALIDVOLS:
5331 case CCISS_DEREGDISK:
5332 case CCISS_REGNEWDISK:
5334 case CCISS_RESCANDISK:
5335 case CCISS_GETLUNINFO:
5336 return hpsa_ioctl(dev, cmd, arg);
5338 case CCISS_PASSTHRU32:
5339 return hpsa_ioctl32_passthru(dev, cmd, arg);
5340 case CCISS_BIG_PASSTHRU32:
5341 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5344 return -ENOIOCTLCMD;
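/*
 * Editor's note (not from the original source): the 32-bit passthru paths
 * above follow the usual compat pattern: copy the 32-bit structure in,
 * widen the user buffer pointer with compat_ptr(), stage a native-sized
 * copy in compat_alloc_user_space(), call hpsa_ioctl() on that copy, and
 * finally copy error_info back into the caller's 32-bit structure.
 */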
5349 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5351 struct hpsa_pci_info pciinfo;
5355 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5356 pciinfo.bus = h->pdev->bus->number;
5357 pciinfo.dev_fn = h->pdev->devfn;
5358 pciinfo.board_id = h->board_id;
5359 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5364 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5366 DriverVer_type DriverVer;
5367 unsigned char vmaj, vmin, vsubmin;
5370 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5371 &vmaj, &vmin, &vsubmin);
5373 dev_info(&h->pdev->dev, "driver version string '%s' "
5374 "unrecognized.", HPSA_DRIVER_VERSION);
5379 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5382 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5387 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5389 IOCTL_Command_struct iocommand;
5390 struct CommandList *c;
5397 if (!capable(CAP_SYS_RAWIO))
5399 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5401 if ((iocommand.buf_size < 1) &&
5402 (iocommand.Request.Type.Direction != XFER_NONE)) {
5405 if (iocommand.buf_size > 0) {
5406 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5409 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5410 /* Copy the data into the buffer we created */
5411 if (copy_from_user(buff, iocommand.buf,
5412 iocommand.buf_size)) {
5417 memset(buff, 0, iocommand.buf_size);
5422 /* Fill in the command type */
5423 c->cmd_type = CMD_IOCTL_PEND;
5424 /* Fill in Command Header */
5425 c->Header.ReplyQueue = 0; /* unused in simple mode */
5426 if (iocommand.buf_size > 0) { /* buffer to fill */
5427 c->Header.SGList = 1;
5428 c->Header.SGTotal = cpu_to_le16(1);
5429 } else { /* no buffers to fill */
5430 c->Header.SGList = 0;
5431 c->Header.SGTotal = cpu_to_le16(0);
5433 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
5435 /* Fill in Request block */
5436 memcpy(&c->Request, &iocommand.Request,
5437 sizeof(c->Request));
5439 /* Fill in the scatter gather information */
5440 if (iocommand.buf_size > 0) {
5441 temp64 = pci_map_single(h->pdev, buff,
5442 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5443 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5444 c->SG[0].Addr = cpu_to_le64(0);
5445 c->SG[0].Len = cpu_to_le32(0);
5449 c->SG[0].Addr = cpu_to_le64(temp64);
5450 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5451 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
5453 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5454 if (iocommand.buf_size > 0)
5455 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5456 check_ioctl_unit_attention(h, c);
5462 /* Copy the error information out */
5463 memcpy(&iocommand.error_info, c->err_info,
5464 sizeof(iocommand.error_info));
5465 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5469 if ((iocommand.Request.Type.Direction & XFER_READ) &&
5470 iocommand.buf_size > 0) {
5471 /* Copy the data out of the buffer we created */
5472 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5484 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5486 BIG_IOCTL_Command_struct *ioc;
5487 struct CommandList *c;
5488 unsigned char **buff = NULL;
5489 int *buff_size = NULL;
5495 BYTE __user *data_ptr;
5499 if (!capable(CAP_SYS_RAWIO))
5501 ioc = (BIG_IOCTL_Command_struct *)
5502 kmalloc(sizeof(*ioc), GFP_KERNEL);
5507 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5511 if ((ioc->buf_size < 1) &&
5512 (ioc->Request.Type.Direction != XFER_NONE)) {
5516 /* Check kmalloc limits using all SGs */
5517 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5521 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5525 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5530 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5535 left = ioc->buf_size;
5536 data_ptr = ioc->buf;
5538 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5539 buff_size[sg_used] = sz;
5540 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5541 if (buff[sg_used] == NULL) {
5545 if (ioc->Request.Type.Direction & XFER_WRITE) {
5546 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5551 memset(buff[sg_used], 0, sz);
5558 c->cmd_type = CMD_IOCTL_PEND;
5559 c->Header.ReplyQueue = 0;
5560 c->Header.SGList = (u8) sg_used;
5561 c->Header.SGTotal = cpu_to_le16(sg_used);
5562 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5563 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5564 if (ioc->buf_size > 0) {
5566 for (i = 0; i < sg_used; i++) {
5567 temp64 = pci_map_single(h->pdev, buff[i],
5568 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5569 if (dma_mapping_error(&h->pdev->dev,
5570 (dma_addr_t) temp64)) {
5571 c->SG[i].Addr = cpu_to_le64(0);
5572 c->SG[i].Len = cpu_to_le32(0);
5573 hpsa_pci_unmap(h->pdev, c, i,
5574 PCI_DMA_BIDIRECTIONAL);
5578 c->SG[i].Addr = cpu_to_le64(temp64);
5579 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5580 c->SG[i].Ext = cpu_to_le32(0);
5582 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5584 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5586 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5587 check_ioctl_unit_attention(h, c);
5593 /* Copy the error information out */
5594 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5595 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5599 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5602 /* Copy the data out of the buffer we created */
5603 BYTE __user *ptr = ioc->buf;
5604 for (i = 0; i < sg_used; i++) {
5605 if (copy_to_user(ptr, buff[i], buff_size[i])) {
5609 ptr += buff_size[i];
5619 for (i = 0; i < sg_used; i++)
5628 static void check_ioctl_unit_attention(struct ctlr_info *h,
5629 struct CommandList *c)
5631 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5632 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5633 (void) check_for_unit_attention(h, c);
5639 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5641 struct ctlr_info *h;
5642 void __user *argp = (void __user *)arg;
5645 h = sdev_to_hba(dev);
5648 case CCISS_DEREGDISK:
5649 case CCISS_REGNEWDISK:
5651 hpsa_scan_start(h->scsi_host);
5653 case CCISS_GETPCIINFO:
5654 return hpsa_getpciinfo_ioctl(h, argp);
5655 case CCISS_GETDRIVVER:
5656 return hpsa_getdrivver_ioctl(h, argp);
5657 case CCISS_PASSTHRU:
5658 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5660 rc = hpsa_passthru_ioctl(h, argp);
5661 atomic_inc(&h->passthru_cmds_avail);
5663 case CCISS_BIG_PASSTHRU:
5664 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5666 rc = hpsa_big_passthru_ioctl(h, argp);
5667 atomic_inc(&h->passthru_cmds_avail);
5674 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5677 struct CommandList *c;
5681 /* fill_cmd can't fail here, no data buffer to map */
5682 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5683 RAID_CTLR_LUNID, TYPE_MSG);
5684 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5686 enqueue_cmd_and_start_io(h, c);
5687 /* Don't wait for completion, the reset won't complete. Don't free
5688 * the command either. This is the last command we will send before
5689 * re-initializing everything, so it doesn't matter and won't leak.
5694 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5695 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5698 int pci_dir = XFER_NONE;
5699 u64 tag; /* for commands to be aborted */
5701 c->cmd_type = CMD_IOCTL_PEND;
5702 c->Header.ReplyQueue = 0;
5703 if (buff != NULL && size > 0) {
5704 c->Header.SGList = 1;
5705 c->Header.SGTotal = cpu_to_le16(1);
5707 c->Header.SGList = 0;
5708 c->Header.SGTotal = cpu_to_le16(0);
5710 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5712 if (cmd_type == TYPE_CMD) {
5715 /* are we trying to read a vital product page */
5716 if (page_code & VPD_PAGE) {
5717 c->Request.CDB[1] = 0x01;
5718 c->Request.CDB[2] = (page_code & 0xff);
5720 c->Request.CDBLen = 6;
5721 c->Request.type_attr_dir =
5722 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5723 c->Request.Timeout = 0;
5724 c->Request.CDB[0] = HPSA_INQUIRY;
5725 c->Request.CDB[4] = size & 0xFF;
5727 case HPSA_REPORT_LOG:
5728 case HPSA_REPORT_PHYS:
5729 /* Talking to the controller, so it's a physical command:
5730 mode = 00, target = 0. Nothing to write.
5731 */
5732 c->Request.CDBLen = 12;
5733 c->Request.type_attr_dir =
5734 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5735 c->Request.Timeout = 0;
5736 c->Request.CDB[0] = cmd;
5737 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5738 c->Request.CDB[7] = (size >> 16) & 0xFF;
5739 c->Request.CDB[8] = (size >> 8) & 0xFF;
5740 c->Request.CDB[9] = size & 0xFF;
5742 case HPSA_CACHE_FLUSH:
5743 c->Request.CDBLen = 12;
5744 c->Request.type_attr_dir =
5745 TYPE_ATTR_DIR(cmd_type,
5746 ATTR_SIMPLE, XFER_WRITE);
5747 c->Request.Timeout = 0;
5748 c->Request.CDB[0] = BMIC_WRITE;
5749 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5750 c->Request.CDB[7] = (size >> 8) & 0xFF;
5751 c->Request.CDB[8] = size & 0xFF;
5753 case TEST_UNIT_READY:
5754 c->Request.CDBLen = 6;
5755 c->Request.type_attr_dir =
5756 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5757 c->Request.Timeout = 0;
5759 case HPSA_GET_RAID_MAP:
5760 c->Request.CDBLen = 12;
5761 c->Request.type_attr_dir =
5762 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5763 c->Request.Timeout = 0;
5764 c->Request.CDB[0] = HPSA_CISS_READ;
5765 c->Request.CDB[1] = cmd;
5766 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5767 c->Request.CDB[7] = (size >> 16) & 0xFF;
5768 c->Request.CDB[8] = (size >> 8) & 0xFF;
5769 c->Request.CDB[9] = size & 0xFF;
5771 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5772 c->Request.CDBLen = 10;
5773 c->Request.type_attr_dir =
5774 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5775 c->Request.Timeout = 0;
5776 c->Request.CDB[0] = BMIC_READ;
5777 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5778 c->Request.CDB[7] = (size >> 16) & 0xFF;
5779 c->Request.CDB[8] = (size >> 8) & 0xFF;
5781 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5782 c->Request.CDBLen = 10;
5783 c->Request.type_attr_dir =
5784 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5785 c->Request.Timeout = 0;
5786 c->Request.CDB[0] = BMIC_READ;
5787 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5788 c->Request.CDB[7] = (size >> 16) & 0xFF;
5789 c->Request.CDB[8] = (size >> 8) & 0xFF;
5792 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
5796 } else if (cmd_type == TYPE_MSG) {
5799 case HPSA_DEVICE_RESET_MSG:
5800 c->Request.CDBLen = 16;
5801 c->Request.type_attr_dir =
5802 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5803 c->Request.Timeout = 0; /* Don't time out */
5804 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5805 c->Request.CDB[0] = cmd;
5806 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5807 /* If bytes 4-7 are zero, it means reset the */
5808 /* LunID device */
5809 c->Request.CDB[4] = 0x00;
5810 c->Request.CDB[5] = 0x00;
5811 c->Request.CDB[6] = 0x00;
5812 c->Request.CDB[7] = 0x00;
5814 case HPSA_ABORT_MSG:
5815 memcpy(&tag, buff, sizeof(tag));
5816 dev_dbg(&h->pdev->dev,
5817 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
5818 tag, c->Header.tag);
5819 c->Request.CDBLen = 16;
5820 c->Request.type_attr_dir =
5821 TYPE_ATTR_DIR(cmd_type,
5822 ATTR_SIMPLE, XFER_WRITE);
5823 c->Request.Timeout = 0; /* Don't time out */
5824 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5825 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5826 c->Request.CDB[2] = 0x00; /* reserved */
5827 c->Request.CDB[3] = 0x00; /* reserved */
5828 /* Tag to abort goes in CDB[4]-CDB[11] */
5829 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
5830 c->Request.CDB[12] = 0x00; /* reserved */
5831 c->Request.CDB[13] = 0x00; /* reserved */
5832 c->Request.CDB[14] = 0x00; /* reserved */
5833 c->Request.CDB[15] = 0x00; /* reserved */
5836 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5841 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5845 switch (GET_DIR(c->Request.type_attr_dir)) {
5847 pci_dir = PCI_DMA_FROMDEVICE;
5850 pci_dir = PCI_DMA_TODEVICE;
5853 pci_dir = PCI_DMA_NONE;
5856 pci_dir = PCI_DMA_BIDIRECTIONAL;
5858 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5864 * Map (physical) PCI mem into (virtual) kernel space
5866 static void __iomem *remap_pci_mem(ulong base, ulong size)
5868 ulong page_base = ((ulong) base) & PAGE_MASK;
5869 ulong page_offs = ((ulong) base) - page_base;
5870 void __iomem *page_remapped = ioremap_nocache(page_base,
5873 return page_remapped ? (page_remapped + page_offs) : NULL;
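/*
 * Editor's note (worked example, not from the original source): the BAR
 * address handed in need not be page aligned, so the helper maps from the
 * containing page and re-applies the offset.  Assuming PAGE_SIZE == 4096,
 * base == 0xfe001234 and size == 0x250:
 *
 *	page_base = 0xfe001000, page_offs = 0x234
 *	ioremap_nocache(0xfe001000, 0x250 + 0x234)
 *	returned pointer = mapping + 0x234
 */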
5876 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5878 return h->access.command_completed(h, q);
5881 static inline bool interrupt_pending(struct ctlr_info *h)
5883 return h->access.intr_pending(h);
5886 static inline long interrupt_not_for_us(struct ctlr_info *h)
5888 return (h->access.intr_pending(h) == 0) ||
5889 (h->interrupts_enabled == 0);
5892 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5895 if (unlikely(tag_index >= h->nr_cmds)) {
5896 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5902 static inline void finish_cmd(struct CommandList *c)
5904 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5905 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5906 || c->cmd_type == CMD_IOACCEL2))
5907 complete_scsi_command(c);
5908 else if (c->cmd_type == CMD_IOCTL_PEND)
5909 complete(c->waiting);
5913 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5915 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5916 #define HPSA_SIMPLE_ERROR_BITS 0x03
5917 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5918 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5919 return tag & ~HPSA_PERF_ERROR_BITS;
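/*
 * Editor's note (not from the original source): completed tags carry status
 * in their low-order bits -- the bottom two bits in simple mode, and the
 * bottom DIRECT_LOOKUP_SHIFT bits in performant mode.  Masking them off here
 * (and shifting right by DIRECT_LOOKUP_SHIFT in process_indexed_cmd() below)
 * recovers the clean command-pool index that was encoded into the tag.
 */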
5922 /* process completion of an indexed ("direct lookup") command */
5923 static inline void process_indexed_cmd(struct ctlr_info *h,
5927 struct CommandList *c;
5929 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5930 if (!bad_tag(h, tag_index, raw_tag)) {
5931 c = h->cmd_pool + tag_index;
5936 /* Some controllers, like p400, will give us one interrupt
5937 * after a soft reset, even if we turned interrupts off.
5938 * Only need to check for this in the hpsa_xxx_discard_completions
5939 * functions.
5940 */
5941 static int ignore_bogus_interrupt(struct ctlr_info *h)
5943 if (likely(!reset_devices))
5946 if (likely(h->interrupts_enabled))
5949 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5950 "(known firmware bug.) Ignoring.\n");
5956 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5957 * Relies on (h->q[x] == x) being true for x such that
5958 * 0 <= x < MAX_REPLY_QUEUES.
5960 static struct ctlr_info *queue_to_hba(u8 *queue)
5962 return container_of((queue - *queue), struct ctlr_info, q[0]);
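/*
 * Editor's note (not from the original source): h->q[x] is initialized to x,
 * so for queue == &h->q[x] the expression (queue - *queue) points back at
 * &h->q[0], and container_of() then recovers the enclosing ctlr_info.  This
 * lets one small per-controller array double as the per-queue cookie handed
 * to the interrupt handlers.
 */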
5965 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5967 struct ctlr_info *h = queue_to_hba(queue);
5968 u8 q = *(u8 *) queue;
5971 if (ignore_bogus_interrupt(h))
5974 if (interrupt_not_for_us(h))
5976 h->last_intr_timestamp = get_jiffies_64();
5977 while (interrupt_pending(h)) {
5978 raw_tag = get_next_completion(h, q);
5979 while (raw_tag != FIFO_EMPTY)
5980 raw_tag = next_command(h, q);
5985 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5987 struct ctlr_info *h = queue_to_hba(queue);
5989 u8 q = *(u8 *) queue;
5991 if (ignore_bogus_interrupt(h))
5994 h->last_intr_timestamp = get_jiffies_64();
5995 raw_tag = get_next_completion(h, q);
5996 while (raw_tag != FIFO_EMPTY)
5997 raw_tag = next_command(h, q);
6001 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6003 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6005 u8 q = *(u8 *) queue;
6007 if (interrupt_not_for_us(h))
6009 h->last_intr_timestamp = get_jiffies_64();
6010 while (interrupt_pending(h)) {
6011 raw_tag = get_next_completion(h, q);
6012 while (raw_tag != FIFO_EMPTY) {
6013 process_indexed_cmd(h, raw_tag);
6014 raw_tag = next_command(h, q);
6020 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6022 struct ctlr_info *h = queue_to_hba(queue);
6024 u8 q = *(u8 *) queue;
6026 h->last_intr_timestamp = get_jiffies_64();
6027 raw_tag = get_next_completion(h, q);
6028 while (raw_tag != FIFO_EMPTY) {
6029 process_indexed_cmd(h, raw_tag);
6030 raw_tag = next_command(h, q);
6035 /* Send a message CDB to the firmware. Careful, this only works
6036 * in simple mode, not performant mode due to the tag lookup.
6037 * We only ever use this immediately after a controller reset.
6039 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6043 struct CommandListHeader CommandHeader;
6044 struct RequestBlock Request;
6045 struct ErrDescriptor ErrorDescriptor;
6047 struct Command *cmd;
6048 static const size_t cmd_sz = sizeof(*cmd) +
6049 sizeof(cmd->ErrorDescriptor);
6053 void __iomem *vaddr;
6056 vaddr = pci_ioremap_bar(pdev, 0);
6060 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6061 * CCISS commands, so they must be allocated from the lower 4GiB of
6062 * memory.
6063 */
6064 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6070 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6076 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6077 * although there's no guarantee, we assume that the address is at
6078 * least 4-byte aligned (most likely, it's page-aligned).
6080 paddr32 = cpu_to_le32(paddr64);
6082 cmd->CommandHeader.ReplyQueue = 0;
6083 cmd->CommandHeader.SGList = 0;
6084 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6085 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6086 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6088 cmd->Request.CDBLen = 16;
6089 cmd->Request.type_attr_dir =
6090 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6091 cmd->Request.Timeout = 0; /* Don't time out */
6092 cmd->Request.CDB[0] = opcode;
6093 cmd->Request.CDB[1] = type;
6094 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6095 cmd->ErrorDescriptor.Addr =
6096 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6097 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6099 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6101 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6102 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6103 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6105 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6110 /* we leak the DMA buffer here ... no choice since the controller could
6111 * still complete the command.
6113 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6114 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6119 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6121 if (tag & HPSA_ERROR_BIT) {
6122 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6127 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6132 #define hpsa_noop(p) hpsa_message(p, 3, 0)
6134 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6135 void __iomem *vaddr, u32 use_doorbell)
6139 /* For everything after the P600, the PCI power state method
6140 * of resetting the controller doesn't work, so we have this
6141 * other way using the doorbell register.
6143 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6144 writel(use_doorbell, vaddr + SA5_DOORBELL);
6146 /* PMC hardware guys tell us we need a 10 second delay after
6147 * doorbell reset and before any attempt to talk to the board
6148 * at all to ensure that this actually works and doesn't fall
6149 * over in some weird corner cases.
6152 } else { /* Try to do it the PCI power state way */
6154 /* Quoting from the Open CISS Specification: "The Power
6155 * Management Control/Status Register (CSR) controls the power
6156 * state of the device. The normal operating state is D0,
6157 * CSR=00h. The software off state is D3, CSR=03h. To reset
6158 * the controller, place the interface device in D3 then to D0,
6159 * this causes a secondary PCI reset which will reset the
6164 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6166 /* enter the D3hot power management state */
6167 rc = pci_set_power_state(pdev, PCI_D3hot);
6173 /* enter the D0 power management state */
6174 rc = pci_set_power_state(pdev, PCI_D0);
6179 * The P600 requires a small delay when changing states.
6180 * Otherwise we may think the board did not reset and we bail.
6181 * This is for kdump only and is particular to the P600.
6188 static void init_driver_version(char *driver_version, int len)
6190 memset(driver_version, 0, len);
6191 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6194 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6196 char *driver_version;
6197 int i, size = sizeof(cfgtable->driver_version);
6199 driver_version = kmalloc(size, GFP_KERNEL);
6200 if (!driver_version)
6203 init_driver_version(driver_version, size);
6204 for (i = 0; i < size; i++)
6205 writeb(driver_version[i], &cfgtable->driver_version[i]);
6206 kfree(driver_version);
6210 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6211 unsigned char *driver_ver)
6215 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6216 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6219 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6222 char *driver_ver, *old_driver_ver;
6223 int rc, size = sizeof(cfgtable->driver_version);
6225 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6226 if (!old_driver_ver)
6228 driver_ver = old_driver_ver + size;
6230 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6231 * should have been changed, otherwise we know the reset failed.
6233 init_driver_version(old_driver_ver, size);
6234 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6235 rc = !memcmp(driver_ver, old_driver_ver, size);
6236 kfree(old_driver_ver);
6239 /* This does a hard reset of the controller using PCI power management
6240 * states or the doorbell register.
6242 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6246 u64 cfg_base_addr_index;
6247 void __iomem *vaddr;
6248 unsigned long paddr;
6249 u32 misc_fw_support;
6251 struct CfgTable __iomem *cfgtable;
6253 u16 command_register;
6255 /* For controllers as old as the P600, this is very nearly the same thing as:
6258 * pci_save_state(pci_dev);
6259 * pci_set_power_state(pci_dev, PCI_D3hot);
6260 * pci_set_power_state(pci_dev, PCI_D0);
6261 * pci_restore_state(pci_dev);
6263 * For controllers newer than the P600, the pci power state
6264 * method of resetting doesn't work so we have another way
6265 * using the doorbell register.
6268 if (!ctlr_is_resettable(board_id)) {
6269 dev_warn(&pdev->dev, "Controller not resettable\n");
6273 /* if controller is soft- but not hard resettable... */
6274 if (!ctlr_is_hard_resettable(board_id))
6275 return -ENOTSUPP; /* try soft reset later. */
6277 /* Save the PCI command register */
6278 pci_read_config_word(pdev, 4, &command_register);
6279 pci_save_state(pdev);
6281 /* find the first memory BAR, so we can find the cfg table */
6282 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6285 vaddr = remap_pci_mem(paddr, 0x250);
6289 /* find cfgtable in order to check if reset via doorbell is supported */
6290 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6291 &cfg_base_addr_index, &cfg_offset);
6294 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6295 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6300 rc = write_driver_ver_to_cfgtable(cfgtable);
6302 goto unmap_cfgtable;
6304 /* If reset via doorbell register is supported, use that.
6305 * There are two such methods. Favor the newest method.
6307 misc_fw_support = readl(&cfgtable->misc_fw_support);
6308 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6310 use_doorbell = DOORBELL_CTLR_RESET2;
6312 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6314 dev_warn(&pdev->dev,
6315 "Soft reset not supported. Firmware update is required.\n");
6316 rc = -ENOTSUPP; /* try soft reset */
6317 goto unmap_cfgtable;
6321 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6323 goto unmap_cfgtable;
6325 pci_restore_state(pdev);
6326 pci_write_config_word(pdev, 4, command_register);
6328 /* Some devices (notably the HP Smart Array 5i Controller)
6329 need a little pause here */
6330 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6332 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6334 dev_warn(&pdev->dev,
6335 "Failed waiting for board to become ready after hard reset\n");
6336 goto unmap_cfgtable;
6339 rc = controller_reset_failed(vaddr);
6341 goto unmap_cfgtable;
6343 dev_warn(&pdev->dev, "Unable to successfully reset "
6344 "controller. Will try soft reset.\n");
6347 dev_info(&pdev->dev, "board ready after hard reset.\n");
6359 * We cannot read the structure directly; for portability we must
6361 * access it via readl()/readb(). This is for debug only.
6363 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6369 dev_info(dev, "Controller Configuration information\n");
6370 dev_info(dev, "------------------------------------\n");
6371 for (i = 0; i < 4; i++)
6372 temp_name[i] = readb(&(tb->Signature[i]));
6373 temp_name[4] = '\0';
6374 dev_info(dev, " Signature = %s\n", temp_name);
6375 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6376 dev_info(dev, " Transport methods supported = 0x%x\n",
6377 readl(&(tb->TransportSupport)));
6378 dev_info(dev, " Transport methods active = 0x%x\n",
6379 readl(&(tb->TransportActive)));
6380 dev_info(dev, " Requested transport Method = 0x%x\n",
6381 readl(&(tb->HostWrite.TransportRequest)));
6382 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6383 readl(&(tb->HostWrite.CoalIntDelay)));
6384 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6385 readl(&(tb->HostWrite.CoalIntCount)));
6386 dev_info(dev, " Max outstanding commands = %d\n",
6387 readl(&(tb->CmdsOutMax)));
6388 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6389 for (i = 0; i < 16; i++)
6390 temp_name[i] = readb(&(tb->ServerName[i]));
6391 temp_name[16] = '\0';
6392 dev_info(dev, " Server Name = %s\n", temp_name);
6393 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6394 readl(&(tb->HeartBeat)));
6395 #endif /* HPSA_DEBUG */
6398 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6400 int i, offset, mem_type, bar_type;
6402 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6405 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6406 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6407 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6410 mem_type = pci_resource_flags(pdev, i) &
6411 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6413 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6414 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6415 offset += 4; /* 32 bit */
6417 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6420 default: /* reserved in PCI 2.2 */
6421 dev_warn(&pdev->dev,
6422 "base address is invalid\n");
6427 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6433 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6435 if (h->msix_vector) {
6436 if (h->pdev->msix_enabled)
6437 pci_disable_msix(h->pdev);
6438 } else if (h->msi_vector) {
6439 if (h->pdev->msi_enabled)
6440 pci_disable_msi(h->pdev);
6444 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6445 * controllers that are capable. If not, we use legacy INTx mode.
6447 static void hpsa_interrupt_mode(struct ctlr_info *h)
6449 #ifdef CONFIG_PCI_MSI
6451 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6453 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6454 hpsa_msix_entries[i].vector = 0;
6455 hpsa_msix_entries[i].entry = i;
6458 /* Some boards advertise MSI but don't really support it */
6459 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6460 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6461 goto default_int_mode;
6462 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6463 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
6464 h->msix_vector = MAX_REPLY_QUEUES;
6465 if (h->msix_vector > num_online_cpus())
6466 h->msix_vector = num_online_cpus();
6467 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6470 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6472 goto single_msi_mode;
6473 } else if (err < h->msix_vector) {
6474 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6475 "available\n", err);
6477 h->msix_vector = err;
6478 for (i = 0; i < h->msix_vector; i++)
6479 h->intr[i] = hpsa_msix_entries[i].vector;
6483 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6484 dev_info(&h->pdev->dev, "MSI capable controller\n");
6485 if (!pci_enable_msi(h->pdev))
6488 dev_warn(&h->pdev->dev, "MSI init failed\n");
6491 #endif /* CONFIG_PCI_MSI */
6492 /* if we get here we're going to use the default interrupt mode */
6493 h->intr[h->intr_mode] = h->pdev->irq;
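/*
 * Illustrative aside (not part of this driver): on kernels that provide
 * pci_alloc_irq_vectors() (v4.8+), the MSI-X -> MSI -> INTx fallback done
 * above could be expressed roughly as the sketch below; the variable names
 * and the simplified error handling are assumptions.
 *
 *	nvec = pci_alloc_irq_vectors(h->pdev, 1,
 *			min(MAX_REPLY_QUEUES, num_online_cpus()),
 *			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++)
 *		h->intr[i] = pci_irq_vector(h->pdev, i);
 */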
6496 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6499 u32 subsystem_vendor_id, subsystem_device_id;
6501 subsystem_vendor_id = pdev->subsystem_vendor;
6502 subsystem_device_id = pdev->subsystem_device;
6503 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6504 subsystem_vendor_id;
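/*
 * Worked example: the P600 checked elsewhere in this file has subsystem
 * vendor ID 0x103C and subsystem device ID 0x3225, so the line above
 * yields *board_id = (0x3225 << 16) | 0x103C = 0x3225103C.
 */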
6506 for (i = 0; i < ARRAY_SIZE(products); i++)
6507 if (*board_id == products[i].board_id)
6510 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6511 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6513 dev_warn(&pdev->dev, "unrecognized board ID: "
6514 "0x%08x, ignoring.\n", *board_id);
6517 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6520 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6521 unsigned long *memory_bar)
6525 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6526 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6527 /* addressing mode bits already removed */
6528 *memory_bar = pci_resource_start(pdev, i);
6529 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6533 dev_warn(&pdev->dev, "no memory BAR found\n");
6537 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6543 iterations = HPSA_BOARD_READY_ITERATIONS;
6545 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6547 for (i = 0; i < iterations; i++) {
6548 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6549 if (wait_for_ready) {
6550 if (scratchpad == HPSA_FIRMWARE_READY)
6553 if (scratchpad != HPSA_FIRMWARE_READY)
6556 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6558 dev_warn(&pdev->dev, "board not ready, timed out.\n");
6562 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6563 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6566 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6567 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6568 *cfg_base_addr &= (u32) 0x0000ffff;
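/*
 * Descriptive note: the low 16 bits kept here identify which PCI base
 * address register holds the config table (find_PCI_BAR_index() turns
 * that into a resource index on the next line), while the value read
 * from SA5_CTMEM_OFFSET is the byte offset of the table within that BAR
 * (see the remap_pci_mem() callers that add cfg_offset).
 */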
6569 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6570 if (*cfg_base_addr_index == -1) {
6571 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6577 static void hpsa_free_cfgtables(struct ctlr_info *h)
6580 iounmap(h->transtable);
6582 iounmap(h->cfgtable);
6585 /* Find and map CISS config table and transfer table
6586 * several items must be unmapped (freed) later
6588 static int hpsa_find_cfgtables(struct ctlr_info *h)
6592 u64 cfg_base_addr_index;
6596 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6597 &cfg_base_addr_index, &cfg_offset);
6600 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6601 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6603 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6606 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6609 /* Find performant mode table. */
6610 trans_offset = readl(&h->cfgtable->TransMethodOffset);
6611 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6612 cfg_base_addr_index)+cfg_offset+trans_offset,
6613 sizeof(*h->transtable));
6614 if (!h->transtable) {
6615 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
6616 hpsa_free_cfgtables(h);
6622 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6624 #define MIN_MAX_COMMANDS 16
6625 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6627 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6629 /* Limit commands in memory limited kdump scenario. */
6630 if (reset_devices && h->max_commands > 32)
6631 h->max_commands = 32;
6633 if (h->max_commands < MIN_MAX_COMMANDS) {
6634 dev_warn(&h->pdev->dev,
6635 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
6638 h->max_commands = MIN_MAX_COMMANDS;
6642 /* If the controller reports that the total max sg entries is greater than 512,
6643 * then we know that chained SG blocks work. (Original smart arrays did not
6644 * support chained SG blocks and would return zero for max sg entries.)
6646 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6648 return h->maxsgentries > 512;
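/*
 * Illustrative values: a controller reporting MaxScatterGatherElements
 * of, say, 1024 supports chained SG blocks; an original Smart Array
 * reporting 0 does not and is limited to the SG entries embedded in the
 * command itself (at most 31, see hpsa_find_board_params() below).
 */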
6651 /* Interrogate the hardware for some limits:
6652 * max commands, max SG elements without chaining, and with chaining,
6653 * SG chain block size, etc.
6655 static void hpsa_find_board_params(struct ctlr_info *h)
6657 hpsa_get_max_perf_mode_cmds(h);
6658 h->nr_cmds = h->max_commands;
6659 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6660 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6661 if (hpsa_supports_chained_sg_blocks(h)) {
6662 /* Limit in-command s/g elements to 32 to save dma'able memory. */
6663 h->max_cmd_sg_entries = 32;
6664 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6665 h->maxsgentries--; /* save one for chain pointer */
6668 * Original smart arrays supported at most 31 s/g entries
6669 * embedded inline in the command (trying to use more
6670 * would lock up the controller)
6672 h->max_cmd_sg_entries = 31;
6673 h->maxsgentries = 31; /* default to traditional values */
6677 /* Find out what task management functions are supported and cache */
6678 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6679 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6680 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6681 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6682 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6685 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6687 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6688 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
6694 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6698 driver_support = readl(&(h->cfgtable->driver_support));
6699 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6701 driver_support |= ENABLE_SCSI_PREFETCH;
6703 driver_support |= ENABLE_UNIT_ATTN;
6704 writel(driver_support, &(h->cfgtable->driver_support));
6707 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6708 * in a prefetch beyond physical memory.
6710 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6714 if (h->board_id != 0x3225103C)
6716 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6717 dma_prefetch |= 0x8000;
6718 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6721 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6725 unsigned long flags;
6726 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6727 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
6728 spin_lock_irqsave(&h->lock, flags);
6729 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6730 spin_unlock_irqrestore(&h->lock, flags);
6731 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6733 /* delay and try again */
6734 msleep(CLEAR_EVENT_WAIT_INTERVAL);
6741 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6745 unsigned long flags;
6747 /* Under certain very rare conditions, this can take a while.
6748 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6749 * as we enter this code.)
6751 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
6752 if (h->remove_in_progress)
6754 spin_lock_irqsave(&h->lock, flags);
6755 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6756 spin_unlock_irqrestore(&h->lock, flags);
6757 if (!(doorbell_value & CFGTBL_ChangeReq))
6759 /* delay and try again */
6760 msleep(MODE_CHANGE_WAIT_INTERVAL);
6767 /* return -ENODEV or other reason on error, 0 on success */
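/*
 * Descriptive note on the handshake used here (and again in
 * hpsa_enter_performant_mode()): write the requested method into
 * HostWrite.TransportRequest, ring the doorbell with CFGTBL_ChangeReq,
 * poll the doorbell until the controller clears that bit
 * (hpsa_wait_for_mode_change_ack()), then check TransportActive to
 * confirm the controller really switched modes.
 */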
6768 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6772 trans_support = readl(&(h->cfgtable->TransportSupport));
6773 if (!(trans_support & SIMPLE_MODE))
6776 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6778 /* Update the field, and then ring the doorbell */
6779 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6780 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6781 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6782 if (hpsa_wait_for_mode_change_ack(h))
6784 print_cfg_table(&h->pdev->dev, h->cfgtable);
6785 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6787 h->transMethod = CFGTBL_Trans_Simple;
6790 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
6794 /* free items allocated or mapped by hpsa_pci_init */
6795 static void hpsa_free_pci_init(struct ctlr_info *h)
6797 hpsa_free_cfgtables(h); /* pci_init 4 */
6798 iounmap(h->vaddr); /* pci_init 3 */
6799 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
6800 pci_release_regions(h->pdev); /* pci_init 2 */
6801 pci_disable_device(h->pdev); /* pci_init 1 */
6804 /* several items must be freed later */
6805 static int hpsa_pci_init(struct ctlr_info *h)
6807 int prod_index, err;
6809 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6812 h->product_name = products[prod_index].product_name;
6813 h->access = *(products[prod_index].access);
6815 h->needs_abort_tags_swizzled =
6816 ctlr_needs_abort_tags_swizzled(h->board_id);
6818 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6819 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6821 err = pci_enable_device(h->pdev);
6823 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
6827 err = pci_request_regions(h->pdev, HPSA);
6829 dev_err(&h->pdev->dev,
6830 "failed to obtain PCI resources\n");
6831 goto clean1; /* pci */
6834 pci_set_master(h->pdev);
6836 hpsa_interrupt_mode(h);
6837 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6839 goto clean2; /* intmode+region, pci */
6840 h->vaddr = remap_pci_mem(h->paddr, 0x250);
6842 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
6844 goto clean2; /* intmode+region, pci */
6846 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6848 goto clean3; /* vaddr, intmode+region, pci */
6849 err = hpsa_find_cfgtables(h);
6851 goto clean3; /* vaddr, intmode+region, pci */
6852 hpsa_find_board_params(h);
6854 if (!hpsa_CISS_signature_present(h)) {
6856 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
6858 hpsa_set_driver_support_bits(h);
6859 hpsa_p600_dma_prefetch_quirk(h);
6860 err = hpsa_enter_simple_mode(h);
6862 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
6865 clean4: /* cfgtables, vaddr, intmode+region, pci */
6866 hpsa_free_cfgtables(h);
6867 clean3: /* vaddr, intmode+region, pci */
6869 clean2: /* intmode+region, pci */
6870 hpsa_disable_interrupt_mode(h);
6871 pci_release_regions(h->pdev);
6873 pci_disable_device(h->pdev);
6877 static void hpsa_hba_inquiry(struct ctlr_info *h)
6881 #define HBA_INQUIRY_BYTE_COUNT 64
6882 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6883 if (!h->hba_inquiry_data)
6885 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6886 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6888 kfree(h->hba_inquiry_data);
6889 h->hba_inquiry_data = NULL;
6893 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
6896 void __iomem *vaddr;
6901 /* The kdump kernel is loading; we don't know what state the
6902 * pci interface is in. The dev->enable_cnt is zero,
6903 * so we call enable+disable, wait a while, and switch it on.
6905 rc = pci_enable_device(pdev);
6907 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6910 pci_disable_device(pdev);
6911 msleep(260); /* a randomly chosen number */
6912 rc = pci_enable_device(pdev);
6914 dev_warn(&pdev->dev, "failed to enable device.\n");
6918 pci_set_master(pdev);
6920 vaddr = pci_ioremap_bar(pdev, 0);
6921 if (vaddr == NULL) {
6925 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6928 /* Reset the controller with a PCI power-cycle or via doorbell */
6929 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
6931 /* -ENOTSUPP here means we cannot reset the controller
6932 * but it's already (and still) up and running in
6933 * "performant mode". Or, it might be 640x, which can't reset
6934 * due to concerns about shared bbwc between 6402/6404 pair.
6939 /* Now try to get the controller to respond to a no-op */
6940 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6941 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6942 if (hpsa_noop(pdev) == 0)
6945 dev_warn(&pdev->dev, "no-op failed%s\n",
6946 (i < 11 ? "; re-trying" : ""));
6951 pci_disable_device(pdev);
6955 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6957 kfree(h->cmd_pool_bits);
6959 pci_free_consistent(h->pdev,
6960 h->nr_cmds * sizeof(struct CommandList),
6962 h->cmd_pool_dhandle);
6963 if (h->errinfo_pool)
6964 pci_free_consistent(h->pdev,
6965 h->nr_cmds * sizeof(struct ErrorInfo),
6967 h->errinfo_pool_dhandle);
6970 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
6972 h->cmd_pool_bits = kzalloc(
6973 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6974 sizeof(unsigned long), GFP_KERNEL);
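/*
 * Sizing example (illustrative numbers): with nr_cmds = 1024 and
 * BITS_PER_LONG = 64 the bitmap above is DIV_ROUND_UP(1024, 64) = 16
 * longs, i.e. 128 bytes, one allocation bit per command slot.
 */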
6975 h->cmd_pool = pci_alloc_consistent(h->pdev,
6976 h->nr_cmds * sizeof(*h->cmd_pool),
6977 &(h->cmd_pool_dhandle));
6978 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6979 h->nr_cmds * sizeof(*h->errinfo_pool),
6980 &(h->errinfo_pool_dhandle));
6981 if ((h->cmd_pool_bits == NULL)
6982 || (h->cmd_pool == NULL)
6983 || (h->errinfo_pool == NULL)) {
6984 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6987 hpsa_preinitialize_commands(h);
6990 hpsa_free_cmd_pool(h);
6994 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6998 cpu = cpumask_first(cpu_online_mask);
6999 for (i = 0; i < h->msix_vector; i++) {
7000 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7001 cpu = cpumask_next(cpu, cpu_online_mask);
7005 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7006 static void hpsa_free_irqs(struct ctlr_info *h)
7010 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7011 /* Single reply queue, only one irq to free */
7013 irq_set_affinity_hint(h->intr[i], NULL);
7014 free_irq(h->intr[i], &h->q[i]);
7018 for (i = 0; i < h->msix_vector; i++) {
7019 irq_set_affinity_hint(h->intr[i], NULL);
7020 free_irq(h->intr[i], &h->q[i]);
7022 for (; i < MAX_REPLY_QUEUES; i++)
7026 /* returns 0 on success; cleans up and returns -Enn on error */
7027 static int hpsa_request_irqs(struct ctlr_info *h,
7028 irqreturn_t (*msixhandler)(int, void *),
7029 irqreturn_t (*intxhandler)(int, void *))
7034 * initialize h->q[x] = x so that interrupt handlers know which queue to service.
7037 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7040 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7041 /* If performant mode and MSI-X, use multiple reply queues */
7042 for (i = 0; i < h->msix_vector; i++) {
7043 rc = request_irq(h->intr[i], msixhandler,
7049 dev_err(&h->pdev->dev,
7050 "failed to get irq %d for %s\n",
7051 h->intr[i], h->devname);
7052 for (j = 0; j < i; j++) {
7053 free_irq(h->intr[j], &h->q[j]);
7056 for (; j < MAX_REPLY_QUEUES; j++)
7061 hpsa_irq_affinity_hints(h);
7063 /* Use single reply pool */
7064 if (h->msix_vector > 0 || h->msi_vector) {
7065 rc = request_irq(h->intr[h->intr_mode],
7066 msixhandler, 0, h->devname,
7067 &h->q[h->intr_mode]);
7069 rc = request_irq(h->intr[h->intr_mode],
7070 intxhandler, IRQF_SHARED, h->devname,
7071 &h->q[h->intr_mode]);
7075 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7076 h->intr[h->intr_mode], h->devname);
7083 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7085 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7087 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7088 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
7089 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7093 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7094 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
7095 dev_warn(&h->pdev->dev, "Board failed to become ready "
7096 "after soft reset.\n");
7103 static void hpsa_free_reply_queues(struct ctlr_info *h)
7107 for (i = 0; i < h->nreply_queues; i++) {
7108 if (!h->reply_queue[i].head)
7110 pci_free_consistent(h->pdev,
7111 h->reply_queue_size,
7112 h->reply_queue[i].head,
7113 h->reply_queue[i].busaddr);
7114 h->reply_queue[i].head = NULL;
7115 h->reply_queue[i].busaddr = 0;
7119 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7122 hpsa_free_sg_chain_blocks(h);
7123 hpsa_free_cmd_pool(h);
7124 kfree(h->blockFetchTable); /* perf 2 */
7125 hpsa_free_reply_queues(h); /* perf 1 */
7126 hpsa_free_ioaccel1_cmd_and_bft(h); /* perf 1 */
7127 hpsa_free_ioaccel2_cmd_and_bft(h); /* perf 1 */
7128 hpsa_free_cfgtables(h); /* pci_init 4 */
7129 iounmap(h->vaddr); /* pci_init 3 */
7130 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7131 pci_disable_device(h->pdev);
7132 pci_release_regions(h->pdev); /* pci_init 2 */
7136 /* Called when controller lockup detected. */
7137 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7140 struct CommandList *c;
7143 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7144 for (i = 0; i < h->nr_cmds; i++) {
7145 c = h->cmd_pool + i;
7146 refcount = atomic_inc_return(&c->refcount);
7148 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7150 atomic_dec(&h->commands_outstanding);
7155 dev_warn(&h->pdev->dev,
7156 "failed %d commands in fail_all\n", failcount);
7159 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7163 for_each_online_cpu(cpu) {
7164 u32 *lockup_detected;
7165 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7166 *lockup_detected = value;
7168 wmb(); /* be sure the per-cpu variables are out to memory */
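/*
 * The read side (used via lockup_detected() elsewhere in this driver)
 * looks roughly like the sketch below -- each CPU consults only its own
 * copy, so no lock is needed on the fast path.  Sketch only; the actual
 * helper may differ in detail.
 *
 *	cpu = get_cpu();
 *	value = *per_cpu_ptr(h->lockup_detected, cpu);
 *	put_cpu();
 */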
7171 static void controller_lockup_detected(struct ctlr_info *h)
7173 unsigned long flags;
7174 u32 lockup_detected;
7176 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7177 spin_lock_irqsave(&h->lock, flags);
7178 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7179 if (!lockup_detected) {
7180 /* no heartbeat, but controller gave us a zero. */
7181 dev_warn(&h->pdev->dev,
7182 "lockup detected after %d but scratchpad register is zero\n",
7183 h->heartbeat_sample_interval / HZ);
7184 lockup_detected = 0xffffffff;
7186 set_lockup_detected_for_all_cpus(h, lockup_detected);
7187 spin_unlock_irqrestore(&h->lock, flags);
7188 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
7189 lockup_detected, h->heartbeat_sample_interval / HZ);
7190 pci_disable_device(h->pdev);
7191 fail_all_outstanding_cmds(h);
7194 static int detect_controller_lockup(struct ctlr_info *h)
7198 unsigned long flags;
7200 now = get_jiffies_64();
7201 /* If we've received an interrupt recently, we're ok. */
7202 if (time_after64(h->last_intr_timestamp +
7203 (h->heartbeat_sample_interval), now))
7207 * If we've already checked the heartbeat recently, we're ok.
7208 * This could happen if someone sends us a signal. We
7209 * otherwise don't care about signals in this thread.
7211 if (time_after64(h->last_heartbeat_timestamp +
7212 (h->heartbeat_sample_interval), now))
7215 /* If heartbeat has not changed since we last looked, we're not ok. */
7216 spin_lock_irqsave(&h->lock, flags);
7217 heartbeat = readl(&h->cfgtable->HeartBeat);
7218 spin_unlock_irqrestore(&h->lock, flags);
7219 if (h->last_heartbeat == heartbeat) {
7220 controller_lockup_detected(h);
7225 h->last_heartbeat = heartbeat;
7226 h->last_heartbeat_timestamp = now;
7230 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7235 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7238 /* Ask the controller to clear the events we're handling. */
7239 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7240 | CFGTBL_Trans_io_accel2)) &&
7241 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7242 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7244 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7245 event_type = "state change";
7246 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7247 event_type = "configuration change";
7248 /* Stop sending new RAID offload reqs via the IO accelerator */
7249 scsi_block_requests(h->scsi_host);
7250 for (i = 0; i < h->ndevices; i++)
7251 h->dev[i]->offload_enabled = 0;
7252 hpsa_drain_accel_commands(h);
7253 /* Set 'accelerator path config change' bit */
7254 dev_warn(&h->pdev->dev,
7255 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7256 h->events, event_type);
7257 writel(h->events, &(h->cfgtable->clear_event_notify));
7258 /* Set the "clear event notify field update" bit 6 */
7259 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7260 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7261 hpsa_wait_for_clear_event_notify_ack(h);
7262 scsi_unblock_requests(h->scsi_host);
7264 /* Acknowledge controller notification events. */
7265 writel(h->events, &(h->cfgtable->clear_event_notify));
7266 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7267 hpsa_wait_for_clear_event_notify_ack(h);
7269 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7270 hpsa_wait_for_mode_change_ack(h);
7276 /* Check a register on the controller to see if there are configuration
7277 * changes (added/changed/removed logical drives, etc.) which mean that
7278 * we should rescan the controller for devices.
7279 * Also check flag for driver-initiated rescan.
7281 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7283 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7286 h->events = readl(&(h->cfgtable->event_notify));
7287 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7291 * Check if any of the offline devices have become ready
7293 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7295 unsigned long flags;
7296 struct offline_device_entry *d;
7297 struct list_head *this, *tmp;
7299 spin_lock_irqsave(&h->offline_device_lock, flags);
7300 list_for_each_safe(this, tmp, &h->offline_device_list) {
7301 d = list_entry(this, struct offline_device_entry,
7303 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7304 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7305 spin_lock_irqsave(&h->offline_device_lock, flags);
7306 list_del(&d->offline_list);
7307 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7310 spin_lock_irqsave(&h->offline_device_lock, flags);
7312 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7316 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
7318 unsigned long flags;
7319 struct ctlr_info *h = container_of(to_delayed_work(work),
7320 struct ctlr_info, rescan_ctlr_work);
7323 if (h->remove_in_progress)
7326 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7327 scsi_host_get(h->scsi_host);
7328 hpsa_ack_ctlr_events(h);
7329 hpsa_scan_start(h->scsi_host);
7330 scsi_host_put(h->scsi_host);
7332 spin_lock_irqsave(&h->lock, flags);
7333 if (!h->remove_in_progress)
7334 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7335 h->heartbeat_sample_interval);
7336 spin_unlock_irqrestore(&h->lock, flags);
7339 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7341 unsigned long flags;
7342 struct ctlr_info *h = container_of(to_delayed_work(work),
7343 struct ctlr_info, monitor_ctlr_work);
7345 detect_controller_lockup(h);
7346 if (lockup_detected(h))
7349 spin_lock_irqsave(&h->lock, flags);
7350 if (!h->remove_in_progress)
7351 schedule_delayed_work(&h->monitor_ctlr_work,
7352 h->heartbeat_sample_interval);
7353 spin_unlock_irqrestore(&h->lock, flags);
7356 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7359 struct workqueue_struct *wq = NULL;
7361 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
7363 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7368 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7371 struct ctlr_info *h;
7372 int try_soft_reset = 0;
7373 unsigned long flags;
7376 if (number_of_controllers == 0)
7377 printk(KERN_INFO DRIVER_NAME "\n");
7379 rc = hpsa_lookup_board_id(pdev, &board_id);
7381 dev_warn(&pdev->dev, "Board ID not found\n");
7385 rc = hpsa_init_reset_devices(pdev, board_id);
7387 if (rc != -ENOTSUPP)
7389 /* If the reset fails in a particular way (it has no way to do
7390 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7391 * a soft reset once we get the controller configured up to the
7392 * point that it can accept a command.
7398 reinit_after_soft_reset:
7400 /* Command structures must be aligned on a 32-byte boundary because
7401 * the 5 lower bits of the address are used by the hardware and by
7402 * the driver. See comments in hpsa.h for more info.
7404 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
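/*
 * Descriptive note: commands are carved out of one contiguous pool, so
 * each element stays 32-byte aligned only if the structure size is a
 * whole multiple of that alignment; the BUILD_BUG_ON above turns any
 * violation into a compile-time error.
 */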
7405 h = kzalloc(sizeof(*h), GFP_KERNEL);
7410 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
7411 INIT_LIST_HEAD(&h->offline_device_list);
7412 spin_lock_init(&h->lock);
7413 spin_lock_init(&h->offline_device_lock);
7414 spin_lock_init(&h->scan_lock);
7415 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
7416 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
7418 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7419 if (!h->rescan_ctlr_wq) {
7424 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7425 if (!h->resubmit_wq) {
7430 /* Allocate and clear per-cpu variable lockup_detected */
7431 h->lockup_detected = alloc_percpu(u32);
7432 if (!h->lockup_detected) {
7436 set_lockup_detected_for_all_cpus(h, 0);
7438 rc = hpsa_pci_init(h);
7442 sprintf(h->devname, HPSA "%d", number_of_controllers);
7443 h->ctlr = number_of_controllers;
7444 number_of_controllers++;
7446 /* configure PCI DMA stuff */
7447 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7451 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7455 dev_err(&pdev->dev, "no suitable DMA available\n");
7460 /* make sure the board interrupts are off */
7461 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7463 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
7465 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7466 h->devname, pdev->device,
7467 h->intr[h->intr_mode], dac ? "" : " not");
7468 rc = hpsa_alloc_cmd_pool(h);
7470 goto clean2_and_free_irqs;
7471 if (hpsa_allocate_sg_chain_blocks(h))
7473 init_waitqueue_head(&h->scan_wait_queue);
7474 init_waitqueue_head(&h->abort_cmd_wait_queue);
7475 h->scan_finished = 1; /* no scan currently in progress */
7477 pci_set_drvdata(pdev, h);
7479 h->hba_mode_enabled = 0;
7480 h->scsi_host = NULL;
7481 spin_lock_init(&h->devlock);
7482 hpsa_put_ctlr_into_performant_mode(h);
7484 /* At this point, the controller is ready to take commands.
7485 * Now, if reset_devices and the hard reset didn't work, try
7486 * the soft reset and see if that works.
7488 if (try_soft_reset) {
7490 /* This is kind of gross. We may or may not get a completion
7491 * from the soft reset command, and if we do, then the value
7492 * from the fifo may or may not be valid. So, we wait 10 secs
7493 * after the reset, throwing away any completions we get during
7494 * that time. Unregister the interrupt handler and register
7495 * fake ones to scoop up any residual completions.
7497 spin_lock_irqsave(&h->lock, flags);
7498 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7499 spin_unlock_irqrestore(&h->lock, flags);
7501 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
7502 hpsa_intx_discard_completions);
7504 dev_warn(&h->pdev->dev,
7505 "Failed to request_irq after soft reset.\n");
7509 rc = hpsa_kdump_soft_reset(h);
7511 /* Neither hard nor soft reset worked, we're hosed. */
7514 dev_info(&h->pdev->dev, "Board READY.\n");
7515 dev_info(&h->pdev->dev,
7516 "Waiting for stale completions to drain.\n");
7517 h->access.set_intr_mask(h, HPSA_INTR_ON);
7519 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7521 rc = controller_reset_failed(h->cfgtable);
7523 dev_info(&h->pdev->dev,
7524 "Soft reset appears to have failed.\n");
7526 /* since the controller's reset, we have to go back and re-init
7527 * everything. Easiest to just forget what we've done and do it
7530 hpsa_undo_allocations_after_kdump_soft_reset(h);
7533 /* don't go to clean4, we already unallocated */
7536 goto reinit_after_soft_reset;
7539 /* Enable Accelerated IO path at driver layer */
7540 h->acciopath_status = 1;
7543 /* Turn the interrupts on so we can service requests */
7544 h->access.set_intr_mask(h, HPSA_INTR_ON);
7546 hpsa_hba_inquiry(h);
7547 rc = hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
7551 /* Monitor the controller for firmware lockups */
7552 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7553 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7554 schedule_delayed_work(&h->monitor_ctlr_work,
7555 h->heartbeat_sample_interval);
7556 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7557 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7558 h->heartbeat_sample_interval);
7562 hpsa_free_sg_chain_blocks(h);
7563 hpsa_free_cmd_pool(h);
7564 hpsa_free_ioaccel1_cmd_and_bft(h);
7565 hpsa_free_ioaccel2_cmd_and_bft(h);
7566 clean2_and_free_irqs:
7569 hpsa_free_pci_init(h);
7572 destroy_workqueue(h->resubmit_wq);
7573 if (h->rescan_ctlr_wq)
7574 destroy_workqueue(h->rescan_ctlr_wq);
7575 if (h->lockup_detected)
7576 free_percpu(h->lockup_detected);
7581 static void hpsa_flush_cache(struct ctlr_info *h)
7584 struct CommandList *c;
7587 /* Don't bother trying to flush the cache if locked up */
7588 /* FIXME not necessary if do_simple_cmd does the check */
7589 if (unlikely(lockup_detected(h)))
7591 flush_buf = kzalloc(4, GFP_KERNEL);
7597 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7598 RAID_CTLR_LUNID, TYPE_CMD)) {
7601 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
7602 PCI_DMA_TODEVICE, NO_TIMEOUT);
7605 if (c->err_info->CommandStatus != 0)
7607 dev_warn(&h->pdev->dev,
7608 "error flushing cache on controller\n");
7613 static void hpsa_shutdown(struct pci_dev *pdev)
7615 struct ctlr_info *h;
7617 h = pci_get_drvdata(pdev);
7618 /* Turn board interrupts off and send the flush cache command;
7619 * sendcmd will turn off interrupts and send the flush
7620 * to write all data in the battery-backed cache to disk.
7622 hpsa_flush_cache(h);
7623 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7625 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7628 static void hpsa_free_device_info(struct ctlr_info *h)
7632 for (i = 0; i < h->ndevices; i++)
7636 static void hpsa_remove_one(struct pci_dev *pdev)
7638 struct ctlr_info *h;
7639 unsigned long flags;
7641 if (pci_get_drvdata(pdev) == NULL) {
7642 dev_err(&pdev->dev, "unable to remove device\n");
7645 h = pci_get_drvdata(pdev);
7647 /* Get rid of any controller monitoring work items */
7648 spin_lock_irqsave(&h->lock, flags);
7649 h->remove_in_progress = 1;
7650 spin_unlock_irqrestore(&h->lock, flags);
7651 cancel_delayed_work_sync(&h->monitor_ctlr_work);
7652 cancel_delayed_work_sync(&h->rescan_ctlr_work);
7653 destroy_workqueue(h->rescan_ctlr_wq);
7654 destroy_workqueue(h->resubmit_wq);
7655 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7657 /* includes hpsa_free_irqs */
7658 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
7659 hpsa_shutdown(pdev);
7661 hpsa_free_device_info(h);
7662 hpsa_free_sg_chain_blocks(h);
7663 kfree(h->blockFetchTable); /* perf 2 */
7664 hpsa_free_reply_queues(h); /* perf 1 */
7665 hpsa_free_ioaccel1_cmd_and_bft(h); /* perf 1 */
7666 hpsa_free_ioaccel2_cmd_and_bft(h); /* perf 1 */
7667 hpsa_free_cmd_pool(h); /* init_one 5 */
7668 kfree(h->hba_inquiry_data);
7670 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
7671 hpsa_free_pci_init(h);
7673 free_percpu(h->lockup_detected);
7677 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7678 __attribute__((unused)) pm_message_t state)
7683 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7688 static struct pci_driver hpsa_pci_driver = {
7690 .probe = hpsa_init_one,
7691 .remove = hpsa_remove_one,
7692 .id_table = hpsa_pci_device_id, /* id_table */
7693 .shutdown = hpsa_shutdown,
7694 .suspend = hpsa_suspend,
7695 .resume = hpsa_resume,
7698 /* Fill in bucket_map[], given nsgs (the max number of
7699 * scatter gather elements supported) and bucket[],
7700 * which is an array of 8 integers. The bucket[] array
7701 * contains 8 different DMA transfer sizes (in 16
7702 * byte increments) which the controller uses to fetch
7703 * commands. This function fills in bucket_map[], which
7704 * maps a given number of scatter gather elements to one of
7705 * the 8 DMA transfer sizes. The point of it is to allow the
7706 * controller to only do as much DMA as needed to fetch the
7707 * command, with the DMA transfer size encoded in the lower
7708 * bits of the command address.
7710 static void calc_bucket_map(int bucket[], int num_buckets,
7711 int nsgs, int min_blocks, u32 *bucket_map)
7715 /* Note, bucket_map must have nsgs+1 entries. */
7716 for (i = 0; i <= nsgs; i++) {
7717 /* Compute size of a command with i SG entries */
7718 size = i + min_blocks;
7719 b = num_buckets; /* Assume the biggest bucket */
7720 /* Find the bucket that is just big enough */
7721 for (j = 0; j < num_buckets; j++) {
7722 if (bucket[j] >= size) {
7727 /* for a command with i SG entries, use bucket b. */
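/*
 * Worked example: hpsa_enter_performant_mode() below calls this with
 * bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4.  A command with i = 3 SG entries then has
 * size = 3 + 4 = 7; the first bucket holding at least 7 is bft[2] = 8,
 * so bucket_map[3] = 2 and the controller fetches that command as
 * 8 sixteen-byte blocks (128 bytes).
 */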
7732 /* return -ENODEV or other reason on error, 0 on success */
7733 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7736 unsigned long register_value;
7737 unsigned long transMethod = CFGTBL_Trans_Performant |
7738 (trans_support & CFGTBL_Trans_use_short_tags) |
7739 CFGTBL_Trans_enable_directed_msix |
7740 (trans_support & (CFGTBL_Trans_io_accel1 |
7741 CFGTBL_Trans_io_accel2));
7742 struct access_method access = SA5_performant_access;
7744 /* This is a bit complicated. There are 8 registers on
7745 * the controller which we write to, to tell it the 8 different
7746 * command sizes that may occur. It's a way of
7747 * reducing the DMA done to fetch each command. Encoded into
7748 * each command's tag are 3 bits which communicate to the controller
7749 * which of the eight sizes that command fits within. The size of
7750 * each command depends on how many scatter gather entries there are.
7751 * Each SG entry requires 16 bytes. The eight registers are programmed
7752 * with the number of 16-byte blocks a command of that size requires.
7753 * The smallest command possible requires 5 such 16 byte blocks.
7754 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7755 * blocks. Note, this only extends to the SG entries contained
7756 * within the command block, and does not extend to chained blocks
7757 * of SG elements. bft[] contains the eight values we write to
7758 * the registers. They are not evenly distributed, but have more
7759 * sizes for small commands, and fewer sizes for larger commands.
7761 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7762 #define MIN_IOACCEL2_BFT_ENTRY 5
7763 #define HPSA_IOACCEL2_HEADER_SZ 4
7764 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7765 13, 14, 15, 16, 17, 18, 19,
7766 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7767 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7768 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7769 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7770 16 * MIN_IOACCEL2_BFT_ENTRY);
7771 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7772 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7773 /* 5 = 1 s/g entry or 4k
7774 * 6 = 2 s/g entry or 8k
7775 * 8 = 4 s/g entry or 16k
7776 * 10 = 6 s/g entry or 24k
7779 /* If the controller supports either ioaccel method then
7780 * we can also use the RAID stack submit path that does not
7781 * perform the superfluous readl() after each command submission.
7783 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7784 access = SA5_performant_access_no_read;
7786 /* Controller spec: zero out this buffer. */
7787 for (i = 0; i < h->nreply_queues; i++)
7788 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7790 bft[7] = SG_ENTRIES_IN_CMD + 4;
7791 calc_bucket_map(bft, ARRAY_SIZE(bft),
7792 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7793 for (i = 0; i < 8; i++)
7794 writel(bft[i], &h->transtable->BlockFetch[i]);
7796 /* size of controller ring buffer */
7797 writel(h->max_commands, &h->transtable->RepQSize);
7798 writel(h->nreply_queues, &h->transtable->RepQCount);
7799 writel(0, &h->transtable->RepQCtrAddrLow32);
7800 writel(0, &h->transtable->RepQCtrAddrHigh32);
7802 for (i = 0; i < h->nreply_queues; i++) {
7803 writel(0, &h->transtable->RepQAddr[i].upper);
7804 writel(h->reply_queue[i].busaddr,
7805 &h->transtable->RepQAddr[i].lower);
7808 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7809 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7811 * enable outbound interrupt coalescing in accelerator mode;
7813 if (trans_support & CFGTBL_Trans_io_accel1) {
7814 access = SA5_ioaccel_mode1_access;
7815 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7816 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7818 if (trans_support & CFGTBL_Trans_io_accel2) {
7819 access = SA5_ioaccel_mode2_access;
7820 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7821 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7824 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7825 if (hpsa_wait_for_mode_change_ack(h)) {
7826 dev_err(&h->pdev->dev,
7827 "performant mode problem - doorbell timeout\n");
7830 register_value = readl(&(h->cfgtable->TransportActive));
7831 if (!(register_value & CFGTBL_Trans_Performant)) {
7832 dev_err(&h->pdev->dev,
7833 "performant mode problem - transport not active\n");
7836 /* Change the access methods to the performant access methods */
7838 h->transMethod = transMethod;
7840 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7841 (trans_support & CFGTBL_Trans_io_accel2)))
7844 if (trans_support & CFGTBL_Trans_io_accel1) {
7845 /* Set up I/O accelerator mode */
7846 for (i = 0; i < h->nreply_queues; i++) {
7847 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7848 h->reply_queue[i].current_entry =
7849 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7851 bft[7] = h->ioaccel_maxsg + 8;
7852 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7853 h->ioaccel1_blockFetchTable);
7855 /* initialize all reply queue entries to unused */
7856 for (i = 0; i < h->nreply_queues; i++)
7857 memset(h->reply_queue[i].head,
7858 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7859 h->reply_queue_size);
7861 /* set all the constant fields in the accelerator command
7862 * frames once at init time to save CPU cycles later.
7864 for (i = 0; i < h->nr_cmds; i++) {
7865 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7867 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7868 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7869 (i * sizeof(struct ErrorInfo)));
7870 cp->err_info_len = sizeof(struct ErrorInfo);
7871 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7872 cp->host_context_flags =
7873 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7874 cp->timeout_sec = 0;
7877 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
7879 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7880 (i * sizeof(struct io_accel1_cmd)));
7882 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7883 u64 cfg_offset, cfg_base_addr_index;
7884 u32 bft2_offset, cfg_base_addr;
7887 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7888 &cfg_base_addr_index, &cfg_offset);
7889 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7890 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7891 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7892 4, h->ioaccel2_blockFetchTable);
7893 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7894 BUILD_BUG_ON(offsetof(struct CfgTable,
7895 io_accel_request_size_offset) != 0xb8);
7896 h->ioaccel2_bft2_regs =
7897 remap_pci_mem(pci_resource_start(h->pdev,
7898 cfg_base_addr_index) +
7899 cfg_offset + bft2_offset,
7901 sizeof(*h->ioaccel2_bft2_regs));
7902 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7903 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7905 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7906 if (hpsa_wait_for_mode_change_ack(h)) {
7907 dev_err(&h->pdev->dev,
7908 "performant mode problem - enabling ioaccel mode\n");
7914 /* Free ioaccel1 mode command blocks and block fetch table */
7915 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
7917 if (h->ioaccel_cmd_pool)
7918 pci_free_consistent(h->pdev,
7919 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7920 h->ioaccel_cmd_pool,
7921 h->ioaccel_cmd_pool_dhandle);
7922 kfree(h->ioaccel1_blockFetchTable);
7925 /* Allocate ioaccel1 mode command blocks and block fetch table */
7926 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
7929 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7930 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7931 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7933 /* Command structures must be aligned on a 128-byte boundary
7934 * because the 7 lower bits of the address are used by the hardware.
7937 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7938 IOACCEL1_COMMANDLIST_ALIGNMENT);
7939 h->ioaccel_cmd_pool =
7940 pci_alloc_consistent(h->pdev,
7941 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7942 &(h->ioaccel_cmd_pool_dhandle));
7944 h->ioaccel1_blockFetchTable =
7945 kmalloc(((h->ioaccel_maxsg + 1) *
7946 sizeof(u32)), GFP_KERNEL);
7948 if ((h->ioaccel_cmd_pool == NULL) ||
7949 (h->ioaccel1_blockFetchTable == NULL))
7952 memset(h->ioaccel_cmd_pool, 0,
7953 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7957 hpsa_free_ioaccel1_cmd_and_bft(h);
7961 /* Free ioaccel2 mode command blocks and block fetch table */
7962 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
7964 hpsa_free_ioaccel2_sg_chain_blocks(h);
7966 if (h->ioaccel2_cmd_pool)
7967 pci_free_consistent(h->pdev,
7968 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7969 h->ioaccel2_cmd_pool,
7970 h->ioaccel2_cmd_pool_dhandle);
7971 kfree(h->ioaccel2_blockFetchTable);
7974 /* Allocate ioaccel2 mode command blocks and block fetch table */
7975 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
7979 /* Allocate ioaccel2 mode command blocks and block fetch table */
7982 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7983 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7984 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7986 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7987 IOACCEL2_COMMANDLIST_ALIGNMENT);
7988 h->ioaccel2_cmd_pool =
7989 pci_alloc_consistent(h->pdev,
7990 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7991 &(h->ioaccel2_cmd_pool_dhandle));
7993 h->ioaccel2_blockFetchTable =
7994 kmalloc(((h->ioaccel_maxsg + 1) *
7995 sizeof(u32)), GFP_KERNEL);
7997 if ((h->ioaccel2_cmd_pool == NULL) ||
7998 (h->ioaccel2_blockFetchTable == NULL)) {
8003 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8007 memset(h->ioaccel2_cmd_pool, 0,
8008 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8012 hpsa_free_ioaccel2_cmd_and_bft(h);
8016 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
8019 unsigned long transMethod = CFGTBL_Trans_Performant |
8020 CFGTBL_Trans_use_short_tags;
8023 if (hpsa_simple_mode)
8026 trans_support = readl(&(h->cfgtable->TransportSupport));
8027 if (!(trans_support & PERFORMANT_MODE))
8030 /* Check for I/O accelerator mode support */
8031 if (trans_support & CFGTBL_Trans_io_accel1) {
8032 transMethod |= CFGTBL_Trans_io_accel1 |
8033 CFGTBL_Trans_enable_directed_msix;
8034 if (hpsa_alloc_ioaccel1_cmd_and_bft(h))
8037 if (trans_support & CFGTBL_Trans_io_accel2) {
8038 transMethod |= CFGTBL_Trans_io_accel2 |
8039 CFGTBL_Trans_enable_directed_msix;
8040 if (hpsa_alloc_ioaccel2_cmd_and_bft(h))
8045 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
8046 hpsa_get_max_perf_mode_cmds(h);
8047 /* Performant mode ring buffer and supporting data structures */
8048 h->reply_queue_size = h->max_commands * sizeof(u64);
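/*
 * Sizing example (illustrative): with max_commands = 1024 each of the
 * h->nreply_queues ring buffers allocated below occupies
 * 1024 * sizeof(u64) = 8 KB of DMA-coherent memory.
 */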
8050 for (i = 0; i < h->nreply_queues; i++) {
8051 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8052 h->reply_queue_size,
8053 &(h->reply_queue[i].busaddr));
8054 if (!h->reply_queue[i].head)
8056 h->reply_queue[i].size = h->max_commands;
8057 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
8058 h->reply_queue[i].current_entry = 0;
8061 /* Need a block fetch table for performant mode */
8062 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
8063 sizeof(u32)), GFP_KERNEL);
8064 if (!h->blockFetchTable)
8067 hpsa_enter_performant_mode(h, trans_support);
8071 hpsa_free_reply_queues(h);
8072 kfree(h->blockFetchTable);
8075 static int is_accelerated_cmd(struct CommandList *c)
8077 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8080 static void hpsa_drain_accel_commands(struct ctlr_info *h)
8082 struct CommandList *c = NULL;
8083 int i, accel_cmds_out;
8086 do { /* wait for all outstanding ioaccel commands to drain out */
8088 for (i = 0; i < h->nr_cmds; i++) {
8089 c = h->cmd_pool + i;
8090 refcount = atomic_inc_return(&c->refcount);
8091 if (refcount > 1) /* Command is allocated */
8092 accel_cmds_out += is_accelerated_cmd(c);
8095 if (accel_cmds_out <= 0)
8102 * This is it. Register the PCI driver information for the cards we control;
8103 * the OS will call our registered routines when it finds one of our cards.
8105 static int __init hpsa_init(void)
8107 return pci_register_driver(&hpsa_pci_driver);
8110 static void __exit hpsa_cleanup(void)
8112 pci_unregister_driver(&hpsa_pci_driver);
8115 static void __attribute__((unused)) verify_offsets(void)
8117 #define VERIFY_OFFSET(member, offset) \
8118 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8120 VERIFY_OFFSET(structure_size, 0);
8121 VERIFY_OFFSET(volume_blk_size, 4);
8122 VERIFY_OFFSET(volume_blk_cnt, 8);
8123 VERIFY_OFFSET(phys_blk_shift, 16);
8124 VERIFY_OFFSET(parity_rotation_shift, 17);
8125 VERIFY_OFFSET(strip_size, 18);
8126 VERIFY_OFFSET(disk_starting_blk, 20);
8127 VERIFY_OFFSET(disk_blk_cnt, 28);
8128 VERIFY_OFFSET(data_disks_per_row, 36);
8129 VERIFY_OFFSET(metadata_disks_per_row, 38);
8130 VERIFY_OFFSET(row_cnt, 40);
8131 VERIFY_OFFSET(layout_map_count, 42);
8132 VERIFY_OFFSET(flags, 44);
8133 VERIFY_OFFSET(dekindex, 46);
8134 /* VERIFY_OFFSET(reserved, 48) */
8135 VERIFY_OFFSET(data, 64);
8137 #undef VERIFY_OFFSET
8139 #define VERIFY_OFFSET(member, offset) \
8140 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8142 VERIFY_OFFSET(IU_type, 0);
8143 VERIFY_OFFSET(direction, 1);
8144 VERIFY_OFFSET(reply_queue, 2);
8145 /* VERIFY_OFFSET(reserved1, 3); */
8146 VERIFY_OFFSET(scsi_nexus, 4);
8147 VERIFY_OFFSET(Tag, 8);
8148 VERIFY_OFFSET(cdb, 16);
8149 VERIFY_OFFSET(cciss_lun, 32);
8150 VERIFY_OFFSET(data_len, 40);
8151 VERIFY_OFFSET(cmd_priority_task_attr, 44);
8152 VERIFY_OFFSET(sg_count, 45);
8153 /* VERIFY_OFFSET(reserved3 */
8154 VERIFY_OFFSET(err_ptr, 48);
8155 VERIFY_OFFSET(err_len, 56);
8156 /* VERIFY_OFFSET(reserved4 */
8157 VERIFY_OFFSET(sg, 64);
8159 #undef VERIFY_OFFSET
8161 #define VERIFY_OFFSET(member, offset) \
8162 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8164 VERIFY_OFFSET(dev_handle, 0x00);
8165 VERIFY_OFFSET(reserved1, 0x02);
8166 VERIFY_OFFSET(function, 0x03);
8167 VERIFY_OFFSET(reserved2, 0x04);
8168 VERIFY_OFFSET(err_info, 0x0C);
8169 VERIFY_OFFSET(reserved3, 0x10);
8170 VERIFY_OFFSET(err_info_len, 0x12);
8171 VERIFY_OFFSET(reserved4, 0x13);
8172 VERIFY_OFFSET(sgl_offset, 0x14);
8173 VERIFY_OFFSET(reserved5, 0x15);
8174 VERIFY_OFFSET(transfer_len, 0x1C);
8175 VERIFY_OFFSET(reserved6, 0x20);
8176 VERIFY_OFFSET(io_flags, 0x24);
8177 VERIFY_OFFSET(reserved7, 0x26);
8178 VERIFY_OFFSET(LUN, 0x34);
8179 VERIFY_OFFSET(control, 0x3C);
8180 VERIFY_OFFSET(CDB, 0x40);
8181 VERIFY_OFFSET(reserved8, 0x50);
8182 VERIFY_OFFSET(host_context_flags, 0x60);
8183 VERIFY_OFFSET(timeout_sec, 0x62);
8184 VERIFY_OFFSET(ReplyQueue, 0x64);
8185 VERIFY_OFFSET(reserved9, 0x65);
8186 VERIFY_OFFSET(tag, 0x68);
8187 VERIFY_OFFSET(host_addr, 0x70);
8188 VERIFY_OFFSET(CISS_LUN, 0x78);
8189 VERIFY_OFFSET(SG, 0x78 + 8);
8190 #undef VERIFY_OFFSET
8193 module_init(hpsa_init);
8194 module_exit(hpsa_cleanup);