/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
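/*
 * Usage note (illustration, not driver code): both parameters may be set
 * at module load time, e.g. "modprobe hpsa hpsa_simple_mode=1", and since
 * they are declared S_IWUSR they can also be toggled at runtime through
 * /sys/module/hpsa/parameters/<name>.
 */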
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP,     0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
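/*
 * Example (illustration): board_id packs the PCI subsystem IDs as
 * (subsystem_device_id << 16) | subsystem_vendor_id, so subsystem
 * device 0x3241 from vendor 0x103C yields board_id 0x3241103C, the
 * first entry below.
 */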
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == -1)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
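/*
 * Example (illustration): the top two bits of byte 3 of an 8-byte CISS
 * LUN address carry the address mode; 0x40 there (binary 01 in bits 7:6)
 * means "logical volume", so an address like 00 00 00 40 00 00 00 01 is
 * in logical device address mode.
 */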
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
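/*
 * Illustration of the toggle-bit scheme above (not driver code): the
 * controller writes each reply with bit 0 equal to the current value of
 * rq->wraparound.  When the consumer wraps past the end of the ring it
 * flips rq->wraparound, so entries left over from the previous pass no
 * longer satisfy (head[i] & 1) == rq->wraparound and read as empty.
 */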
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry.
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
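/*
 * Illustrative sketch (not part of the driver): composing a normal
 * performant-mode tag per the bit layout documented above.  The name
 * example_performant_tag and the fetch_entry parameter are hypothetical,
 * standing in for h->blockFetchTable[c->Header.SGList].
 */
static inline u32 example_performant_tag(u32 busaddr, u32 fetch_entry)
{
	/* bit 0 = performant mode, bits 1-3 = block fetch table entry */
	return busaddr | 1 | (fetch_entry << 1);
}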
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(c->abort_pending))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
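/*
 * Worked example (illustration): if the bus already has devices at
 * targets 0 and 2, lun_taken has bits 0 and 2 set, find_first_zero_bit()
 * returns 1, and the new physical device is assigned target 1, lun 0.
 */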
static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_state);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
			return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
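/*
 * Illustration (not driver code): SCSI3ADDR_EQ expands to a bytewise
 * compare of two 8-byte LUN addresses, e.g.
 *
 *	if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr))
 *		...
 *
 * as used by hpsa_scsi_find_entry() below.
 */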
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
		unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++)
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i]->expose_state & HPSA_SCSI_ADD) {
			struct scsi_device *sdev =
				scsi_device_lookup(sh, removed[i]->bus,
					removed[i]->target, removed[i]->lun);
			if (sdev != NULL) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			} else {
				/*
				 * We don't expect to get here.
				 * future cmds to this device will get selection
				 * timeout as if the device was gone.
				 */
				hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
					"didn't find device for removal.");
			}
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (!(added[i]->expose_state & HPSA_SCSI_ADD))
			continue;
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		hpsa_show_dev_msg(KERN_WARNING, h, added[i],
			"addition failed, device not added.");
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		added[i] = NULL;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (likely(sd)) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}
/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}
static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
				h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->data_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}
static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->data_len);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}
1885 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1886 struct CommandList *c)
1888 struct SGDescriptor *chain_sg;
1890 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1893 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1894 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1895 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
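/*
 * How chaining looks on the wire (informal sketch): the last descriptor
 * in the command's fixed SG array is flagged HPSA_SG_CHAIN and points at
 * the DMA-mapped chain block, which holds the remaining descriptors:
 *
 *	c->SG[0 .. max-2]  -> data buffers
 *	c->SG[max-1]       -> (HPSA_SG_CHAIN) chain block -> more descriptors
 */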
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/* We will get an event from ctlr to trigger rescan */
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
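/*
 * Note on the return value: most error statuses above set retry = 1,
 * which makes the caller resubmit the command down the normal RAID
 * path where the firmware performs full recovery; good status and
 * underrun complete straight back to the SCSI midlayer.
 */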
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Clear c->scsi_cmd here so that the abort handler will know this
	 * command has completed.  Then, check to see if the abort handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Ensure c->scsi_cmd is set to SCSI_CMD_IDLE */
	if (c->abort_pending) {
		c->abort_pending = false;
		wake_up_all(&h->abort_sync_wait_queue);
	}
}
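/*
 * Ordering note: the mb() above is intended to make the SCSI_CMD_IDLE
 * marker globally visible before abort_pending is tested; an abort
 * handler then either gets woken here or observes SCSI_CMD_IDLE and
 * gives up, but never waits forever.
 */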
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
		struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
			 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}
static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/* don't requeue a command which is being aborted */
	if (unlikely(c->abort_pending))
		return hpsa_cmd_abort_and_free(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}
/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
		struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);
	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			if (!cp->abort_pending)
				return hpsa_retry_cmd(h, cp);
		}
	}

	if (cp->abort_pending)
		ei->CommandStatus = CMD_ABORTED;
	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);

		decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		/* Return now to avoid calling scsi_done(). */
		return hpsa_cmd_abort_and_free(h, cp, cmd);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above. Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}
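/*
 * Rough completion flow, as the code above reads: unmap DMA, then either
 * (a) fail fast on controller lockup, (b) hand ioaccel2 completions to
 * process_ioaccel2_completion(), (c) retry ioaccel1 errors on the RAID
 * path, or (d) translate the CISS CommandStatus into a SCSI midlayer
 * result and complete the command.
 */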
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}
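/*
 * Illustrative use (hypothetical caller): after fill_cmd() builds a
 * CISS request,
 *
 *	if (hpsa_map_one(h->pdev, c, buf, bufsize, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *
 * On failure, SGList/SGTotal are zeroed above so that a later
 * hpsa_pci_unmap() of the command is harmless.
 */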
#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}
static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						  timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}
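/*
 * Backoff arithmetic for the loop above: retries 1-3 are immediate;
 * retry 4 sleeps 10 ms and each later retry doubles the sleep
 * (20, 40, ... ms) until the doubling stops past one second, for at
 * most MAX_DRIVER_CMD_RETRIES (25) attempts overall.
 */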
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
		struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
		unsigned char *scsi3addr, unsigned char page,
		struct bmic_controller_parameters *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
	struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;
	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}
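/*
 * Buffer layout assumed above (standard SPC "Supported VPD Pages"
 * format): buf[3] holds the number of supported pages and the page
 * codes follow from buf[4] on, hence the scan of buf[3 + i] for
 * i = 1..pages.
 */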
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
			HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if a MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully. So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
	unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}
	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));
	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}
	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return -1;
}
static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;

	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}
static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2 and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
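/*
 * Worked example of the mapping above (illustrative values): a plain
 * logical volume with lunid 0x00000002 lands at bus 0, target 0, lun 2;
 * the same volume behind an external target box (bus 1) takes its
 * target from bits 16-29 of the lunid and its lun from the low byte.
 */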
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
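/*
 * The /24 and /8 above reflect the two report formats: extended
 * physical-LUN entries carry an 8-byte address plus per-drive data
 * (24 bytes total), while logical-LUN entries are plain 8-byte
 * addresses; LUNListLength itself is big-endian in both reports.
 */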
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
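/*
 * Index layout assumed above, sketched for raid_ctlr_position == 0:
 *
 *	i == 0                  -> the RAID controller itself
 *	1 .. nphysicals         -> entries from the physical LUN report
 *	nphysicals+1 .. end     -> entries from the logical LUN report
 *
 * With the controller placed last, the offsets collapse to zero.
 */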
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);
	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}
/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		u8 *lunaddrbytes,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle =
		(struct ext_report_lun_entry *) lunaddrbytes;

	dev->ioaccel_handle = rle->ioaccel_handle;
	if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
	atomic_set(&dev->ioaccel_cmds_out, 0);
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;
	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}
		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}
	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;
	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes))
			if (i < nphysicals + (raid_ctlr_position == 0) &&
				NON_DISK_PHYS_DEV(lunaddrbytes))
				continue;
		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];
		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		/* do not expose masked devices */
		if (MASKED_DEVICE(lunaddrbytes) &&
			i < nphysicals + (raid_ctlr_position == 0)) {
			if (h->hba_mode_enabled)
				dev_warn(&h->pdev->dev,
					"Masked physical device detected\n");
			this_device->expose_state = HPSA_DO_NOT_EXPOSE;
		} else {
			this_device->expose_state =
					HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
		}
		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i >= nphysicals) {
				ncurrent++;
				break;
			}

			if (h->hba_mode_enabled)
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
			else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
				h->transMethod & CFGTBL_Trans_io_accel2))
				break;

			hpsa_get_ioaccel_drive_info(h, this_device,
						lunaddrbytes, id_phys);
			atomic_set(&this_device->ioaccel_cmds_out, 0);
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (h->hba_mode_enabled)
				ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up at each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
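/*
 * Worked example (illustrative numbers): with max_cmd_sg_entries = 32
 * and use_sg = 40, the code above places 31 descriptors inline, 9 in
 * the chain block, and sets SGList = 32, SGTotal = 41; the unchained
 * case is simply SGList = SGTotal = use_sg.
 */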
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
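/*
 * Example of the fixup (illustrative values): a READ_6 with
 * cdb[2..4] = 0x12 0x34 0x08 (LBA 0x1234, 8 blocks) is rewritten as a
 * READ_10 with the LBA in bytes 2-5 and the count in bytes 7-8;
 * transfers of more than 0xffff blocks are declared
 * IO_ACCEL_INELIGIBLE and stay on the RAID path.
 */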
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	int len;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);
	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}
	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
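/*
 * Tweak arithmetic sketch: for 512-byte blocks the tweak is the LBA
 * itself; for, say, 4096-byte blocks it becomes LBA * 8, i.e. the
 * volume's block size rescaled to 512-byte units.
 */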
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;
	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}
		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}
4049 /* Set encryption parameters, if necessary */
4050 set_encrypt_ioaccel2(h, c, cp);
4052 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4053 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4054 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4056 cp->data_len = cpu_to_le32(total_len);
4057 cp->err_ptr = cpu_to_le64(c->busaddr +
4058 offsetof(struct io_accel2_cmd, error_data));
4059 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4061 /* fill in sg elements */
4062 if (use_sg > h->ioaccel_maxsg) {
4064 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4065 atomic_dec(&phys_disk->ioaccel_cmds_out);
4066 scsi_dma_unmap(cmd);
4070 cp->sg_count = (u8) use_sg;
4072 enqueue_cmd_and_start_io(h, c);
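/*
 * Note on the chaining logic above (descriptive comment, not normative):
 * when a request needs more than h->ioaccel_maxsg elements, the single
 * embedded SG entry becomes a chain pointer (chain_indicator = 0x80,
 * length = 0) referring to the preallocated chain block in
 * h->ioaccel2_cmd_sg_list[c->cmdindex], the real SG list is written there
 * instead, and sg_count is reported as 1 to the firmware.
 */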
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
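/*
 * Illustrative example for raid_map_helper() (not executed): with a 3-way
 * ADM mirror, data_disks_per_row == 4 and layout_map_count == 3, a raw
 * map_index of 9 lies in group 9 / 4 = 2.  If offload_to_mirror is 0 the
 * index is folded into the first group (9 % 4 = 1); if it is 1 the index
 * first wraps to group 0 (index 1) and then advances one group to
 * 1 * 4 + 1 = 5.
 */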
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block =
			(((u64) cmd->cmnd[2]) << 8) |
			cmd->cmnd[3];
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
						r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
						r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}
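/*
 * Worked example of the mapping math above (illustrative numbers only):
 * with strip_size == 128 and data_disks_per_row == 4, blocks_per_row is
 * 512.  A 16-block read at LBA 1000 gives first_row = last_row = 1,
 * first_row_offset = 488 and first_column = 488 / 128 = 3, so the request
 * fits in one strip and is eligible; the physical start block becomes
 * disk_starting_blk + 1 * 128 + (488 - 3 * 128) = disk_starting_blk + 232.
 */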
/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand (optionally before, and always after, calling cmd_alloc).
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */
		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
static void hpsa_cmd_init(struct ctlr_info *h, int index,
			    struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}
static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed.  Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}
static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	hpsa_scan_complete(h);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_init_shared_tag_map(sh, sh->can_queue);
	if (error) {
		dev_err(&h->pdev->dev,
			"%s: scsi_init_shared_tag_map failed for controller %d\n",
			__func__, h->ctlr);
		scsi_host_put(sh);
		return error;
	}
	h->scsi_host = sh;
	return 0;
}
static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
}
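/*
 * For example (assuming HPSA_NRESERVED_CMDS == 16): a block layer tag of 0
 * selects command block 16, leaving blocks 0 through 15 for driver-internal
 * commands allocated via cmd_alloc().
 */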
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int rc = 1;
	int first_queue;
	int last_queue;
	int rq;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		sprintf(msg, "cmd %d RESET FAILED, lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
			     DEFAULT_REPLY_QUEUE);
	if (rc == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev,
		"scsi %d:%d:%d:%d reset failed\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	return FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
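/*
 * Example of the swizzle above (illustrative): tag bytes
 * 00 01 02 03 04 05 06 07 become 03 02 01 00 07 06 05 04; that is, each
 * 32-bit half of the 64-bit tag is byte-reversed independently, for
 * controllers that expect the abort tag in the opposite byte order.
 */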
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}
static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort",
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}
static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	__le32 taglower, tagupper;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	dev = abort->scsi_cmd->device->hostdata;
	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
		return -1;

	c = cmd_alloc(h);
	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
		__func__, tagupper, taglower, c2->error_data.serv_response);
	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		rc = 0;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
	case IOACCEL2_SERV_RESPONSE_FAILURE:
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		rc = -1;
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
			__func__, tagupper, taglower,
			c2->error_data.serv_response);
		rc = -1;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/*
	 * I/O accelerator mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these commands,
	 * but not all underlying firmware can handle the abort TMF.
	 * Change abort to physical device reset when abort TMF is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
							abort, reply_queue);
	}
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}
/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}
/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
/* Send an abort for the specified command.
 * If the device and controller support it,
 * send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->abort_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */

static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}
/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
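/*
 * Packing example: HPSA_DRIVER_VERSION "3.4.4-1" parses as vmaj = 3,
 * vmin = 4, vsubmin = 4 (the "-1" suffix is ignored by the three-field
 * sscanf), so DriverVer = (3 << 16) | (4 << 8) | 4 = 0x030404.
 */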
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
		struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
}
6009 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6010 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6013 int pci_dir = XFER_NONE;
6014 u64 tag; /* for commands to be aborted */
6016 c->cmd_type = CMD_IOCTL_PEND;
6017 c->scsi_cmd = SCSI_CMD_BUSY;
6018 c->Header.ReplyQueue = 0;
6019 if (buff != NULL && size > 0) {
6020 c->Header.SGList = 1;
6021 c->Header.SGTotal = cpu_to_le16(1);
6023 c->Header.SGList = 0;
6024 c->Header.SGTotal = cpu_to_le16(0);
6026 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6028 if (cmd_type == TYPE_CMD) {
6031 /* are we trying to read a vital product page */
6032 if (page_code & VPD_PAGE) {
6033 c->Request.CDB[1] = 0x01;
6034 c->Request.CDB[2] = (page_code & 0xff);
6036 c->Request.CDBLen = 6;
6037 c->Request.type_attr_dir =
6038 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6039 c->Request.Timeout = 0;
6040 c->Request.CDB[0] = HPSA_INQUIRY;
6041 c->Request.CDB[4] = size & 0xFF;
6043 case HPSA_REPORT_LOG:
6044 case HPSA_REPORT_PHYS:
6045 /* Talking to controller so It's a physical command
6046 mode = 00 target = 0. Nothing to write.
6048 c->Request.CDBLen = 12;
6049 c->Request.type_attr_dir =
6050 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6051 c->Request.Timeout = 0;
6052 c->Request.CDB[0] = cmd;
6053 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6054 c->Request.CDB[7] = (size >> 16) & 0xFF;
6055 c->Request.CDB[8] = (size >> 8) & 0xFF;
6056 c->Request.CDB[9] = size & 0xFF;
6058 case HPSA_CACHE_FLUSH:
6059 c->Request.CDBLen = 12;
6060 c->Request.type_attr_dir =
6061 TYPE_ATTR_DIR(cmd_type,
6062 ATTR_SIMPLE, XFER_WRITE);
6063 c->Request.Timeout = 0;
6064 c->Request.CDB[0] = BMIC_WRITE;
6065 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6066 c->Request.CDB[7] = (size >> 8) & 0xFF;
6067 c->Request.CDB[8] = size & 0xFF;
6069 case TEST_UNIT_READY:
6070 c->Request.CDBLen = 6;
6071 c->Request.type_attr_dir =
6072 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6073 c->Request.Timeout = 0;
6075 case HPSA_GET_RAID_MAP:
6076 c->Request.CDBLen = 12;
6077 c->Request.type_attr_dir =
6078 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6079 c->Request.Timeout = 0;
6080 c->Request.CDB[0] = HPSA_CISS_READ;
6081 c->Request.CDB[1] = cmd;
6082 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6083 c->Request.CDB[7] = (size >> 16) & 0xFF;
6084 c->Request.CDB[8] = (size >> 8) & 0xFF;
6085 c->Request.CDB[9] = size & 0xFF;
6087 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6088 c->Request.CDBLen = 10;
6089 c->Request.type_attr_dir =
6090 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6091 c->Request.Timeout = 0;
6092 c->Request.CDB[0] = BMIC_READ;
6093 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6094 c->Request.CDB[7] = (size >> 16) & 0xFF;
6095 c->Request.CDB[8] = (size >> 8) & 0xFF;
6097 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6098 c->Request.CDBLen = 10;
6099 c->Request.type_attr_dir =
6100 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6101 c->Request.Timeout = 0;
6102 c->Request.CDB[0] = BMIC_READ;
6103 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6104 c->Request.CDB[7] = (size >> 16) & 0xFF;
6105 c->Request.CDB[8] = (size >> 8) & 0XFF;
6108 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6112 } else if (cmd_type == TYPE_MSG) {
6115 case HPSA_DEVICE_RESET_MSG:
6116 c->Request.CDBLen = 16;
6117 c->Request.type_attr_dir =
6118 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6119 c->Request.Timeout = 0; /* Don't time out */
6120 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6121 c->Request.CDB[0] = cmd;
6122 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6123 /* If bytes 4-7 are zero, it means reset the LunID device. */
6125 c->Request.CDB[4] = 0x00;
6126 c->Request.CDB[5] = 0x00;
6127 c->Request.CDB[6] = 0x00;
6128 c->Request.CDB[7] = 0x00;
6130 case HPSA_ABORT_MSG:
6131 memcpy(&tag, buff, sizeof(tag));
6132 dev_dbg(&h->pdev->dev,
6133 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6134 tag, c->Header.tag);
6135 c->Request.CDBLen = 16;
6136 c->Request.type_attr_dir =
6137 TYPE_ATTR_DIR(cmd_type,
6138 ATTR_SIMPLE, XFER_WRITE);
6139 c->Request.Timeout = 0; /* Don't time out */
6140 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6141 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6142 c->Request.CDB[2] = 0x00; /* reserved */
6143 c->Request.CDB[3] = 0x00; /* reserved */
6144 /* Tag to abort goes in CDB[4]-CDB[11] */
6145 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6146 c->Request.CDB[12] = 0x00; /* reserved */
6147 c->Request.CDB[13] = 0x00; /* reserved */
6148 c->Request.CDB[14] = 0x00; /* reserved */
6149 c->Request.CDB[15] = 0x00; /* reserved */
6152 dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd);
6157 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6161 switch (GET_DIR(c->Request.type_attr_dir)) {
6163 pci_dir = PCI_DMA_FROMDEVICE;
6166 pci_dir = PCI_DMA_TODEVICE;
6169 pci_dir = PCI_DMA_NONE;
6172 pci_dir = PCI_DMA_BIDIRECTIONAL;
6174 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
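/*
 * A sketch of a typical fill_cmd() call site (hpsa_flush_cache() below is
 * a real one): the caller takes a command from the pool, supplies a
 * DMA-able buffer and the target LUN address, and on success the CDB is
 * built and @buff is mapped, ready for submission. The buffer and address
 * names here are illustrative:
 *
 *	if (fill_cmd(c, HPSA_INQUIRY, h, inq_buf, sizeof(inq_buf), 0,
 *			scsi3addr, TYPE_CMD))
 *		goto out;	mapping failed, command not usable
 */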
6180 * Map (physical) PCI mem into (virtual) kernel space
6182 static void __iomem *remap_pci_mem(ulong base, ulong size)
6184 ulong page_base = ((ulong) base) & PAGE_MASK;
6185 ulong page_offs = ((ulong) base) - page_base;
6186 void __iomem *page_remapped = ioremap_nocache(page_base,
6189 return page_remapped ? (page_remapped + page_offs) : NULL;
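/*
 * Worked example (illustrative address): with 4K pages, base = 0xfd5f1250
 * gives page_base = 0xfd5f1000 and page_offs = 0x250; the whole range is
 * remapped from the page boundary and the caller receives the mapping
 * plus 0x250.
 */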
6192 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6194 return h->access.command_completed(h, q);
6197 static inline bool interrupt_pending(struct ctlr_info *h)
6199 return h->access.intr_pending(h);
6202 static inline long interrupt_not_for_us(struct ctlr_info *h)
6204 return (h->access.intr_pending(h) == 0) ||
6205 (h->interrupts_enabled == 0);
6208 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6211 if (unlikely(tag_index >= h->nr_cmds)) {
6212 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6218 static inline void finish_cmd(struct CommandList *c)
6220 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6221 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6222 || c->cmd_type == CMD_IOACCEL2))
6223 complete_scsi_command(c);
6224 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6225 complete(c->waiting);
6229 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
6231 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
6232 #define HPSA_SIMPLE_ERROR_BITS 0x03
6233 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
6234 return tag & ~HPSA_SIMPLE_ERROR_BITS;
6235 return tag & ~HPSA_PERF_ERROR_BITS;
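/*
 * Example, assuming (for illustration only) DIRECT_LOOKUP_SHIFT == 5:
 * HPSA_PERF_ERROR_BITS would be 0x1f, so a performant-mode raw tag of
 * 0x00000123 masks to 0x00000120, leaving just the command-index bits.
 * In simple mode only the two low status bits (0x03) are discarded.
 */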
6238 /* process completion of an indexed ("direct lookup") command */
6239 static inline void process_indexed_cmd(struct ctlr_info *h,
6243 struct CommandList *c;
6245 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6246 if (!bad_tag(h, tag_index, raw_tag)) {
6247 c = h->cmd_pool + tag_index;
6252 /* Some controllers, like p400, will give us one interrupt
6253 * after a soft reset, even if we turned interrupts off.
6254 * Only need to check for this in the hpsa_xxx_discard_completions functions.
6257 static int ignore_bogus_interrupt(struct ctlr_info *h)
6259 if (likely(!reset_devices))
6262 if (likely(h->interrupts_enabled))
6265 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6266 "(known firmware bug.) Ignoring.\n");
6272 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6273 * Relies on (h->q[x] == x) being true for x such that
6274 * 0 <= x < MAX_REPLY_QUEUES.
6276 static struct ctlr_info *queue_to_hba(u8 *queue)
6278 return container_of((queue - *queue), struct ctlr_info, q[0]);
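/*
 * Why the arithmetic above works: hpsa_request_irqs() initializes
 * h->q[x] = x, so a handler's @queue argument is &h->q[x] with
 * *queue == x; subtracting *queue yields &h->q[0], from which
 * container_of() recovers the enclosing ctlr_info.
 */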
6281 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6283 struct ctlr_info *h = queue_to_hba(queue);
6284 u8 q = *(u8 *) queue;
6287 if (ignore_bogus_interrupt(h))
6290 if (interrupt_not_for_us(h))
6292 h->last_intr_timestamp = get_jiffies_64();
6293 while (interrupt_pending(h)) {
6294 raw_tag = get_next_completion(h, q);
6295 while (raw_tag != FIFO_EMPTY)
6296 raw_tag = next_command(h, q);
6301 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6303 struct ctlr_info *h = queue_to_hba(queue);
6305 u8 q = *(u8 *) queue;
6307 if (ignore_bogus_interrupt(h))
6310 h->last_intr_timestamp = get_jiffies_64();
6311 raw_tag = get_next_completion(h, q);
6312 while (raw_tag != FIFO_EMPTY)
6313 raw_tag = next_command(h, q);
6317 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6319 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6321 u8 q = *(u8 *) queue;
6323 if (interrupt_not_for_us(h))
6325 h->last_intr_timestamp = get_jiffies_64();
6326 while (interrupt_pending(h)) {
6327 raw_tag = get_next_completion(h, q);
6328 while (raw_tag != FIFO_EMPTY) {
6329 process_indexed_cmd(h, raw_tag);
6330 raw_tag = next_command(h, q);
6336 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6338 struct ctlr_info *h = queue_to_hba(queue);
6340 u8 q = *(u8 *) queue;
6342 h->last_intr_timestamp = get_jiffies_64();
6343 raw_tag = get_next_completion(h, q);
6344 while (raw_tag != FIFO_EMPTY) {
6345 process_indexed_cmd(h, raw_tag);
6346 raw_tag = next_command(h, q);
6351 /* Send a message CDB to the firmware. Careful, this only works
6352 * in simple mode, not performant mode due to the tag lookup.
6353 * We only ever use this immediately after a controller reset.
6355 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6359 struct CommandListHeader CommandHeader;
6360 struct RequestBlock Request;
6361 struct ErrDescriptor ErrorDescriptor;
6363 struct Command *cmd;
6364 static const size_t cmd_sz = sizeof(*cmd) +
6365 sizeof(cmd->ErrorDescriptor);
6369 void __iomem *vaddr;
6372 vaddr = pci_ioremap_bar(pdev, 0);
6376 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6377 * CCISS commands, so they must be allocated from the lower 4GiB of memory.
6380 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6386 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6392 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6393 * although there's no guarantee, we assume that the address is at
6394 * least 4-byte aligned (most likely, it's page-aligned).
6396 paddr32 = cpu_to_le32(paddr64);
6398 cmd->CommandHeader.ReplyQueue = 0;
6399 cmd->CommandHeader.SGList = 0;
6400 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6401 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6402 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6404 cmd->Request.CDBLen = 16;
6405 cmd->Request.type_attr_dir =
6406 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6407 cmd->Request.Timeout = 0; /* Don't time out */
6408 cmd->Request.CDB[0] = opcode;
6409 cmd->Request.CDB[1] = type;
6410 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6411 cmd->ErrorDescriptor.Addr =
6412 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6413 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6415 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6417 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6418 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6419 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6421 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6426 /* we leak the DMA buffer here ... no choice since the controller could
6427 * still complete the command.
6429 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6430 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6435 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6437 if (tag & HPSA_ERROR_BIT) {
6438 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6443 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6448 #define hpsa_noop(p) hpsa_message(p, 3, 0)
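/*
 * hpsa_noop() therefore sends message opcode 3, type 0, which the firmware
 * completes without side effects; hpsa_init_reset_devices() polls with it
 * to check whether the controller responds after a reset.
 */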
6450 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6451 void __iomem *vaddr, u32 use_doorbell)
6455 /* For everything after the P600, the PCI power state method
6456 * of resetting the controller doesn't work, so we have this
6457 * other way using the doorbell register.
6459 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6460 writel(use_doorbell, vaddr + SA5_DOORBELL);
6462 /* PMC hardware guys tell us we need a 10 second delay after
6463 * doorbell reset and before any attempt to talk to the board
6464 * at all to ensure that this actually works and doesn't fall
6465 * over in some weird corner cases.
6468 } else { /* Try to do it the PCI power state way */
6470 /* Quoting from the Open CISS Specification: "The Power
6471 * Management Control/Status Register (CSR) controls the power
6472 * state of the device. The normal operating state is D0,
6473 * CSR=00h. The software off state is D3, CSR=03h. To reset
6474 * the controller, place the interface device in D3 then to D0,
6475 * this causes a secondary PCI reset which will reset the
6480 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6482 /* enter the D3hot power management state */
6483 rc = pci_set_power_state(pdev, PCI_D3hot);
6489 /* enter the D0 power management state */
6490 rc = pci_set_power_state(pdev, PCI_D0);
6495 * The P600 requires a small delay when changing states.
6496 * Otherwise we may think the board did not reset and we bail.
6497 * This for kdump only and is particular to the P600.
6504 static void init_driver_version(char *driver_version, int len)
6506 memset(driver_version, 0, len);
6507 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6510 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6512 char *driver_version;
6513 int i, size = sizeof(cfgtable->driver_version);
6515 driver_version = kmalloc(size, GFP_KERNEL);
6516 if (!driver_version)
6519 init_driver_version(driver_version, size);
6520 for (i = 0; i < size; i++)
6521 writeb(driver_version[i], &cfgtable->driver_version[i]);
6522 kfree(driver_version);
6526 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6527 unsigned char *driver_ver)
6531 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6532 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6535 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6538 char *driver_ver, *old_driver_ver;
6539 int rc, size = sizeof(cfgtable->driver_version);
6541 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6542 if (!old_driver_ver)
6544 driver_ver = old_driver_ver + size;
6546 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6547 * should have been changed, otherwise we know the reset failed.
6549 init_driver_version(old_driver_ver, size);
6550 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6551 rc = !memcmp(driver_ver, old_driver_ver, size);
6552 kfree(old_driver_ver);
6555 /* This does a hard reset of the controller using PCI power management
6556 * states or the doorbell register.
6558 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6562 u64 cfg_base_addr_index;
6563 void __iomem *vaddr;
6564 unsigned long paddr;
6565 u32 misc_fw_support;
6567 struct CfgTable __iomem *cfgtable;
6569 u16 command_register;
6571 /* For controllers as old as the P600, this is very nearly the same thing as:
6574 * pci_save_state(pci_dev);
6575 * pci_set_power_state(pci_dev, PCI_D3hot);
6576 * pci_set_power_state(pci_dev, PCI_D0);
6577 * pci_restore_state(pci_dev);
6579 * For controllers newer than the P600, the pci power state
6580 * method of resetting doesn't work so we have another way
6581 * using the doorbell register.
6584 if (!ctlr_is_resettable(board_id)) {
6585 dev_warn(&pdev->dev, "Controller not resettable\n");
6589 /* if controller is soft- but not hard resettable... */
6590 if (!ctlr_is_hard_resettable(board_id))
6591 return -ENOTSUPP; /* try soft reset later. */
6593 /* Save the PCI command register */
6594 pci_read_config_word(pdev, 4, &command_register);
6595 pci_save_state(pdev);
6597 /* find the first memory BAR, so we can find the cfg table */
6598 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6601 vaddr = remap_pci_mem(paddr, 0x250);
6605 /* find cfgtable in order to check if reset via doorbell is supported */
6606 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6607 &cfg_base_addr_index, &cfg_offset);
6610 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6611 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6616 rc = write_driver_ver_to_cfgtable(cfgtable);
6618 goto unmap_cfgtable;
6620 /* If reset via doorbell register is supported, use that.
6621 * There are two such methods. Favor the newest method.
6623 misc_fw_support = readl(&cfgtable->misc_fw_support);
6624 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6626 use_doorbell = DOORBELL_CTLR_RESET2;
6628 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6630 dev_warn(&pdev->dev,
6631 "Soft reset not supported. Firmware update is required.\n");
6632 rc = -ENOTSUPP; /* try soft reset */
6633 goto unmap_cfgtable;
6637 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6639 goto unmap_cfgtable;
6641 pci_restore_state(pdev);
6642 pci_write_config_word(pdev, 4, command_register);
6644 /* Some devices (notably the HP Smart Array 5i Controller)
6645 * need a little pause here */
6646 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6648 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6650 dev_warn(&pdev->dev,
6651 "Failed waiting for board to become ready after hard reset\n");
6652 goto unmap_cfgtable;
6655 rc = controller_reset_failed(vaddr);
6657 goto unmap_cfgtable;
6659 dev_warn(&pdev->dev, "Unable to successfully reset "
6660 "controller. Will try soft reset.\n");
6663 dev_info(&pdev->dev, "board ready after hard reset.\n");
6675 * We cannot read the structure directly; for portability we must use the io functions.
6677 * This is for debug only.
6679 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6685 dev_info(dev, "Controller Configuration information\n");
6686 dev_info(dev, "------------------------------------\n");
6687 for (i = 0; i < 4; i++)
6688 temp_name[i] = readb(&(tb->Signature[i]));
6689 temp_name[4] = '\0';
6690 dev_info(dev, " Signature = %s\n", temp_name);
6691 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6692 dev_info(dev, " Transport methods supported = 0x%x\n",
6693 readl(&(tb->TransportSupport)));
6694 dev_info(dev, " Transport methods active = 0x%x\n",
6695 readl(&(tb->TransportActive)));
6696 dev_info(dev, " Requested transport Method = 0x%x\n",
6697 readl(&(tb->HostWrite.TransportRequest)));
6698 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6699 readl(&(tb->HostWrite.CoalIntDelay)));
6700 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6701 readl(&(tb->HostWrite.CoalIntCount)));
6702 dev_info(dev, " Max outstanding commands = %d\n",
6703 readl(&(tb->CmdsOutMax)));
6704 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6705 for (i = 0; i < 16; i++)
6706 temp_name[i] = readb(&(tb->ServerName[i]));
6707 temp_name[16] = '\0';
6708 dev_info(dev, " Server Name = %s\n", temp_name);
6709 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6710 readl(&(tb->HeartBeat)));
6711 #endif /* HPSA_DEBUG */
6714 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6716 int i, offset, mem_type, bar_type;
6718 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6721 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6722 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6723 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6726 mem_type = pci_resource_flags(pdev, i) &
6727 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6729 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6730 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6731 offset += 4; /* 32 bit */
6733 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6736 default: /* reserved in PCI 2.2 */
6737 dev_warn(&pdev->dev,
6738 "base address is invalid\n");
6743 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6749 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6751 if (h->msix_vector) {
6752 if (h->pdev->msix_enabled)
6753 pci_disable_msix(h->pdev);
6755 } else if (h->msi_vector) {
6756 if (h->pdev->msi_enabled)
6757 pci_disable_msi(h->pdev);
6762 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6763 * controllers that are capable. If not, we use legacy INTx mode.
6765 static void hpsa_interrupt_mode(struct ctlr_info *h)
6767 #ifdef CONFIG_PCI_MSI
6769 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6771 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6772 hpsa_msix_entries[i].vector = 0;
6773 hpsa_msix_entries[i].entry = i;
6776 /* Some boards advertise MSI but don't really support it */
6777 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6778 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6779 goto default_int_mode;
6780 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6781 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
6782 h->msix_vector = MAX_REPLY_QUEUES;
6783 if (h->msix_vector > num_online_cpus())
6784 h->msix_vector = num_online_cpus();
6785 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6788 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6790 goto single_msi_mode;
6791 } else if (err < h->msix_vector) {
6792 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6793 "available\n", err);
6795 h->msix_vector = err;
6796 for (i = 0; i < h->msix_vector; i++)
6797 h->intr[i] = hpsa_msix_entries[i].vector;
6801 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6802 dev_info(&h->pdev->dev, "MSI capable controller\n");
6803 if (!pci_enable_msi(h->pdev))
6806 dev_warn(&h->pdev->dev, "MSI init failed\n");
6809 #endif /* CONFIG_PCI_MSI */
6810 /* if we get here we're going to use the default interrupt mode */
6811 h->intr[h->intr_mode] = h->pdev->irq;
6814 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6817 u32 subsystem_vendor_id, subsystem_device_id;
6819 subsystem_vendor_id = pdev->subsystem_vendor;
6820 subsystem_device_id = pdev->subsystem_device;
6821 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6822 subsystem_vendor_id;
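/*
 * e.g. the P600's subsystem IDs (vendor 0x103C, device 0x3225) compose
 * to board_id 0x3225103C, the value hpsa_p600_dma_prefetch_quirk()
 * tests below.
 */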
6824 for (i = 0; i < ARRAY_SIZE(products); i++)
6825 if (*board_id == products[i].board_id)
6828 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6829 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6831 dev_warn(&pdev->dev, "unrecognized board ID: "
6832 "0x%08x, ignoring.\n", *board_id);
6835 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6838 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6839 unsigned long *memory_bar)
6843 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6844 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6845 /* addressing mode bits already removed */
6846 *memory_bar = pci_resource_start(pdev, i);
6847 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6851 dev_warn(&pdev->dev, "no memory BAR found\n");
6855 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6861 iterations = HPSA_BOARD_READY_ITERATIONS;
6863 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6865 for (i = 0; i < iterations; i++) {
6866 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6867 if (wait_for_ready) {
6868 if (scratchpad == HPSA_FIRMWARE_READY)
6871 if (scratchpad != HPSA_FIRMWARE_READY)
6874 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6876 dev_warn(&pdev->dev, "board not ready, timed out.\n");
6880 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6881 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6884 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6885 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6886 *cfg_base_addr &= (u32) 0x0000ffff;
6887 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6888 if (*cfg_base_addr_index == -1) {
6889 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6895 static void hpsa_free_cfgtables(struct ctlr_info *h)
6897 if (h->transtable) {
6898 iounmap(h->transtable);
6899 h->transtable = NULL;
6902 iounmap(h->cfgtable);
6907 /* Find and map CISS config table and transfer table
6908 * several items must be unmapped (freed) later
6910 static int hpsa_find_cfgtables(struct ctlr_info *h)
6914 u64 cfg_base_addr_index;
6918 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6919 &cfg_base_addr_index, &cfg_offset);
6922 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6923 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6925 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6928 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6931 /* Find performant mode table. */
6932 trans_offset = readl(&h->cfgtable->TransMethodOffset);
6933 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6934 cfg_base_addr_index)+cfg_offset+trans_offset,
6935 sizeof(*h->transtable));
6936 if (!h->transtable) {
6937 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
6938 hpsa_free_cfgtables(h);
6944 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6946 #define MIN_MAX_COMMANDS 16
6947 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6949 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6951 /* Limit commands in memory-limited kdump scenario. */
6952 if (reset_devices && h->max_commands > 32)
6953 h->max_commands = 32;
6955 if (h->max_commands < MIN_MAX_COMMANDS) {
6956 dev_warn(&h->pdev->dev,
6957 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
6960 h->max_commands = MIN_MAX_COMMANDS;
6964 /* If the controller reports that the total max sg entries is greater than 512,
6965 * then we know that chained SG blocks work. (Original smart arrays did not
6966 * support chained SG blocks and would return zero for max sg entries.)
6968 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6970 return h->maxsgentries > 512;
6973 /* Interrogate the hardware for some limits:
6974 * max commands, max SG elements without chaining, and with chaining,
6975 * SG chain block size, etc.
6977 static void hpsa_find_board_params(struct ctlr_info *h)
6979 hpsa_get_max_perf_mode_cmds(h);
6980 h->nr_cmds = h->max_commands;
6981 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6982 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6983 if (hpsa_supports_chained_sg_blocks(h)) {
6984 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
6985 h->max_cmd_sg_entries = 32;
6986 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6987 h->maxsgentries--; /* save one for chain pointer */
6990 * Original smart arrays supported at most 31 s/g entries
6991 * embedded inline in the command (trying to use more
6992 * would lock up the controller)
6994 h->max_cmd_sg_entries = 31;
6995 h->maxsgentries = 31; /* default to traditional values */
6999 /* Find out what task management functions are supported and cache */
7000 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7001 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7002 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7003 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7004 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7005 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7006 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7009 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7011 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7012 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7018 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7022 driver_support = readl(&(h->cfgtable->driver_support));
7023 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7025 driver_support |= ENABLE_SCSI_PREFETCH;
7027 driver_support |= ENABLE_UNIT_ATTN;
7028 writel(driver_support, &(h->cfgtable->driver_support));
7031 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7032 * in a prefetch beyond physical memory.
7034 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7038 if (h->board_id != 0x3225103C)
7040 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7041 dma_prefetch |= 0x8000;
7042 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7045 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7049 unsigned long flags;
7050 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7051 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7052 spin_lock_irqsave(&h->lock, flags);
7053 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7054 spin_unlock_irqrestore(&h->lock, flags);
7055 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7057 /* delay and try again */
7058 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7065 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7069 unsigned long flags;
7071 /* under certain very rare conditions, this can take a while.
7072 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7073 * as we enter this code.)
7075 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7076 if (h->remove_in_progress)
7078 spin_lock_irqsave(&h->lock, flags);
7079 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7080 spin_unlock_irqrestore(&h->lock, flags);
7081 if (!(doorbell_value & CFGTBL_ChangeReq))
7083 /* delay and try again */
7084 msleep(MODE_CHANGE_WAIT_INTERVAL);
7091 /* return -ENODEV or other reason on error, 0 on success */
7092 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7096 trans_support = readl(&(h->cfgtable->TransportSupport));
7097 if (!(trans_support & SIMPLE_MODE))
7100 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7102 /* Update the field, and then ring the doorbell */
7103 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7104 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7105 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7106 if (hpsa_wait_for_mode_change_ack(h))
7108 print_cfg_table(&h->pdev->dev, h->cfgtable);
7109 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7111 h->transMethod = CFGTBL_Trans_Simple;
7114 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
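/*
 * The sequence above is the generic config-table handshake used throughout
 * the driver: write the requested mode into HostWrite.TransportRequest,
 * ring the CFGTBL_ChangeReq doorbell, wait for the controller's ack, then
 * read TransportActive back to confirm the mode actually took effect.
 */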
7118 /* free items allocated or mapped by hpsa_pci_init */
7119 static void hpsa_free_pci_init(struct ctlr_info *h)
7121 hpsa_free_cfgtables(h); /* pci_init 4 */
7122 iounmap(h->vaddr); /* pci_init 3 */
7124 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7126 * call pci_disable_device before pci_release_regions per
7127 * Documentation/PCI/pci.txt
7129 pci_disable_device(h->pdev); /* pci_init 1 */
7130 pci_release_regions(h->pdev); /* pci_init 2 */
7133 /* several items must be freed later */
7134 static int hpsa_pci_init(struct ctlr_info *h)
7136 int prod_index, err;
7138 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7141 h->product_name = products[prod_index].product_name;
7142 h->access = *(products[prod_index].access);
7144 h->needs_abort_tags_swizzled =
7145 ctlr_needs_abort_tags_swizzled(h->board_id);
7147 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7148 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7150 err = pci_enable_device(h->pdev);
7152 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7153 pci_disable_device(h->pdev);
7157 err = pci_request_regions(h->pdev, HPSA);
7159 dev_err(&h->pdev->dev,
7160 "failed to obtain PCI resources\n");
7161 pci_disable_device(h->pdev);
7165 pci_set_master(h->pdev);
7167 hpsa_interrupt_mode(h);
7168 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7170 goto clean2; /* intmode+region, pci */
7171 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7173 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7175 goto clean2; /* intmode+region, pci */
7177 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7179 goto clean3; /* vaddr, intmode+region, pci */
7180 err = hpsa_find_cfgtables(h);
7182 goto clean3; /* vaddr, intmode+region, pci */
7183 hpsa_find_board_params(h);
7185 if (!hpsa_CISS_signature_present(h)) {
7187 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7189 hpsa_set_driver_support_bits(h);
7190 hpsa_p600_dma_prefetch_quirk(h);
7191 err = hpsa_enter_simple_mode(h);
7193 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7196 clean4: /* cfgtables, vaddr, intmode+region, pci */
7197 hpsa_free_cfgtables(h);
7198 clean3: /* vaddr, intmode+region, pci */
7201 clean2: /* intmode+region, pci */
7202 hpsa_disable_interrupt_mode(h);
7204 * call pci_disable_device before pci_release_regions per
7205 * Documentation/PCI/pci.txt
7207 pci_disable_device(h->pdev);
7208 pci_release_regions(h->pdev);
7212 static void hpsa_hba_inquiry(struct ctlr_info *h)
7216 #define HBA_INQUIRY_BYTE_COUNT 64
7217 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7218 if (!h->hba_inquiry_data)
7220 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7221 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7223 kfree(h->hba_inquiry_data);
7224 h->hba_inquiry_data = NULL;
7228 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7231 void __iomem *vaddr;
7236 /* kdump kernel is loading, we don't know what state the
7237 * PCI interface is in. dev->enable_cnt is equal to zero,
7238 * so we call enable+disable, wait a while, and switch it on.
7240 rc = pci_enable_device(pdev);
7242 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7245 pci_disable_device(pdev);
7246 msleep(260); /* a randomly chosen number */
7247 rc = pci_enable_device(pdev);
7249 dev_warn(&pdev->dev, "failed to enable device.\n");
7253 pci_set_master(pdev);
7255 vaddr = pci_ioremap_bar(pdev, 0);
7256 if (vaddr == NULL) {
7260 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7263 /* Reset the controller with a PCI power-cycle or via doorbell */
7264 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7266 /* -ENOTSUPP here means we cannot reset the controller
7267 * but it's already (and still) up and running in
7268 * "performant mode". Or, it might be 640x, which can't reset
7269 * due to concerns about shared bbwc between 6402/6404 pair.
7274 /* Now try to get the controller to respond to a no-op */
7275 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7276 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7277 if (hpsa_noop(pdev) == 0)
7280 dev_warn(&pdev->dev, "no-op failed%s\n",
7281 (i < 11 ? "; re-trying" : ""));
7286 pci_disable_device(pdev);
7290 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7292 kfree(h->cmd_pool_bits);
7293 h->cmd_pool_bits = NULL;
7295 pci_free_consistent(h->pdev,
7296 h->nr_cmds * sizeof(struct CommandList),
7298 h->cmd_pool_dhandle);
7300 h->cmd_pool_dhandle = 0;
7302 if (h->errinfo_pool) {
7303 pci_free_consistent(h->pdev,
7304 h->nr_cmds * sizeof(struct ErrorInfo),
7306 h->errinfo_pool_dhandle);
7307 h->errinfo_pool = NULL;
7308 h->errinfo_pool_dhandle = 0;
7312 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7314 h->cmd_pool_bits = kzalloc(
7315 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7316 sizeof(unsigned long), GFP_KERNEL);
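/*
 * cmd_pool_bits is a bitmap with one bit per command, rounded up to
 * whole unsigned longs; e.g. with an illustrative nr_cmds of 1024 on
 * a 64-bit kernel that is 16 longs, i.e. 128 bytes.
 */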
7317 h->cmd_pool = pci_alloc_consistent(h->pdev,
7318 h->nr_cmds * sizeof(*h->cmd_pool),
7319 &(h->cmd_pool_dhandle));
7320 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7321 h->nr_cmds * sizeof(*h->errinfo_pool),
7322 &(h->errinfo_pool_dhandle));
7323 if ((h->cmd_pool_bits == NULL)
7324 || (h->cmd_pool == NULL)
7325 || (h->errinfo_pool == NULL)) {
7326 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7329 hpsa_preinitialize_commands(h);
7332 hpsa_free_cmd_pool(h);
7336 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7340 cpu = cpumask_first(cpu_online_mask);
7341 for (i = 0; i < h->msix_vector; i++) {
7342 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7343 cpu = cpumask_next(cpu, cpu_online_mask);
7347 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7348 static void hpsa_free_irqs(struct ctlr_info *h)
7352 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7353 /* Single reply queue, only one irq to free */
7355 irq_set_affinity_hint(h->intr[i], NULL);
7356 free_irq(h->intr[i], &h->q[i]);
7361 for (i = 0; i < h->msix_vector; i++) {
7362 irq_set_affinity_hint(h->intr[i], NULL);
7363 free_irq(h->intr[i], &h->q[i]);
7366 for (; i < MAX_REPLY_QUEUES; i++)
7370 /* returns 0 on success; cleans up and returns -Enn on error */
7371 static int hpsa_request_irqs(struct ctlr_info *h,
7372 irqreturn_t (*msixhandler)(int, void *),
7373 irqreturn_t (*intxhandler)(int, void *))
7378 * initialize h->q[x] = x so that interrupt handlers know which queue to use.
7381 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7384 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7385 /* If performant mode and MSI-X, use multiple reply queues */
7386 for (i = 0; i < h->msix_vector; i++) {
7387 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7388 rc = request_irq(h->intr[i], msixhandler,
7394 dev_err(&h->pdev->dev,
7395 "failed to get irq %d for %s\n",
7396 h->intr[i], h->devname);
7397 for (j = 0; j < i; j++) {
7398 free_irq(h->intr[j], &h->q[j]);
7401 for (; j < MAX_REPLY_QUEUES; j++)
7406 hpsa_irq_affinity_hints(h);
7408 /* Use single reply pool */
7409 if (h->msix_vector > 0 || h->msi_vector) {
7411 sprintf(h->intrname[h->intr_mode],
7412 "%s-msix", h->devname);
7414 sprintf(h->intrname[h->intr_mode],
7415 "%s-msi", h->devname);
7416 rc = request_irq(h->intr[h->intr_mode],
7418 h->intrname[h->intr_mode],
7419 &h->q[h->intr_mode]);
7421 sprintf(h->intrname[h->intr_mode],
7422 "%s-intx", h->devname);
7423 rc = request_irq(h->intr[h->intr_mode],
7424 intxhandler, IRQF_SHARED,
7425 h->intrname[h->intr_mode],
7426 &h->q[h->intr_mode]);
7428 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7431 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7432 h->intr[h->intr_mode], h->devname);
7439 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7442 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7444 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7445 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7447 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7451 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7452 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7454 dev_warn(&h->pdev->dev, "Board failed to become ready "
7455 "after soft reset.\n");
7462 static void hpsa_free_reply_queues(struct ctlr_info *h)
7466 for (i = 0; i < h->nreply_queues; i++) {
7467 if (!h->reply_queue[i].head)
7469 pci_free_consistent(h->pdev,
7470 h->reply_queue_size,
7471 h->reply_queue[i].head,
7472 h->reply_queue[i].busaddr);
7473 h->reply_queue[i].head = NULL;
7474 h->reply_queue[i].busaddr = 0;
7476 h->reply_queue_size = 0;
7479 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7481 hpsa_free_performant_mode(h); /* init_one 7 */
7482 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7483 hpsa_free_cmd_pool(h); /* init_one 5 */
7484 hpsa_free_irqs(h); /* init_one 4 */
7485 scsi_host_put(h->scsi_host); /* init_one 3 */
7486 h->scsi_host = NULL; /* init_one 3 */
7487 hpsa_free_pci_init(h); /* init_one 2_5 */
7488 free_percpu(h->lockup_detected); /* init_one 2 */
7489 h->lockup_detected = NULL; /* init_one 2 */
7490 if (h->resubmit_wq) {
7491 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7492 h->resubmit_wq = NULL;
7494 if (h->rescan_ctlr_wq) {
7495 destroy_workqueue(h->rescan_ctlr_wq);
7496 h->rescan_ctlr_wq = NULL;
7498 kfree(h); /* init_one 1 */
7501 /* Called when controller lockup detected. */
7502 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7505 struct CommandList *c;
7508 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7509 for (i = 0; i < h->nr_cmds; i++) {
7510 c = h->cmd_pool + i;
7511 refcount = atomic_inc_return(&c->refcount);
7513 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7515 atomic_dec(&h->commands_outstanding);
7520 dev_warn(&h->pdev->dev,
7521 "failed %d commands in fail_all\n", failcount);
7524 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7528 for_each_online_cpu(cpu) {
7529 u32 *lockup_detected;
7530 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7531 *lockup_detected = value;
7533 wmb(); /* be sure the per-cpu variables are out to memory */
7536 static void controller_lockup_detected(struct ctlr_info *h)
7538 unsigned long flags;
7539 u32 lockup_detected;
7541 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7542 spin_lock_irqsave(&h->lock, flags);
7543 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7544 if (!lockup_detected) {
7545 /* no heartbeat, but controller gave us a zero. */
7546 dev_warn(&h->pdev->dev,
7547 "lockup detected after %d but scratchpad register is zero\n",
7548 h->heartbeat_sample_interval / HZ);
7549 lockup_detected = 0xffffffff;
7551 set_lockup_detected_for_all_cpus(h, lockup_detected);
7552 spin_unlock_irqrestore(&h->lock, flags);
7553 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %ds\n",
7554 lockup_detected, h->heartbeat_sample_interval / HZ);
7555 pci_disable_device(h->pdev);
7556 fail_all_outstanding_cmds(h);
7559 static int detect_controller_lockup(struct ctlr_info *h)
7563 unsigned long flags;
7565 now = get_jiffies_64();
7566 /* If we've received an interrupt recently, we're ok. */
7567 if (time_after64(h->last_intr_timestamp +
7568 (h->heartbeat_sample_interval), now))
7572 * If we've already checked the heartbeat recently, we're ok.
7573 * This could happen if someone sends us a signal. We
7574 * otherwise don't care about signals in this thread.
7576 if (time_after64(h->last_heartbeat_timestamp +
7577 (h->heartbeat_sample_interval), now))
7580 /* If heartbeat has not changed since we last looked, we're not ok. */
7581 spin_lock_irqsave(&h->lock, flags);
7582 heartbeat = readl(&h->cfgtable->HeartBeat);
7583 spin_unlock_irqrestore(&h->lock, flags);
7584 if (h->last_heartbeat == heartbeat) {
7585 controller_lockup_detected(h);
7590 h->last_heartbeat = heartbeat;
7591 h->last_heartbeat_timestamp = now;
7595 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7600 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7603 /* Ask the controller to clear the events we're handling. */
7604 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7605 | CFGTBL_Trans_io_accel2)) &&
7606 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7607 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7609 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7610 event_type = "state change";
7611 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7612 event_type = "configuration change";
7613 /* Stop sending new RAID offload reqs via the IO accelerator */
7614 scsi_block_requests(h->scsi_host);
7615 for (i = 0; i < h->ndevices; i++)
7616 h->dev[i]->offload_enabled = 0;
7617 hpsa_drain_accel_commands(h);
7618 /* Set 'accelerator path config change' bit */
7619 dev_warn(&h->pdev->dev,
7620 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7621 h->events, event_type);
7622 writel(h->events, &(h->cfgtable->clear_event_notify));
7623 /* Set the "clear event notify field update" bit 6 */
7624 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7625 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7626 hpsa_wait_for_clear_event_notify_ack(h);
7627 scsi_unblock_requests(h->scsi_host);
7629 /* Acknowledge controller notification events. */
7630 writel(h->events, &(h->cfgtable->clear_event_notify));
7631 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7632 hpsa_wait_for_clear_event_notify_ack(h);
7634 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7635 hpsa_wait_for_mode_change_ack(h);
7641 /* Check a register on the controller to see if there are configuration
7642 * changes (added/changed/removed logical drives, etc.) which mean that
7643 * we should rescan the controller for devices.
7644 * Also check flag for driver-initiated rescan.
7646 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7648 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7651 h->events = readl(&(h->cfgtable->event_notify));
7652 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7656 * Check if any of the offline devices have become ready
7658 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7660 unsigned long flags;
7661 struct offline_device_entry *d;
7662 struct list_head *this, *tmp;
7664 spin_lock_irqsave(&h->offline_device_lock, flags);
7665 list_for_each_safe(this, tmp, &h->offline_device_list) {
7666 d = list_entry(this, struct offline_device_entry,
7668 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7669 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7670 spin_lock_irqsave(&h->offline_device_lock, flags);
7671 list_del(&d->offline_list);
7672 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7675 spin_lock_irqsave(&h->offline_device_lock, flags);
7677 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7681 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
7683 unsigned long flags;
7684 struct ctlr_info *h = container_of(to_delayed_work(work),
7685 struct ctlr_info, rescan_ctlr_work);
7688 if (h->remove_in_progress)
7691 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7692 scsi_host_get(h->scsi_host);
7693 hpsa_ack_ctlr_events(h);
7694 hpsa_scan_start(h->scsi_host);
7695 scsi_host_put(h->scsi_host);
7697 spin_lock_irqsave(&h->lock, flags);
7698 if (!h->remove_in_progress)
7699 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7700 h->heartbeat_sample_interval);
7701 spin_unlock_irqrestore(&h->lock, flags);
7704 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7706 unsigned long flags;
7707 struct ctlr_info *h = container_of(to_delayed_work(work),
7708 struct ctlr_info, monitor_ctlr_work);
7710 detect_controller_lockup(h);
7711 if (lockup_detected(h))
7714 spin_lock_irqsave(&h->lock, flags);
7715 if (!h->remove_in_progress)
7716 schedule_delayed_work(&h->monitor_ctlr_work,
7717 h->heartbeat_sample_interval);
7718 spin_unlock_irqrestore(&h->lock, flags);
7721 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7724 struct workqueue_struct *wq = NULL;
7726 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
7728 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7733 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7736 struct ctlr_info *h;
7737 int try_soft_reset = 0;
7738 unsigned long flags;
7741 if (number_of_controllers == 0)
7742 printk(KERN_INFO DRIVER_NAME "\n");
7744 rc = hpsa_lookup_board_id(pdev, &board_id);
7746 dev_warn(&pdev->dev, "Board ID not found\n");
7750 rc = hpsa_init_reset_devices(pdev, board_id);
7752 if (rc != -ENOTSUPP)
7754 /* If the reset fails in a particular way (it has no way to do
7755 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7756 * a soft reset once we get the controller configured up to the
7757 * point that it can accept a command.
7763 reinit_after_soft_reset:
7765 /* Command structures must be aligned on a 32-byte boundary because
7766 * the 5 lower bits of the address are used by the hardware and by
7767 * the driver. See comments in hpsa.h for more info.
7769 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
7770 h = kzalloc(sizeof(*h), GFP_KERNEL);
7772 dev_err(&pdev->dev, "Failed to allocate controller head\n");
7778 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
7779 INIT_LIST_HEAD(&h->offline_device_list);
7780 spin_lock_init(&h->lock);
7781 spin_lock_init(&h->offline_device_lock);
7782 spin_lock_init(&h->scan_lock);
7783 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
7784 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
7786 /* Allocate and clear per-cpu variable lockup_detected */
7787 h->lockup_detected = alloc_percpu(u32);
7788 if (!h->lockup_detected) {
7789 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
7791 goto clean1; /* aer/h */
7793 set_lockup_detected_for_all_cpus(h, 0);
7795 rc = hpsa_pci_init(h);
7797 goto clean2; /* lu, aer/h */
7799 /* relies on h-> settings made by hpsa_pci_init, including
7800 * interrupt_mode and h->intr */
7801 rc = hpsa_scsi_host_alloc(h);
7803 goto clean2_5; /* pci, lu, aer/h */
7805 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
7806 h->ctlr = number_of_controllers;
7807 number_of_controllers++;
7809 /* configure PCI DMA stuff */
7810 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7814 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7818 dev_err(&pdev->dev, "no suitable DMA available\n");
7819 goto clean3; /* shost, pci, lu, aer/h */
7823 /* make sure the board interrupts are off */
7824 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7826 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
7828 goto clean3; /* shost, pci, lu, aer/h */
7829 rc = hpsa_alloc_cmd_pool(h);
7831 goto clean4; /* irq, shost, pci, lu, aer/h */
7832 rc = hpsa_alloc_sg_chain_blocks(h);
7834 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
7835 init_waitqueue_head(&h->scan_wait_queue);
7836 init_waitqueue_head(&h->abort_cmd_wait_queue);
7837 init_waitqueue_head(&h->abort_sync_wait_queue);
7838 h->scan_finished = 1; /* no scan currently in progress */
7840 pci_set_drvdata(pdev, h);
7842 h->hba_mode_enabled = 0;
7844 spin_lock_init(&h->devlock);
7845 rc = hpsa_put_ctlr_into_performant_mode(h);
7847 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7849 /* hook into SCSI subsystem */
7850 rc = hpsa_scsi_add_host(h);
7852 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
7854 /* create the resubmit workqueue */
7855 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7856 if (!h->rescan_ctlr_wq) {
7861 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7862 if (!h->resubmit_wq) {
7864 goto clean7; /* aer/h */
7868 * At this point, the controller is ready to take commands.
7869 * Now, if reset_devices and the hard reset didn't work, try
7870 * the soft reset and see if that works.
7872 if (try_soft_reset) {
7874 /* This is kind of gross. We may or may not get a completion
7875 * from the soft reset command, and if we do, then the value
7876 * from the fifo may or may not be valid. So, we wait 10 secs
7877 * after the reset, throwing away any completions we get during
7878 * that time. Unregister the interrupt handler and register
7879 * fake ones to scoop up any residual completions.
7881 spin_lock_irqsave(&h->lock, flags);
7882 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7883 spin_unlock_irqrestore(&h->lock, flags);
7885 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
7886 hpsa_intx_discard_completions);
7888 dev_warn(&h->pdev->dev,
7889 "Failed to request_irq after soft reset.\n");
7891 * cannot goto clean7 or free_irqs will be called
7892 * again. Instead, do its work
7894 hpsa_free_performant_mode(h); /* clean7 */
7895 hpsa_free_sg_chain_blocks(h); /* clean6 */
7896 hpsa_free_cmd_pool(h); /* clean5 */
7898 * skip hpsa_free_irqs(h) clean4 since that
7899 * was just called before request_irqs failed
7904 rc = hpsa_kdump_soft_reset(h);
7906 /* Neither hard nor soft reset worked, we're hosed. */
7909 dev_info(&h->pdev->dev, "Board READY.\n");
7910 dev_info(&h->pdev->dev,
7911 "Waiting for stale completions to drain.\n");
7912 h->access.set_intr_mask(h, HPSA_INTR_ON);
7914 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7916 rc = controller_reset_failed(h->cfgtable);
7918 dev_info(&h->pdev->dev,
7919 "Soft reset appears to have failed.\n");
7921 /* since the controller's reset, we have to go back and re-init
7922 * everything. Easiest to just forget what we've done and do it all over again.
7925 hpsa_undo_allocations_after_kdump_soft_reset(h);
7928 /* don't goto clean, we already unallocated */
7931 goto reinit_after_soft_reset;
7934 /* Enable Accelerated IO path at driver layer */
7935 h->acciopath_status = 1;
7938 /* Turn the interrupts on so we can service requests */
7939 h->access.set_intr_mask(h, HPSA_INTR_ON);
7941 hpsa_hba_inquiry(h);
7943 /* Monitor the controller for firmware lockups */
7944 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7945 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7946 schedule_delayed_work(&h->monitor_ctlr_work,
7947 h->heartbeat_sample_interval);
7948 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7949 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7950 h->heartbeat_sample_interval);
7953 clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
7954 kfree(h->hba_inquiry_data);
7955 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
7956 hpsa_free_performant_mode(h);
7957 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7958 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
7959 hpsa_free_sg_chain_blocks(h);
7960 clean5: /* cmd, irq, shost, pci, lu, aer/h */
7961 hpsa_free_cmd_pool(h);
7962 clean4: /* irq, shost, pci, lu, aer/h */
7964 clean3: /* shost, pci, lu, aer/h */
7965 scsi_host_put(h->scsi_host);
7966 h->scsi_host = NULL;
7967 clean2_5: /* pci, lu, aer/h */
7968 hpsa_free_pci_init(h);
7969 clean2: /* lu, aer/h */
7970 if (h->lockup_detected) {
7971 free_percpu(h->lockup_detected);
7972 h->lockup_detected = NULL;
7974 clean1: /* wq/aer/h */
7975 if (h->resubmit_wq) {
7976 destroy_workqueue(h->resubmit_wq);
7977 h->resubmit_wq = NULL;
7979 if (h->rescan_ctlr_wq) {
7980 destroy_workqueue(h->rescan_ctlr_wq);
7981 h->rescan_ctlr_wq = NULL;
7987 static void hpsa_flush_cache(struct ctlr_info *h)
7990 struct CommandList *c;
7993 if (unlikely(lockup_detected(h)))
7995 flush_buf = kzalloc(4, GFP_KERNEL);
8001 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8002 RAID_CTLR_LUNID, TYPE_CMD)) {
8005 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8006 PCI_DMA_TODEVICE, NO_TIMEOUT);
8009 if (c->err_info->CommandStatus != 0)
8011 dev_warn(&h->pdev->dev,
8012 "error flushing cache on controller\n");
8017 static void hpsa_shutdown(struct pci_dev *pdev)
8019 struct ctlr_info *h;
8021 h = pci_get_drvdata(pdev);
8022 /* Turn board interrupts off and send the flush-cache command;
8023 * sendcmd will turn off interrupts and send the flush,
8024 * writing all data in the battery-backed cache to disk.
8026 hpsa_flush_cache(h);
8027 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8028 hpsa_free_irqs(h); /* init_one 4 */
8029 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
8032 static void hpsa_free_device_info(struct ctlr_info *h)
8036 for (i = 0; i < h->ndevices; i++) {
8042 static void hpsa_remove_one(struct pci_dev *pdev)
8044 struct ctlr_info *h;
8045 unsigned long flags;
8047 if (pci_get_drvdata(pdev) == NULL) {
8048 dev_err(&pdev->dev, "unable to remove device\n");
8051 h = pci_get_drvdata(pdev);
8053 /* Get rid of any controller monitoring work items */
8054 spin_lock_irqsave(&h->lock, flags);
8055 h->remove_in_progress = 1;
8056 spin_unlock_irqrestore(&h->lock, flags);
8057 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8058 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8059 destroy_workqueue(h->rescan_ctlr_wq);
8060 destroy_workqueue(h->resubmit_wq);
8062 /* includes hpsa_free_irqs - init_one 4 */
8063 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8064 hpsa_shutdown(pdev);
8066 hpsa_free_device_info(h); /* scan */
8068 kfree(h->hba_inquiry_data); /* init_one 10 */
8069 h->hba_inquiry_data = NULL; /* init_one 10 */
8071 scsi_remove_host(h->scsi_host); /* init_one 8 */
8072 hpsa_free_ioaccel2_sg_chain_blocks(h);
8073 hpsa_free_performant_mode(h); /* init_one 7 */
8074 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8075 hpsa_free_cmd_pool(h); /* init_one 5 */
8077 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8079 scsi_host_put(h->scsi_host); /* init_one 3 */
8080 h->scsi_host = NULL; /* init_one 3 */
8082 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8083 hpsa_free_pci_init(h); /* init_one 2.5 */
8085 free_percpu(h->lockup_detected); /* init_one 2 */
8086 h->lockup_detected = NULL; /* init_one 2 */
8087 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8088 kfree(h); /* init_one 1 */
8091 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8092 __attribute__((unused)) pm_message_t state)
8097 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8102 static struct pci_driver hpsa_pci_driver = {
8104 .probe = hpsa_init_one,
8105 .remove = hpsa_remove_one,
8106 .id_table = hpsa_pci_device_id, /* id_table */
8107 .shutdown = hpsa_shutdown,
8108 .suspend = hpsa_suspend,
8109 .resume = hpsa_resume,
8112 /* Fill in bucket_map[], given nsgs (the max number of
8113 * scatter gather elements supported) and bucket[],
8114 * which is an array of 8 integers. The bucket[] array
8115 * contains 8 different DMA transfer sizes (in 16
8116 * byte increments) which the controller uses to fetch
8117 * commands. This function fills in bucket_map[], which
8118 * maps a given number of scatter gather elements to one of
8119 * the 8 DMA transfer sizes. The point of it is to allow the
8120 * controller to only do as much DMA as needed to fetch the
8121 * command, with the DMA transfer size encoded in the lower
8122 * bits of the command address.
8124 static void calc_bucket_map(int bucket[], int num_buckets,
8125 int nsgs, int min_blocks, u32 *bucket_map)
8129 /* Note, bucket_map must have nsgs+1 entries. */
8130 for (i = 0; i <= nsgs; i++) {
8131 /* Compute size of a command with i SG entries */
8132 size = i + min_blocks;
8133 b = num_buckets; /* Assume the biggest bucket */
8134 /* Find the bucket that is just big enough */
8135 for (j = 0; j < num_buckets; j++) {
8136 if (bucket[j] >= size) {
8141 /* for a command with i SG entries, use bucket b. */
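/*
 * Worked example using the bft[] table defined below ({5, 6, 8, 10, 12,
 * 20, 28, SG_ENTRIES_IN_CMD + 4}) with min_blocks == 4: a command with
 * i == 3 SG entries needs 3 + 4 = 7 blocks; the first bucket >= 7 is 8,
 * at index 2, so bucket_map[3] == 2 and the controller will fetch
 * 8 * 16 = 128 bytes for such commands.
 */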
8147 * return -ENODEV on err, 0 on success (or no action)
8148 * allocates numerous items that must be freed later
8150 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
8153 unsigned long register_value;
8154 unsigned long transMethod = CFGTBL_Trans_Performant |
8155 (trans_support & CFGTBL_Trans_use_short_tags) |
8156 CFGTBL_Trans_enable_directed_msix |
8157 (trans_support & (CFGTBL_Trans_io_accel1 |
8158 CFGTBL_Trans_io_accel2));
8159 struct access_method access = SA5_performant_access;
8161 /* This is a bit complicated. There are 8 registers on
8162 * the controller that we write to, to tell it the 8 different
8163 * command sizes that may occur. It's a way of
8164 * reducing the DMA done to fetch each command. Encoded into
8165 * each command's tag are 3 bits which communicate to the controller
8166 * which of the eight sizes that command fits within. The size of
8167 * each command depends on how many scatter gather entries there are.
8168 * Each SG entry requires 16 bytes. The eight registers are programmed
8169 * with the number of 16-byte blocks a command of that size requires.
8170 * The smallest command possible requires 5 such 16-byte blocks;
8171 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
8172 * blocks. Note, this only extends to the SG entries contained
8173 * within the command block, and does not extend to chained blocks
8174 * of SG elements. bft[] contains the eight values we write to
8175 * the registers. They are not evenly distributed, but have more
8176 * sizes for small commands, and fewer sizes for larger commands.
8178 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
8179 #define MIN_IOACCEL2_BFT_ENTRY 5
8180 #define HPSA_IOACCEL2_HEADER_SZ 4
8181 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8182 13, 14, 15, 16, 17, 18, 19,
8183 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8184 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8185 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8186 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8187 16 * MIN_IOACCEL2_BFT_ENTRY);
8188 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
8189 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
8190 /* 5 = 1 s/g entry or 4k
8191 * 6 = 2 s/g entry or 8k
8192 * 8 = 4 s/g entry or 16k
8193 * 10 = 6 s/g entry or 24k
8196 /* If the controller supports either ioaccel method then
8197 * we can also use the RAID stack submit path that does not
8198 * perform the superfluous readl() after each command submission.
8200 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8201 access = SA5_performant_access_no_read;
8203 /* Controller spec: zero out this buffer. */
8204 for (i = 0; i < h->nreply_queues; i++)
8205 memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);
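	/*
	 * h->blockFetchTable is what the submit path consults: the bucket
	 * index for a command's SG count is folded into the low bits of
	 * the command's bus address (the "lower bits of the command
	 * address" mentioned above calc_bucket_map()), so each submission
	 * tells the controller how many 16-byte blocks to fetch.
	 */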

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));

	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);
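		/*
		 * Presumably (this init code suggests, though it is not
		 * spelled out here) the mode-1 completion path recognizes
		 * new entries by value: a slot still equal to
		 * IOACCEL_MODE1_REPLY_UNUSED has not been written by the
		 * controller yet, which is why the rings are pre-filled
		 * with this sentinel rather than simply zeroed.
		 */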

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
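		/*
		 * Unlike the performant-mode buckets, which live in the
		 * transport table, the ioaccel2 block fetch registers are
		 * located indirectly: the config table (checked above to
		 * keep io_accel_request_size_offset at 0xb8) publishes
		 * their offset within the controller's memory BAR, and
		 * they are mapped and programmed below, one register per
		 * bft2[] bucket.
		 */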
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
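	/*
	 * 128 = 2^7, so with a 128-byte-aligned pool base and an element
	 * size that is a multiple of 128, every command in the pool keeps
	 * its 7 low address bits zero, leaving them free for the hardware
	 * use described above; the modulo check enforces the element-size
	 * half of that invariant at compile time.
	 */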
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
			sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}

/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);
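	/*
	 * For example (illustrative numbers only): with max_commands =
	 * 1024, each reply queue is a ring of 1024 8-byte entries, i.e.
	 * 8 KB of DMA-coherent memory per queue, with one queue per
	 * MSI-X vector as set up just above.
	 */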

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
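
/*
 * The drain loop above leans on the command pool's reference counts:
 * each pass takes a temporary reference on every command, so a count
 * above 1 means someone else holds the command (it is in flight), and
 * cmd_free() then drops the extra reference.  The loop polls, sleeping
 * between passes, until no in-flight ioaccel commands remain.
 */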

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds
 * one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);