drivers/scsi/hpsa.c
/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20    /* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10    /* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000      /* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000       /* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
                "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");
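/*
 * Both parameters use plain module_param handling, so they can be given
 * at load time, e.g.:
 *
 *      modprobe hpsa hpsa_simple_mode=1
 *
 * and, being marked S_IWUSR, may also be changed by root at runtime via
 * /sys/module/hpsa/parameters/.
 */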

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID concatenated with Subsystem Vendor ID
 *             (device ID in the upper 16 bits, vendor ID in the lower 16)
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
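/*
 * For example, the first entry below, 0x3241103C ("Smart Array P212"),
 * is PCI subsystem device 0x3241 with subsystem vendor 0x103C:
 * board_id = (0x3241 << 16) | 0x103C.
 */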
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array P244br", &SA5_access},
        {0x21BE103C, "Smart Array P741m", &SA5_access},
        {0x21BF103C, "Smart HBA H240ar", &SA5_access},
        {0x21C0103C, "Smart Array P440ar", &SA5_access},
        {0x21C1103C, "Smart Array P840ar", &SA5_access},
        {0x21C2103C, "Smart Array P440", &SA5_access},
        {0x21C3103C, "Smart Array P441", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array P841", &SA5_access},
        {0x21C6103C, "Smart HBA H244br", &SA5_access},
        {0x21C7103C, "Smart HBA H240", &SA5_access},
        {0x21C8103C, "Smart HBA H241", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array P246br", &SA5_access},
        {0x21CB103C, "Smart Array P840", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart HBA", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
        void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
                                            struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
                               u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
                                    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
        return c->scsi_cmd == SCSI_CMD_IDLE;
}

/* extract sense key, asc, and ascq from sense data.  -1 (0xff in the
 * u8 outputs) means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
                        u8 *sense_key, u8 *asc, u8 *ascq)
{
        struct scsi_sense_hdr sshdr;
        bool rc;

        *sense_key = -1;
        *asc = -1;
        *ascq = -1;

        if (sense_data_len < 1)
                return;

        rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
        if (rc) {
                *sense_key = sshdr.sense_key;
                *asc = sshdr.asc;
                *ascq = sshdr.ascq;
        }
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        u8 sense_key, asc, ascq;
        int sense_len;

        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
                sense_len = sizeof(c->err_info->SenseInfo);
        else
                sense_len = c->err_info->SenseLen;

        decode_sense_data(c->err_info->SenseInfo, sense_len,
                                &sense_key, &asc, &ascq);
        if (sense_key != UNIT_ATTENTION || asc == 0xff) /* 0xff: invalid (-1 in u8) */
                return 0;

        switch (asc) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: a state change detected, command retried\n",
                        h->devname);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev,
                        "%s: LUN failure detected\n", h->devname);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev,
                        "%s: report LUN data changed\n", h->devname);
        /*
         * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
         * target (array) devices.
         */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev,
                        "%s: a power on or device reset detected\n",
                        h->devname);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev,
                        "%s: unit attention cleared by another initiator\n",
                        h->devname);
                break;
        default:
                dev_warn(&h->pdev->dev,
                        "%s: unknown unit attention detected\n",
                        h->devname);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA " device busy\n");
        return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int ld;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        ld = lockup_detected(h);

        return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n",
                        atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
        0x323D103C, /* Smart Array P700m */
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
        int i;

        for (i = 0; i < nelems; i++)
                if (a[i] == board_id)
                        return 1;
        return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
        return !board_id_in_array(unresettable_controller,
                        ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        return !board_id_in_array(soft_unresettable_controller,
                        ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
        return board_id_in_array(needs_abort_tags_swizzled,
                        ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
        "1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0     0
#define HPSA_RAID_4     1
#define HPSA_RAID_1     2       /* also used for RAID 10 */
#define HPSA_RAID_5     3       /* also used for RAID 50 */
#define HPSA_RAID_51    4
#define HPSA_RAID_6     5       /* also used for RAID 60 */
#define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
                        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
        host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        &dev_attr_lockup_detected,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        NULL,
};

#define HPSA_NRESERVED_CMDS     (HPSA_CMDS_RESERVED_FOR_ABORTS + \
                HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_configure        = hpsa_slave_configure,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
        .max_sectors = 8192,
        .no_write_same = 1,
};

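/*
 * Note on the reply-queue phase bit used below: bit 0 of each posted
 * reply entry toggles on every pass around the ring.  An entry is valid
 * only while that bit matches rq->wraparound; a mismatch means the slot
 * has not been written on the current pass, hence FIFO_EMPTY.
 */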
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
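/*
 * Worked example (illustration only): in normal performant mode a
 * command with c->Header.SGList == 3 is tagged as
 *
 *      c->busaddr |= 1 | (h->blockFetchTable[3] << 1);
 *
 * bit 0 selects performant mode and bits 1-3 carry the block fetch
 * table entry, matching set_performant_mode() below.
 */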

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
                                        int reply_queue)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vector))
                        return;
                if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
                else
                        c->Header.ReplyQueue = reply_queue % h->nreply_queues;
        }
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        else
                cp->ReplyQueue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
         *  - pull count (bits 1-3)
         *  - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                                        IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
                &h->ioaccel2_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c,
                                                int reply_queue)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /*
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                cp->reply_queue = smp_processor_id() % h->nreply_queues;
        else
                cp->reply_queue = reply_queue % h->nreply_queues;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
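/* i.e. sample the heartbeat every 240 s while flashing, every 30 s otherwise */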
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
                struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c, int reply_queue)
{
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        case IOACCEL2_TMF:
                set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
                writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
                break;
        default:
                set_performant_mode(h, c, reply_queue);
                h->access.submit_command(h, c);
        }
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
        if (unlikely(c->abort_pending))
                return finish_cmd(c);

        __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
        struct hpsa_scsi_dev_t *dev, char *description)
{
        dev_printk(level, &h->pdev->dev,
                        "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
                        h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
                        description,
                        scsi_device_type(dev->devtype),
                        dev->vendor,
                        dev->model,
                        dev->raid_level > RAID_UNKNOWN ?
                                "RAID-?" : raid_label[dev->raid_level],
                        dev->offload_config ? '+' : '-',
                        dev->offload_enabled ? '+' : '-',
                        dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, device,
                device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
        device->offload_to_be_enabled = device->offload_enabled;
        device->offload_enabled = 0;
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        int offload_enabled;
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;

        /* Raid offload parameters changed.  Careful about the ordering. */
        if (new_entry->offload_config && new_entry->offload_enabled) {
                /*
                 * if drive is newly offload_enabled, we want to copy the
                 * raid map data first.  If previously offload_enabled and
                 * offload_config were set, raid map data had better be
                 * the same as it was before.  if raid map data is changed
                 * then it had better be the case that
                 * h->dev[entry]->offload_enabled is currently 0.
                 */
                h->dev[entry]->raid_map = new_entry->raid_map;
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
        }
        if (new_entry->hba_ioaccel_enabled) {
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
                wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
        }
        h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
        h->dev[entry]->offload_config = new_entry->offload_config;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
        h->dev[entry]->queue_depth = new_entry->queue_depth;

        /*
         * We can turn off ioaccel offload now, but need to delay turning
         * it on until we can update h->dev[entry]->phys_disk[], but we
         * can't do that until all the devices are updated.
         */
        h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
        if (!new_entry->offload_enabled)
                h->dev[entry]->offload_enabled = 0;

        offload_enabled = h->dev[entry]->offload_enabled;
        h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
        hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
        h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
        new_entry->offload_to_be_enabled = new_entry->offload_enabled;
        new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])
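/* (open-coded equivalent of memcmp((a), (b), 8) == 0 on 8-byte SCSI-3 addresses) */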

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* Device attributes that can change, but don't mean
         * that the device is a different device, nor that the OS
         * needs to be told anything about the change.
         */
        if (dev1->raid_level != dev2->raid_level)
                return 1;
        if (dev1->offload_config != dev2->offload_config)
                return 1;
        if (dev1->offload_enabled != dev2->offload_enabled)
                return 1;
        if (dev1->queue_depth != dev2->queue_depth)
                return 1;
        return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
1295 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1296         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1297         int *index)
1298 {
1299         int i;
1300 #define DEVICE_NOT_FOUND 0
1301 #define DEVICE_CHANGED 1
1302 #define DEVICE_SAME 2
1303 #define DEVICE_UPDATED 3
1304         for (i = 0; i < haystack_size; i++) {
1305                 if (haystack[i] == NULL) /* previously removed. */
1306                         continue;
1307                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1308                         *index = i;
1309                         if (device_is_the_same(needle, haystack[i])) {
1310                                 if (device_updated(needle, haystack[i]))
1311                                         return DEVICE_UPDATED;
1312                                 return DEVICE_SAME;
1313                         } else {
1314                                 /* Keep offline devices offline */
1315                                 if (needle->volume_offline)
1316                                         return DEVICE_NOT_FOUND;
1317                                 return DEVICE_CHANGED;
1318                         }
1319                 }
1320         }
1321         *index = -1;
1322         return DEVICE_NOT_FOUND;
1323 }
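
/*
 * Illustrative sketch (not part of the driver): callers dispatch on the
 * return value, much as adjust_hpsa_scsi_table() does below:
 *
 *	device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
 *	if (device_change == DEVICE_NOT_FOUND)
 *		... remove csd from h->dev[] ...
 *	else if (device_change == DEVICE_CHANGED)
 *		... replace h->dev[i] with sd[entry] ...
 *	else if (device_change == DEVICE_UPDATED)
 *		... copy the minor attributes in place ...
 *	(DEVICE_SAME needs no action.)
 */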
1324
1325 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1326                                         unsigned char scsi3addr[])
1327 {
1328         struct offline_device_entry *device;
1329         unsigned long flags;
1330
1331         /* Check to see if device is already on the list */
1332         spin_lock_irqsave(&h->offline_device_lock, flags);
1333         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1334                 if (memcmp(device->scsi3addr, scsi3addr,
1335                         sizeof(device->scsi3addr)) == 0) {
1336                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1337                         return;
1338                 }
1339         }
1340         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1341
1342         /* Device is not on the list, add it. */
1343         device = kmalloc(sizeof(*device), GFP_KERNEL);
1344         if (!device) {
1345                 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1346                 return;
1347         }
1348         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1349         spin_lock_irqsave(&h->offline_device_lock, flags);
1350         list_add_tail(&device->offline_list, &h->offline_device_list);
1351         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1352 }
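
/*
 * Entries queued here are only ever appended under
 * h->offline_device_lock; presumably the driver's periodic monitor
 * worker later walks h->offline_device_list and triggers a rescan once
 * a volume comes back online.  Keeping this list separate from
 * h->devlock is deliberate (see adjust_hpsa_scsi_table() below).
 */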
1353
1354 /* Print a message explaining various offline volume states */
1355 static void hpsa_show_volume_status(struct ctlr_info *h,
1356         struct hpsa_scsi_dev_t *sd)
1357 {
1358         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1359                 dev_info(&h->pdev->dev,
1360                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1361                         h->scsi_host->host_no,
1362                         sd->bus, sd->target, sd->lun);
1363         switch (sd->volume_offline) {
1364         case HPSA_LV_OK:
1365                 break;
1366         case HPSA_LV_UNDERGOING_ERASE:
1367                 dev_info(&h->pdev->dev,
1368                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1369                         h->scsi_host->host_no,
1370                         sd->bus, sd->target, sd->lun);
1371                 break;
1372         case HPSA_LV_UNDERGOING_RPI:
1373                 dev_info(&h->pdev->dev,
1374                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1375                         h->scsi_host->host_no,
1376                         sd->bus, sd->target, sd->lun);
1377                 break;
1378         case HPSA_LV_PENDING_RPI:
1379                 dev_info(&h->pdev->dev,
1380                                 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1381                                 h->scsi_host->host_no,
1382                                 sd->bus, sd->target, sd->lun);
1383                 break;
1384         case HPSA_LV_ENCRYPTED_NO_KEY:
1385                 dev_info(&h->pdev->dev,
1386                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1387                         h->scsi_host->host_no,
1388                         sd->bus, sd->target, sd->lun);
1389                 break;
1390         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1391                 dev_info(&h->pdev->dev,
1392                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1393                         h->scsi_host->host_no,
1394                         sd->bus, sd->target, sd->lun);
1395                 break;
1396         case HPSA_LV_UNDERGOING_ENCRYPTION:
1397                 dev_info(&h->pdev->dev,
1398                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1399                         h->scsi_host->host_no,
1400                         sd->bus, sd->target, sd->lun);
1401                 break;
1402         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1403                 dev_info(&h->pdev->dev,
1404                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1405                         h->scsi_host->host_no,
1406                         sd->bus, sd->target, sd->lun);
1407                 break;
1408         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1409                 dev_info(&h->pdev->dev,
1410                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1411                         h->scsi_host->host_no,
1412                         sd->bus, sd->target, sd->lun);
1413                 break;
1414         case HPSA_LV_PENDING_ENCRYPTION:
1415                 dev_info(&h->pdev->dev,
1416                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1417                         h->scsi_host->host_no,
1418                         sd->bus, sd->target, sd->lun);
1419                 break;
1420         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1421                 dev_info(&h->pdev->dev,
1422                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1423                         h->scsi_host->host_no,
1424                         sd->bus, sd->target, sd->lun);
1425                 break;
1426         }
1427 }
1428
1429 /*
1430  * Figure out the list of physical drive pointers for a logical drive with
1431  * RAID offload configured.
1432  */
1433 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1434                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1435                                 struct hpsa_scsi_dev_t *logical_drive)
1436 {
1437         struct raid_map_data *map = &logical_drive->raid_map;
1438         struct raid_map_disk_data *dd = &map->data[0];
1439         int i, j;
1440         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1441                                 le16_to_cpu(map->metadata_disks_per_row);
1442         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1443                                 le16_to_cpu(map->layout_map_count) *
1444                                 total_disks_per_row;
1445         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1446                                 total_disks_per_row;
1447         int qdepth;
1448
1449         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1450                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1451
1452         qdepth = 0;
1453         for (i = 0; i < nraid_map_entries; i++) {
1454                 logical_drive->phys_disk[i] = NULL;
1455                 if (!logical_drive->offload_config)
1456                         continue;
1457                 for (j = 0; j < ndevices; j++) {
1458                         if (dev[j]->devtype != TYPE_DISK)
1459                                 continue;
1460                         if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1461                                 continue;
1462                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1463                                 continue;
1464
1465                         logical_drive->phys_disk[i] = dev[j];
1466                         if (i < nphys_disk)
1467                                 qdepth = min(h->nr_cmds, qdepth +
1468                                     logical_drive->phys_disk[i]->queue_depth);
1469                         break;
1470                 }
1471
1472                 /*
1473                  * This can happen if a physical drive is removed and
1474                  * the logical drive is degraded.  In that case, the RAID
1475                  * map data will refer to a physical disk which isn't actually
1476                  * present.  If so, offload_enabled should already
1477                  * be 0, but we'll turn it off here just in case.
1478                  */
1479                 if (!logical_drive->phys_disk[i]) {
1480                         logical_drive->offload_enabled = 0;
1481                         logical_drive->offload_to_be_enabled = 0;
1482                         logical_drive->queue_depth = 8;
1483                 }
1484         }
1485         if (nraid_map_entries)
1486                 /*
1487                  * This is correct for reads, too high for full stripe writes,
1488                  * way too high for partial stripe writes
1489                  */
1490                 logical_drive->queue_depth = qdepth;
1491         else
1492                 logical_drive->queue_depth = h->nr_cmds;
1493 }
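
/*
 * Worked example of the queue-depth arithmetic above (illustrative
 * numbers only): a RAID 1 volume with layout_map_count = 2,
 * data_disks_per_row = 1 and metadata_disks_per_row = 0 has
 * nphys_disk = 2, so qdepth sums the queue_depth of both mirror legs.
 * Two disks at queue_depth 31 each yield queue_depth 62, unless
 * h->nr_cmds is smaller, in which case the min() above caps it there.
 */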
1494
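/*
 * Walk every logical drive and (re)derive its phys_disk[] pointers from
 * the current device list.  Drives with offload already enabled are
 * skipped: their RAID maps cannot be changing underneath us, as the
 * comment in the loop body explains.
 */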
1495 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1496                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1497 {
1498         int i;
1499
1500         for (i = 0; i < ndevices; i++) {
1501                 if (dev[i]->devtype != TYPE_DISK)
1502                         continue;
1503                 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1504                         continue;
1505
1506                 /*
1507                  * If offload is currently enabled, the RAID map and
1508                  * phys_disk[] assignment *better* not be changing
1509                  * and since it isn't changing, we do not need to
1510                  * update it.
1511                  */
1512                 if (dev[i]->offload_enabled)
1513                         continue;
1514
1515                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1516         }
1517 }
1518
1519 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1520         struct hpsa_scsi_dev_t *sd[], int nsds)
1521 {
1522         /* sd contains scsi3 addresses and devtypes, and inquiry
1523          * data.  This function takes what's in sd to be the current
1524          * reality and updates h->dev[] to reflect that reality.
1525          */
1526         int i, entry, device_change, changes = 0;
1527         struct hpsa_scsi_dev_t *csd;
1528         unsigned long flags;
1529         struct hpsa_scsi_dev_t **added, **removed;
1530         int nadded, nremoved;
1531         struct Scsi_Host *sh = NULL;
1532
1533         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1534         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1535
1536         if (!added || !removed) {
1537                 dev_warn(&h->pdev->dev,
1538                         "out of memory in adjust_hpsa_scsi_table\n");
1539                 goto free_and_out;
1540         }
1541
1542         spin_lock_irqsave(&h->devlock, flags);
1543
1544         /* find any devices in h->dev[] that are not in
1545          * sd[] and remove them from h->dev[], and for any
1546          * devices which have changed, remove the old device
1547          * info and add the new device info.
1548          * If minor device attributes change, just update
1549          * the existing device structure.
1550          */
1551         i = 0;
1552         nremoved = 0;
1553         nadded = 0;
1554         while (i < h->ndevices) {
1555                 csd = h->dev[i];
1556                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1557                 if (device_change == DEVICE_NOT_FOUND) {
1558                         changes++;
1559                         hpsa_scsi_remove_entry(h, hostno, i,
1560                                 removed, &nremoved);
1561                         continue; /* remove ^^^, hence i not incremented */
1562                 } else if (device_change == DEVICE_CHANGED) {
1563                         changes++;
1564                         hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1565                                 added, &nadded, removed, &nremoved);
1566                         /* Set it to NULL to prevent it from being freed
1567                          * at the bottom of hpsa_update_scsi_devices()
1568                          */
1569                         sd[entry] = NULL;
1570                 } else if (device_change == DEVICE_UPDATED) {
1571                         hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1572                 }
1573                 i++;
1574         }
1575
1576         /* Now, make sure every device listed in sd[] is also
1577          * listed in h->dev[], adding them if they aren't found
1578          */
1579
1580         for (i = 0; i < nsds; i++) {
1581                 if (!sd[i]) /* if already added above. */
1582                         continue;
1583
1584                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1585                  * as the SCSI mid-layer does not handle such devices well.
1586                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1587                  * at 160Hz, and prevents the system from coming up.
1588                  */
1589                 if (sd[i]->volume_offline) {
1590                         hpsa_show_volume_status(h, sd[i]);
1591                         hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1592                         continue;
1593                 }
1594
1595                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1596                                         h->ndevices, &entry);
1597                 if (device_change == DEVICE_NOT_FOUND) {
1598                         changes++;
1599                         if (hpsa_scsi_add_entry(h, hostno, sd[i],
1600                                 added, &nadded) != 0)
1601                                 break;
1602                         sd[i] = NULL; /* prevent from being freed later. */
1603                 } else if (device_change == DEVICE_CHANGED) {
1604                         /* should never happen... */
1605                         changes++;
1606                         dev_warn(&h->pdev->dev,
1607                                 "device unexpectedly changed.\n");
1608                         /* but if it does happen, we just ignore that device */
1609                 }
1610         }
1611         hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1612
1613         /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1614          * any logical drives that need it enabled.
1615          */
1616         for (i = 0; i < h->ndevices; i++)
1617                 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1618
1619         spin_unlock_irqrestore(&h->devlock, flags);
1620
1621         /* Monitor devices which are in one of several NOT READY states to be
1622          * brought online later. This must be done without holding h->devlock,
1623          * so don't touch h->dev[]
1624          */
1625         for (i = 0; i < nsds; i++) {
1626                 if (!sd[i]) /* if already added above. */
1627                         continue;
1628                 if (sd[i]->volume_offline)
1629                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1630         }
1631
1632         /* Don't notify the scsi mid layer of any changes the first time
1633          * through (or if there are no changes); scsi_scan_host() will do
1634          * it later, the first time through.
1635          */
1636         if (hostno == -1 || !changes)
1637                 goto free_and_out;
1638
1639         sh = h->scsi_host;
1640         /* Notify scsi mid layer of any removed devices */
1641         for (i = 0; i < nremoved; i++) {
1642                 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1643                         struct scsi_device *sdev =
1644                                 scsi_device_lookup(sh, removed[i]->bus,
1645                                         removed[i]->target, removed[i]->lun);
1646                         if (sdev != NULL) {
1647                                 scsi_remove_device(sdev);
1648                                 scsi_device_put(sdev);
1649                         } else {
1650                                 /*
1651                                  * We don't expect to get here.
1652                                  * Future cmds to this device will get a selection
1653                                  * timeout as if the device were gone.
1654                                  */
1655                                 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1656                                         "didn't find device for removal.");
1657                         }
1658                 }
1659                 kfree(removed[i]);
1660                 removed[i] = NULL;
1661         }
1662
1663         /* Notify scsi mid layer of any added devices */
1664         for (i = 0; i < nadded; i++) {
1665                 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1666                         continue;
1667                 if (scsi_add_device(sh, added[i]->bus,
1668                         added[i]->target, added[i]->lun) == 0)
1669                         continue;
1670                 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1671                                         "addition failed, device not added.");
1672                 /* Now we have to remove it from h->dev,
1673                  * since it didn't get added to the scsi mid layer.
1674                  */
1675                 fixup_botched_add(h, added[i]);
1676                 added[i] = NULL;
1677         }
1678
1679 free_and_out:
1680         kfree(added);
1681         kfree(removed);
1682 }
1683
1684 /*
1685  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1686  * Assumes h->devlock is held.
1687  */
1688 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1689         int bus, int target, int lun)
1690 {
1691         int i;
1692         struct hpsa_scsi_dev_t *sd;
1693
1694         for (i = 0; i < h->ndevices; i++) {
1695                 sd = h->dev[i];
1696                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1697                         return sd;
1698         }
1699         return NULL;
1700 }
1701
1702 static int hpsa_slave_alloc(struct scsi_device *sdev)
1703 {
1704         struct hpsa_scsi_dev_t *sd;
1705         unsigned long flags;
1706         struct ctlr_info *h;
1707
1708         h = sdev_to_hba(sdev);
1709         spin_lock_irqsave(&h->devlock, flags);
1710         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1711                 sdev_id(sdev), sdev->lun);
1712         if (likely(sd)) {
1713                 atomic_set(&sd->ioaccel_cmds_out, 0);
1714                 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1715         } else
1716                 sdev->hostdata = NULL;
1717         spin_unlock_irqrestore(&h->devlock, flags);
1718         return 0;
1719 }
1720
1721 /* configure scsi device based on internal per-device structure */
1722 static int hpsa_slave_configure(struct scsi_device *sdev)
1723 {
1724         struct hpsa_scsi_dev_t *sd;
1725         int queue_depth;
1726
1727         sd = sdev->hostdata;
1728         sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1729
1730         if (sd)
1731                 queue_depth = sd->queue_depth != 0 ?
1732                         sd->queue_depth : sdev->host->can_queue;
1733         else
1734                 queue_depth = sdev->host->can_queue;
1735
1736         scsi_change_queue_depth(sdev, queue_depth);
1737
1738         return 0;
1739 }
1740
1741 static void hpsa_slave_destroy(struct scsi_device *sdev)
1742 {
1743         /* nothing to do. */
1744 }
1745
1746 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1747 {
1748         int i;
1749
1750         if (!h->ioaccel2_cmd_sg_list)
1751                 return;
1752         for (i = 0; i < h->nr_cmds; i++) {
1753                 kfree(h->ioaccel2_cmd_sg_list[i]);
1754                 h->ioaccel2_cmd_sg_list[i] = NULL;
1755         }
1756         kfree(h->ioaccel2_cmd_sg_list);
1757         h->ioaccel2_cmd_sg_list = NULL;
1758 }
1759
1760 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1761 {
1762         int i;
1763
1764         if (h->chainsize <= 0)
1765                 return 0;
1766
1767         h->ioaccel2_cmd_sg_list =
1768                 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1769                                         GFP_KERNEL);
1770         if (!h->ioaccel2_cmd_sg_list)
1771                 return -ENOMEM;
1772         for (i = 0; i < h->nr_cmds; i++) {
1773                 h->ioaccel2_cmd_sg_list[i] =
1774                         kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1775                                         h->maxsgentries, GFP_KERNEL);
1776                 if (!h->ioaccel2_cmd_sg_list[i])
1777                         goto clean;
1778         }
1779         return 0;
1780
1781 clean:
1782         hpsa_free_ioaccel2_sg_chain_blocks(h);
1783         return -ENOMEM;
1784 }
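
/*
 * Rough memory cost of the pool above (illustrative arithmetic; exact
 * struct sizes come from hpsa_cmd.h): each of the h->nr_cmds commands
 * gets a private chain block of h->maxsgentries ioaccel2_sg_element
 * entries, so e.g. 1024 commands x 128 entries x 16 bytes per element
 * is about 2 MB, paid once at init time instead of per-I/O.
 */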
1785
1786 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1787 {
1788         int i;
1789
1790         if (!h->cmd_sg_list)
1791                 return;
1792         for (i = 0; i < h->nr_cmds; i++) {
1793                 kfree(h->cmd_sg_list[i]);
1794                 h->cmd_sg_list[i] = NULL;
1795         }
1796         kfree(h->cmd_sg_list);
1797         h->cmd_sg_list = NULL;
1798 }
1799
1800 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1801 {
1802         int i;
1803
1804         if (h->chainsize <= 0)
1805                 return 0;
1806
1807         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1808                                 GFP_KERNEL);
1809         if (!h->cmd_sg_list) {
1810                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1811                 return -ENOMEM;
1812         }
1813         for (i = 0; i < h->nr_cmds; i++) {
1814                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1815                                                 h->chainsize, GFP_KERNEL);
1816                 if (!h->cmd_sg_list[i]) {
1817                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1818                         goto clean;
1819                 }
1820         }
1821         return 0;
1822
1823 clean:
1824         hpsa_free_sg_chain_blocks(h);
1825         return -ENOMEM;
1826 }
1827
1828 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1829         struct io_accel2_cmd *cp, struct CommandList *c)
1830 {
1831         struct ioaccel2_sg_element *chain_block;
1832         u64 temp64;
1833         u32 chain_size;
1834
1835         chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1836         chain_size = le32_to_cpu(cp->data_len);
1837         temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1838                                 PCI_DMA_TODEVICE);
1839         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1840                 /* prevent subsequent unmapping */
1841                 cp->sg->address = 0;
1842                 return -1;
1843         }
1844         cp->sg->address = cpu_to_le64(temp64);
1845         return 0;
1846 }
1847
1848 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1849         struct io_accel2_cmd *cp)
1850 {
1851         struct ioaccel2_sg_element *chain_sg;
1852         u64 temp64;
1853         u32 chain_size;
1854
1855         chain_sg = cp->sg;
1856         temp64 = le64_to_cpu(chain_sg->address);
1857         chain_size = le32_to_cpu(cp->data_len);
1858         pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1859 }
1860
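/*
 * SG chaining, as implemented by the two helpers below: when a command
 * needs more than h->max_cmd_sg_entries descriptors, the last slot of
 * the embedded c->SG[] array becomes an HPSA_SG_CHAIN pointer to the
 * preallocated chain block (h->cmd_sg_list[c->cmdindex]) holding the
 * remaining SGTotal - max_cmd_sg_entries descriptors; the chain block
 * itself is DMA-mapped so the controller can fetch it.
 */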
1861 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1862         struct CommandList *c)
1863 {
1864         struct SGDescriptor *chain_sg, *chain_block;
1865         u64 temp64;
1866         u32 chain_len;
1867
1868         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1869         chain_block = h->cmd_sg_list[c->cmdindex];
1870         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1871         chain_len = sizeof(*chain_sg) *
1872                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1873         chain_sg->Len = cpu_to_le32(chain_len);
1874         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1875                                 PCI_DMA_TODEVICE);
1876         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1877                 /* prevent subsequent unmapping */
1878                 chain_sg->Addr = cpu_to_le64(0);
1879                 return -1;
1880         }
1881         chain_sg->Addr = cpu_to_le64(temp64);
1882         return 0;
1883 }
1884
1885 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1886         struct CommandList *c)
1887 {
1888         struct SGDescriptor *chain_sg;
1889
1890         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1891                 return;
1892
1893         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1894         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1895                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1896 }
1897
1899 /* Decode the various types of errors on the ioaccel2 path.
1900  * Return 1 for any error that should generate a RAID path retry.
1901  * Return 0 for errors that don't require a RAID path retry.
1902  */
1903 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1904                                         struct CommandList *c,
1905                                         struct scsi_cmnd *cmd,
1906                                         struct io_accel2_cmd *c2)
1907 {
1908         int data_len;
1909         int retry = 0;
1910         u32 ioaccel2_resid = 0;
1911
1912         switch (c2->error_data.serv_response) {
1913         case IOACCEL2_SERV_RESPONSE_COMPLETE:
1914                 switch (c2->error_data.status) {
1915                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1916                         break;
1917                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1918                         cmd->result |= SAM_STAT_CHECK_CONDITION;
1919                         if (c2->error_data.data_present !=
1920                                         IOACCEL2_SENSE_DATA_PRESENT) {
1921                                 memset(cmd->sense_buffer, 0,
1922                                         SCSI_SENSE_BUFFERSIZE);
1923                                 break;
1924                         }
1925                         /* copy the sense data */
1926                         data_len = c2->error_data.sense_data_len;
1927                         if (data_len > SCSI_SENSE_BUFFERSIZE)
1928                                 data_len = SCSI_SENSE_BUFFERSIZE;
1929                         if (data_len > sizeof(c2->error_data.sense_data_buff))
1930                                 data_len =
1931                                         sizeof(c2->error_data.sense_data_buff);
1932                         memcpy(cmd->sense_buffer,
1933                                 c2->error_data.sense_data_buff, data_len);
1934                         retry = 1;
1935                         break;
1936                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1937                         retry = 1;
1938                         break;
1939                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1940                         retry = 1;
1941                         break;
1942                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1943                         retry = 1;
1944                         break;
1945                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1946                         retry = 1;
1947                         break;
1948                 default:
1949                         retry = 1;
1950                         break;
1951                 }
1952                 break;
1953         case IOACCEL2_SERV_RESPONSE_FAILURE:
1954                 switch (c2->error_data.status) {
1955                 case IOACCEL2_STATUS_SR_IO_ERROR:
1956                 case IOACCEL2_STATUS_SR_IO_ABORTED:
1957                 case IOACCEL2_STATUS_SR_OVERRUN:
1958                         retry = 1;
1959                         break;
1960                 case IOACCEL2_STATUS_SR_UNDERRUN:
1961                         cmd->result = (DID_OK << 16);           /* host byte */
1962                         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1963                         ioaccel2_resid = get_unaligned_le32(
1964                                                 &c2->error_data.resid_cnt[0]);
1965                         scsi_set_resid(cmd, ioaccel2_resid);
1966                         break;
1967                 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1968                 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1969                 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1970                         /* We will get an event from ctlr to trigger rescan */
1971                         retry = 1;
1972                         break;
1973                 default:
1974                         retry = 1;
1975                 }
1976                 break;
1977         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1978                 break;
1979         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1980                 break;
1981         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1982                 retry = 1;
1983                 break;
1984         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1985                 break;
1986         default:
1987                 retry = 1;
1988                 break;
1989         }
1990
1991         return retry;   /* retry on raid path? */
1992 }
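
/*
 * Note the defensive pattern above: any service response or status code
 * that is not explicitly recognized falls through to retry = 1, so the
 * RAID path acts as the safety net for anything the ioaccel2 path
 * cannot express.
 */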
1993
1994 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
1995                 struct CommandList *c)
1996 {
1997         /*
1998          * Prevent the following race in the abort handler:
1999          *
2000          * 1. LLD is requested to abort a SCSI command
2001          * 2. The SCSI command completes
2002          * 3. The struct CommandList associated with step 2 is made available
2003          * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2004          * 5. Abort handler follows scsi_cmnd->host_scribble and
2005  *    finds struct CommandList and tries to abort it.
2006          * Now we have aborted the wrong command.
2007          *
2008          * Clear c->scsi_cmd here so that the abort handler will know this
2009          * command has completed.  Then, check to see if the abort handler is
2010          * waiting for this command, and, if so, wake it.
2011          */
2012         c->scsi_cmd = SCSI_CMD_IDLE;
2013         mb(); /* Ensure c->scsi_cmd is set to SCSI_CMD_IDLE */
2014         if (c->abort_pending) {
2015                 c->abort_pending = false;
2016                 wake_up_all(&h->abort_sync_wait_queue);
2017         }
2018 }
2019
2020 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2021                                       struct CommandList *c)
2022 {
2023         hpsa_cmd_resolve_events(h, c);
2024         cmd_tagged_free(h, c);
2025 }
2026
2027 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2028                 struct CommandList *c, struct scsi_cmnd *cmd)
2029 {
2030         hpsa_cmd_resolve_and_free(h, c);
2031         cmd->scsi_done(cmd);
2032 }
2033
2034 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2035 {
2036         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2037         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2038 }
2039
2040 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2041 {
2042         cmd->result = DID_ABORT << 16;
2043 }
2044
2045 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2046                                     struct scsi_cmnd *cmd)
2047 {
2048         hpsa_set_scsi_cmd_aborted(cmd);
2049         dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2050                          c->Request.CDB, c->err_info->ScsiStatus);
2051         hpsa_cmd_resolve_and_free(h, c);
2052 }
2053
2054 static void process_ioaccel2_completion(struct ctlr_info *h,
2055                 struct CommandList *c, struct scsi_cmnd *cmd,
2056                 struct hpsa_scsi_dev_t *dev)
2057 {
2058         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2059
2060         /* check for good status */
2061         if (likely(c2->error_data.serv_response == 0 &&
2062                         c2->error_data.status == 0))
2063                 return hpsa_cmd_free_and_done(h, c, cmd);
2064
2065         /* don't requeue a command which is being aborted */
2066         if (unlikely(c->abort_pending))
2067                 return hpsa_cmd_abort_and_free(h, c, cmd);
2068
2069         /*
2070          * Any RAID offload error results in retry which will use
2071          * the normal I/O path so the controller can handle whatever's
2072          * wrong.
2073          */
2074         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2075                 c2->error_data.serv_response ==
2076                         IOACCEL2_SERV_RESPONSE_FAILURE) {
2077                 if (c2->error_data.status ==
2078                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2079                         dev->offload_enabled = 0;
2080
2081                 return hpsa_retry_cmd(h, c);
2082         }
2083
2084         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2085                 return hpsa_retry_cmd(h, c);
2086
2087         return hpsa_cmd_free_and_done(h, c, cmd);
2088 }
2089
2090 /* Returns 0 on success, < 0 otherwise. */
2091 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2092                                         struct CommandList *cp)
2093 {
2094         u8 tmf_status = cp->err_info->ScsiStatus;
2095
2096         switch (tmf_status) {
2097         case CISS_TMF_COMPLETE:
2098                 /*
2099                  * CISS_TMF_COMPLETE never happens; instead,
2100                  * ei->CommandStatus == 0 in that case.
2101                  */
2102         case CISS_TMF_SUCCESS:
2103                 return 0;
2104         case CISS_TMF_INVALID_FRAME:
2105         case CISS_TMF_NOT_SUPPORTED:
2106         case CISS_TMF_FAILED:
2107         case CISS_TMF_WRONG_LUN:
2108         case CISS_TMF_OVERLAPPED_TAG:
2109                 break;
2110         default:
2111                 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2112                                 tmf_status);
2113                 break;
2114         }
2115         return -tmf_status;
2116 }
2117
2118 static void complete_scsi_command(struct CommandList *cp)
2119 {
2120         struct scsi_cmnd *cmd;
2121         struct ctlr_info *h;
2122         struct ErrorInfo *ei;
2123         struct hpsa_scsi_dev_t *dev;
2124         struct io_accel2_cmd *c2;
2125
2126         u8 sense_key;
2127         u8 asc;      /* additional sense code */
2128         u8 ascq;     /* additional sense code qualifier */
2129         unsigned long sense_data_size;
2130
2131         ei = cp->err_info;
2132         cmd = cp->scsi_cmd;
2133         h = cp->h;
2134         dev = cmd->device->hostdata;
2135         c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2136
2137         scsi_dma_unmap(cmd); /* undo the DMA mappings */
2138         if ((cp->cmd_type == CMD_SCSI) &&
2139                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2140                 hpsa_unmap_sg_chain_block(h, cp);
2141
2142         if ((cp->cmd_type == CMD_IOACCEL2) &&
2143                 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2144                 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2145
2146         cmd->result = (DID_OK << 16);           /* host byte */
2147         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2148
2149         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2150                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2151
2152         /*
2153          * We check for lockup status here as it may be set for
2154          * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2155          * fail_all_outstanding_cmds().
2156          */
2157         if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2158                 /* DID_NO_CONNECT will prevent a retry */
2159                 cmd->result = DID_NO_CONNECT << 16;
2160                 return hpsa_cmd_free_and_done(h, cp, cmd);
2161         }
2162
2163         if (cp->cmd_type == CMD_IOACCEL2)
2164                 return process_ioaccel2_completion(h, cp, cmd, dev);
2165
2166         scsi_set_resid(cmd, ei->ResidualCnt);
2167         if (ei->CommandStatus == 0)
2168                 return hpsa_cmd_free_and_done(h, cp, cmd);
2169
2170         /* For I/O accelerator commands, copy over some fields to the normal
2171          * CISS header used below for error handling.
2172          */
2173         if (cp->cmd_type == CMD_IOACCEL1) {
2174                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2175                 cp->Header.SGList = scsi_sg_count(cmd);
2176                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2177                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2178                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
2179                 cp->Header.tag = c->tag;
2180                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2181                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2182
2183                 /* Any RAID offload error results in retry which will use
2184                  * the normal I/O path so the controller can handle whatever's
2185                  * wrong.
2186                  */
2187                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2188                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2189                                 dev->offload_enabled = 0;
2190                         if (!cp->abort_pending)
2191                                 return hpsa_retry_cmd(h, cp);
2192                 }
2193         }
2194
2195         if (cp->abort_pending)
2196                 ei->CommandStatus = CMD_ABORTED;
2197
2198         /* an error has occurred */
2199         switch (ei->CommandStatus) {
2200
2201         case CMD_TARGET_STATUS:
2202                 cmd->result |= ei->ScsiStatus;
2203                 /* copy the sense data */
2204                 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2205                         sense_data_size = SCSI_SENSE_BUFFERSIZE;
2206                 else
2207                         sense_data_size = sizeof(ei->SenseInfo);
2208                 if (ei->SenseLen < sense_data_size)
2209                         sense_data_size = ei->SenseLen;
2210                 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2211                 if (ei->ScsiStatus)
2212                         decode_sense_data(ei->SenseInfo, sense_data_size,
2213                                 &sense_key, &asc, &ascq);
2214                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2215                         if (sense_key == ABORTED_COMMAND) {
2216                                 cmd->result |= DID_SOFT_ERROR << 16;
2217                                 break;
2218                         }
2219                         break;
2220                 }
2221                 /* Problem was not a check condition
2222                  * Pass it up to the upper layers...
2223                  */
2224                 if (ei->ScsiStatus) {
2225                         dev_warn(&h->pdev->dev,
2226                                 "cp %p has status 0x%x Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, Returning result: 0x%x\n",
2227                                 cp, ei->ScsiStatus, sense_key, asc, ascq,
2228                                 cmd->result);
2231                 } else {  /* scsi status is zero??? How??? */
2232                         dev_warn(&h->pdev->dev,
2233                                 "cp %p SCSI status was 0. Returning no connection.\n", cp);
2234
2235                         /* Ordinarily, this case should never happen,
2236                          * but there is a bug in some released firmware
2237                          * revisions that allows it to happen if, for
2238                          * example, a 4100 backplane loses power and
2239                          * the tape drive is in it.  We assume that
2240                          * it's a fatal error of some kind because we
2241                          * can't show that it wasn't. We will make it
2242                          * look like selection timeout since that is
2243                          * the most common reason for this to occur,
2244                          * and it's severe enough.
2245                          */
2246
2247                         cmd->result = DID_NO_CONNECT << 16;
2248                 }
2249                 break;
2250
2251         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2252                 break;
2253         case CMD_DATA_OVERRUN:
2254                 dev_warn(&h->pdev->dev,
2255                         "CDB %16phN data overrun\n", cp->Request.CDB);
2256                 break;
2257         case CMD_INVALID: {
2258                 /* print_bytes(cp, sizeof(*cp), 1, 0);
2259                 print_cmd(cp); */
2260                 /* We get CMD_INVALID if you address a non-existent device
2261                  * instead of a selection timeout (no response).  You will
2262                  * see this if you yank out a drive, then try to access it.
2263                  * This is kind of a shame because it means that any other
2264                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
2265                  * missing target. */
2266                 cmd->result = DID_NO_CONNECT << 16;
2267         }
2268                 break;
2269         case CMD_PROTOCOL_ERR:
2270                 cmd->result = DID_ERROR << 16;
2271                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2272                                 cp->Request.CDB);
2273                 break;
2274         case CMD_HARDWARE_ERR:
2275                 cmd->result = DID_ERROR << 16;
2276                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2277                         cp->Request.CDB);
2278                 break;
2279         case CMD_CONNECTION_LOST:
2280                 cmd->result = DID_ERROR << 16;
2281                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2282                         cp->Request.CDB);
2283                 break;
2284         case CMD_ABORTED:
2285                 /* Return now to avoid calling scsi_done(). */
2286                 return hpsa_cmd_abort_and_free(h, cp, cmd);
2287         case CMD_ABORT_FAILED:
2288                 cmd->result = DID_ERROR << 16;
2289                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2290                         cp->Request.CDB);
2291                 break;
2292         case CMD_UNSOLICITED_ABORT:
2293                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2294                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2295                         cp->Request.CDB);
2296                 break;
2297         case CMD_TIMEOUT:
2298                 cmd->result = DID_TIME_OUT << 16;
2299                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2300                         cp->Request.CDB);
2301                 break;
2302         case CMD_UNABORTABLE:
2303                 cmd->result = DID_ERROR << 16;
2304                 dev_warn(&h->pdev->dev, "Command unabortable\n");
2305                 break;
2306         case CMD_TMF_STATUS:
2307                 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2308                         cmd->result = DID_ERROR << 16;
2309                 break;
2310         case CMD_IOACCEL_DISABLED:
2311                 /* This only handles the direct pass-through case since RAID
2312                  * offload is handled above.  Just attempt a retry.
2313                  */
2314                 cmd->result = DID_SOFT_ERROR << 16;
2315                 dev_warn(&h->pdev->dev,
2316                                 "cp %p had HP SSD Smart Path error\n", cp);
2317                 break;
2318         default:
2319                 cmd->result = DID_ERROR << 16;
2320                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2321                                 cp, ei->CommandStatus);
2322         }
2323
2324         return hpsa_cmd_free_and_done(h, cp, cmd);
2325 }
2326
2327 static void hpsa_pci_unmap(struct pci_dev *pdev,
2328         struct CommandList *c, int sg_used, int data_direction)
2329 {
2330         int i;
2331
2332         for (i = 0; i < sg_used; i++)
2333                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2334                                 le32_to_cpu(c->SG[i].Len),
2335                                 data_direction);
2336 }
2337
2338 static int hpsa_map_one(struct pci_dev *pdev,
2339                 struct CommandList *cp,
2340                 unsigned char *buf,
2341                 size_t buflen,
2342                 int data_direction)
2343 {
2344         u64 addr64;
2345
2346         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2347                 cp->Header.SGList = 0;
2348                 cp->Header.SGTotal = cpu_to_le16(0);
2349                 return 0;
2350         }
2351
2352         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2353         if (dma_mapping_error(&pdev->dev, addr64)) {
2354                 /* Prevent subsequent unmap of something never mapped */
2355                 cp->Header.SGList = 0;
2356                 cp->Header.SGTotal = cpu_to_le16(0);
2357                 return -1;
2358         }
2359         cp->SG[0].Addr = cpu_to_le64(addr64);
2360         cp->SG[0].Len = cpu_to_le32(buflen);
2361         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2362         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2363         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2364         return 0;
2365 }
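
/*
 * Minimal usage sketch for hpsa_map_one() (hypothetical caller, for
 * illustration only; in this driver fill_cmd() normally does the
 * mapping for you):
 *
 *	c = cmd_alloc(h);
 *	if (hpsa_map_one(h->pdev, c, buf, bufsize, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	... issue the command, wait for completion, then ...
 *	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_FROMDEVICE);
 *	cmd_free(h, c);
 */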
2366
2367 #define NO_TIMEOUT ((unsigned long) -1)
2368 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2369 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2370         struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2371 {
2372         DECLARE_COMPLETION_ONSTACK(wait);
2373
2374         c->waiting = &wait;
2375         __enqueue_cmd_and_start_io(h, c, reply_queue);
2376         if (timeout_msecs == NO_TIMEOUT) {
2377                 /* TODO: get rid of this no-timeout thing */
2378                 wait_for_completion_io(&wait);
2379                 return IO_OK;
2380         }
2381         if (!wait_for_completion_io_timeout(&wait,
2382                                         msecs_to_jiffies(timeout_msecs))) {
2383                 dev_warn(&h->pdev->dev, "Command timed out.\n");
2384                 return -ETIMEDOUT;
2385         }
2386         return IO_OK;
2387 }
2388
2389 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2390                                    int reply_queue, unsigned long timeout_msecs)
2391 {
2392         if (unlikely(lockup_detected(h))) {
2393                 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2394                 return IO_OK;
2395         }
2396         return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2397 }
2398
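/*
 * Read this CPU's copy of the per-cpu lockup flag; non-zero means a
 * controller lockup has been detected.  get_cpu()/put_cpu() merely pin
 * us to one CPU for the read; no lock is needed for a single word.
 */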
2399 static u32 lockup_detected(struct ctlr_info *h)
2400 {
2401         int cpu;
2402         u32 rc, *lockup_detected;
2403
2404         cpu = get_cpu();
2405         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2406         rc = *lockup_detected;
2407         put_cpu();
2408         return rc;
2409 }
2410
2411 #define MAX_DRIVER_CMD_RETRIES 25
2412 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2413         struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2414 {
2415         int backoff_time = 10, retry_count = 0;
2416         int rc;
2417
2418         do {
2419                 memset(c->err_info, 0, sizeof(*c->err_info));
2420                 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2421                                                   timeout_msecs);
2422                 if (rc)
2423                         break;
2424                 retry_count++;
2425                 if (retry_count > 3) {
2426                         msleep(backoff_time);
2427                         if (backoff_time < 1000)
2428                                 backoff_time *= 2;
2429                 }
2430         } while ((check_for_unit_attention(h, c) ||
2431                         check_for_busy(h, c)) &&
2432                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2433         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2434         if (retry_count > MAX_DRIVER_CMD_RETRIES)
2435                 rc = -EIO;
2436         return rc;
2437 }
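
/*
 * Retry/backoff behavior of the helper above, by way of a worked
 * example: a command that keeps reporting unit attention or busy is
 * reissued immediately three times, then with msleep()s of 10, 20, 40,
 * ... ms, doubling until the delay passes 1000 ms (topping out at
 * 1280 ms), for at most MAX_DRIVER_CMD_RETRIES (25) retries before the
 * caller gets -EIO.
 */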
2438
2439 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2440                                 struct CommandList *c)
2441 {
2442         const u8 *cdb = c->Request.CDB;
2443         const u8 *lun = c->Header.LUN.LunAddrBytes;
2444
2445         dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2446                 txt, lun, cdb);
2453 }
2454
2455 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2456                         struct CommandList *cp)
2457 {
2458         const struct ErrorInfo *ei = cp->err_info;
2459         struct device *d = &cp->h->pdev->dev;
2460         u8 sense_key, asc, ascq;
2461         int sense_len;
2462
2463         switch (ei->CommandStatus) {
2464         case CMD_TARGET_STATUS:
2465                 if (ei->SenseLen > sizeof(ei->SenseInfo))
2466                         sense_len = sizeof(ei->SenseInfo);
2467                 else
2468                         sense_len = ei->SenseLen;
2469                 decode_sense_data(ei->SenseInfo, sense_len,
2470                                         &sense_key, &asc, &ascq);
2471                 hpsa_print_cmd(h, "SCSI status", cp);
2472                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2473                         dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2474                                 sense_key, asc, ascq);
2475                 else
2476                         dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2477                 if (ei->ScsiStatus == 0)
2478                         dev_warn(d, "SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2482                 break;
2483         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2484                 break;
2485         case CMD_DATA_OVERRUN:
2486                 hpsa_print_cmd(h, "overrun condition", cp);
2487                 break;
2488         case CMD_INVALID: {
2489                 /* The controller unfortunately reports SCSI passthrus
2490                  * to non-existent targets as invalid commands.
2491                  */
2492                 hpsa_print_cmd(h, "invalid command", cp);
2493                 dev_warn(d, "probably means device no longer present\n");
2494                 }
2495                 break;
2496         case CMD_PROTOCOL_ERR:
2497                 hpsa_print_cmd(h, "protocol error", cp);
2498                 break;
2499         case CMD_HARDWARE_ERR:
2500                 hpsa_print_cmd(h, "hardware error", cp);
2501                 break;
2502         case CMD_CONNECTION_LOST:
2503                 hpsa_print_cmd(h, "connection lost", cp);
2504                 break;
2505         case CMD_ABORTED:
2506                 hpsa_print_cmd(h, "aborted", cp);
2507                 break;
2508         case CMD_ABORT_FAILED:
2509                 hpsa_print_cmd(h, "abort failed", cp);
2510                 break;
2511         case CMD_UNSOLICITED_ABORT:
2512                 hpsa_print_cmd(h, "unsolicited abort", cp);
2513                 break;
2514         case CMD_TIMEOUT:
2515                 hpsa_print_cmd(h, "timed out", cp);
2516                 break;
2517         case CMD_UNABORTABLE:
2518                 hpsa_print_cmd(h, "unabortable", cp);
2519                 break;
2520         case CMD_CTLR_LOCKUP:
2521                 hpsa_print_cmd(h, "controller lockup detected", cp);
2522                 break;
2523         default:
2524                 hpsa_print_cmd(h, "unknown status", cp);
2525                 dev_warn(d, "Unknown command status %x\n",
2526                                 ei->CommandStatus);
2527         }
2528 }
2529
2530 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2531                         u16 page, unsigned char *buf,
2532                         unsigned char bufsize)
2533 {
2534         int rc = IO_OK;
2535         struct CommandList *c;
2536         struct ErrorInfo *ei;
2537
2538         c = cmd_alloc(h);
2539
2540         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2541                         page, scsi3addr, TYPE_CMD)) {
2542                 rc = -1;
2543                 goto out;
2544         }
2545         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2546                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2547         if (rc)
2548                 goto out;
2549         ei = c->err_info;
2550         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2551                 hpsa_scsi_interpret_error(h, c);
2552                 rc = -1;
2553         }
2554 out:
2555         cmd_free(h, c);
2556         return rc;
2557 }
2558
2559 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2560                 unsigned char *scsi3addr, unsigned char page,
2561                 struct bmic_controller_parameters *buf, size_t bufsize)
2562 {
2563         int rc = IO_OK;
2564         struct CommandList *c;
2565         struct ErrorInfo *ei;
2566
2567         c = cmd_alloc(h);
2568         if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2569                         page, scsi3addr, TYPE_CMD)) {
2570                 rc = -1;
2571                 goto out;
2572         }
2573         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2574                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2575         if (rc)
2576                 goto out;
2577         ei = c->err_info;
2578         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2579                 hpsa_scsi_interpret_error(h, c);
2580                 rc = -1;
2581         }
2582 out:
2583         cmd_free(h, c);
2584         return rc;
2585 }
2586
2587 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2588         u8 reset_type, int reply_queue)
2589 {
2590         int rc = IO_OK;
2591         struct CommandList *c;
2592         struct ErrorInfo *ei;
2593
2594         c = cmd_alloc(h);
2595
2597         /* fill_cmd can't fail here, no data buffer to map. */
2598         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2599                         scsi3addr, TYPE_MSG);
2600         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2601         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2602         if (rc) {
2603                 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2604                 goto out;
2605         }
2606         /* no unmap needed here because no data xfer. */
2607
2608         ei = c->err_info;
2609         if (ei->CommandStatus != 0) {
2610                 hpsa_scsi_interpret_error(h, c);
2611                 rc = -1;
2612         }
2613 out:
2614         cmd_free(h, c);
2615         return rc;
2616 }
2617
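/*
 * The helper below issues an inquiry for the vendor-specific VPD page
 * 0xC1; byte 8 of the response carries the raid level, and anything out
 * of range is normalized to RAID_UNKNOWN.
 */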
2618 static void hpsa_get_raid_level(struct ctlr_info *h,
2619         unsigned char *scsi3addr, unsigned char *raid_level)
2620 {
2621         int rc;
2622         unsigned char *buf;
2623
2624         *raid_level = RAID_UNKNOWN;
2625         buf = kzalloc(64, GFP_KERNEL);
2626         if (!buf)
2627                 return;
2628         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2629         if (rc == 0)
2630                 *raid_level = buf[8];
2631         if (*raid_level > RAID_UNKNOWN)
2632                 *raid_level = RAID_UNKNOWN;
2633         kfree(buf);
2634         return;
2635 }
2636
2637 #define HPSA_MAP_DEBUG
2638 #ifdef HPSA_MAP_DEBUG
2639 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2640                                 struct raid_map_data *map_buff)
2641 {
2642         struct raid_map_disk_data *dd = &map_buff->data[0];
2643         int map, row, col;
2644         u16 map_cnt, row_cnt, disks_per_row;
2645
2646         if (rc != 0)
2647                 return;
2648
2649         /* Show details only if debugging has been activated. */
2650         if (h->raid_offload_debug < 2)
2651                 return;
2652
2653         dev_info(&h->pdev->dev, "structure_size = %u\n",
2654                                 le32_to_cpu(map_buff->structure_size));
2655         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2656                         le32_to_cpu(map_buff->volume_blk_size));
2657         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2658                         le64_to_cpu(map_buff->volume_blk_cnt));
2659         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2660                         map_buff->phys_blk_shift);
2661         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2662                         map_buff->parity_rotation_shift);
2663         dev_info(&h->pdev->dev, "strip_size = %u\n",
2664                         le16_to_cpu(map_buff->strip_size));
2665         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2666                         le64_to_cpu(map_buff->disk_starting_blk));
2667         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2668                         le64_to_cpu(map_buff->disk_blk_cnt));
2669         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2670                         le16_to_cpu(map_buff->data_disks_per_row));
2671         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2672                         le16_to_cpu(map_buff->metadata_disks_per_row));
2673         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2674                         le16_to_cpu(map_buff->row_cnt));
2675         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2676                         le16_to_cpu(map_buff->layout_map_count));
2677         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2678                         le16_to_cpu(map_buff->flags));
2679         dev_info(&h->pdev->dev, "encryption = %s\n",
2680                         le16_to_cpu(map_buff->flags) &
2681                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2682         dev_info(&h->pdev->dev, "dekindex = %u\n",
2683                         le16_to_cpu(map_buff->dekindex));
2684         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2685         for (map = 0; map < map_cnt; map++) {
2686                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2687                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2688                 for (row = 0; row < row_cnt; row++) {
2689                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2690                         disks_per_row =
2691                                 le16_to_cpu(map_buff->data_disks_per_row);
2692                         for (col = 0; col < disks_per_row; col++, dd++)
2693                                 dev_info(&h->pdev->dev,
2694                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2695                                         col, dd->ioaccel_handle,
2696                                         dd->xor_mult[0], dd->xor_mult[1]);
2697                         disks_per_row =
2698                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2699                         for (col = 0; col < disks_per_row; col++, dd++)
2700                                 dev_info(&h->pdev->dev,
2701                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2702                                         col, dd->ioaccel_handle,
2703                                         dd->xor_mult[0], dd->xor_mult[1]);
2704                 }
2705         }
2706 }
2707 #else
2708 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2709                         __attribute__((unused)) int rc,
2710                         __attribute__((unused)) struct raid_map_data *map_buff)
2711 {
2712 }
2713 #endif
2714
2715 static int hpsa_get_raid_map(struct ctlr_info *h,
2716         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2717 {
2718         int rc = 0;
2719         struct CommandList *c;
2720         struct ErrorInfo *ei;
2721
2722         c = cmd_alloc(h);
2723
2724         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2725                         sizeof(this_device->raid_map), 0,
2726                         scsi3addr, TYPE_CMD)) {
2727                 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2728                 cmd_free(h, c);
2729                 return -1;
2730         }
2731         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2732                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2733         if (rc)
2734                 goto out;
2735         ei = c->err_info;
2736         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2737                 hpsa_scsi_interpret_error(h, c);
2738                 rc = -1;
2739                 goto out;
2740         }
2741         cmd_free(h, c);
2742
2743         /* @todo in the future, dynamically allocate RAID map memory */
2744         if (le32_to_cpu(this_device->raid_map.structure_size) >
2745                                 sizeof(this_device->raid_map)) {
2746                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2747                 rc = -1;
2748         }
2749         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2750         return rc;
2751 out:
2752         cmd_free(h, c);
2753         return rc;
2754 }
2755
2756 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2757                 unsigned char scsi3addr[], u16 bmic_device_index,
2758                 struct bmic_identify_physical_device *buf, size_t bufsize)
2759 {
2760         int rc = IO_OK;
2761         struct CommandList *c;
2762         struct ErrorInfo *ei;
2763
2764         c = cmd_alloc(h);
2765         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2766                 0, RAID_CTLR_LUNID, TYPE_CMD);
2767         if (rc)
2768                 goto out;
2769
2770         c->Request.CDB[2] = bmic_device_index & 0xff;
2771         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2772
2773         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2774                                                 NO_TIMEOUT);
2775         ei = c->err_info;
2776         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2777                 hpsa_scsi_interpret_error(h, c);
2778                 rc = -1;
2779         }
2780 out:
2781         cmd_free(h, c);
2782         return rc;
2783 }
2784
2785 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2786         unsigned char scsi3addr[], u8 page)
2787 {
2788         int rc;
2789         int i;
2790         int pages;
2791         unsigned char *buf, bufsize;
2792
2793         buf = kzalloc(256, GFP_KERNEL);
2794         if (!buf)
2795                 return 0;
2796
2797         /* Get the size of the page list first */
2798         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2799                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2800                                 buf, HPSA_VPD_HEADER_SZ);
2801         if (rc != 0)
2802                 goto exit_unsupported;
2803         pages = buf[3];
2804         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2805                 bufsize = pages + HPSA_VPD_HEADER_SZ;
2806         else
2807                 bufsize = 255;
2808
2809         /* Get the whole VPD page list */
2810         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2811                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2812                                 buf, bufsize);
2813         if (rc != 0)
2814                 goto exit_unsupported;
2815
2816         pages = buf[3];
2817         for (i = 1; i <= pages; i++)
2818                 if (buf[3 + i] == page)
2819                         goto exit_supported;
2820 exit_unsupported:
2821         kfree(buf);
2822         return 0;
2823 exit_supported:
2824         kfree(buf);
2825         return 1;
2826 }
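/*
 * Illustrative layout (example values, not real hardware data) of the
 * supported-pages VPD response parsed above, for a device reporting
 * three pages:
 *
 *   buf[0] = peripheral device type
 *   buf[1] = 0x00 (page code: supported VPD pages)
 *   buf[3] = 0x03 (number of page codes that follow)
 *   buf[4] = 0x00, buf[5] = 0x83, buf[6] = 0xc1  (the supported page codes)
 *
 * hence the loop above scans buf[3 + i] for i = 1..pages.
 */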
2827
2828 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2829         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2830 {
2831         int rc;
2832         unsigned char *buf;
2833         u8 ioaccel_status;
2834
2835         this_device->offload_config = 0;
2836         this_device->offload_enabled = 0;
2837         this_device->offload_to_be_enabled = 0;
2838
2839         buf = kzalloc(64, GFP_KERNEL);
2840         if (!buf)
2841                 return;
2842         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2843                 goto out;
2844         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2845                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2846         if (rc != 0)
2847                 goto out;
2848
2849 #define IOACCEL_STATUS_BYTE 4
2850 #define OFFLOAD_CONFIGURED_BIT 0x01
2851 #define OFFLOAD_ENABLED_BIT 0x02
2852         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2853         this_device->offload_config =
2854                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2855         if (this_device->offload_config) {
2856                 this_device->offload_enabled =
2857                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2858                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2859                         this_device->offload_enabled = 0;
2860         }
2861         this_device->offload_to_be_enabled = this_device->offload_enabled;
2862 out:
2863         kfree(buf);
2864         return;
2865 }
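/*
 * Example decode (illustrative value): an ioaccel_status byte of 0x03 means
 * the volume is configured for offload (OFFLOAD_CONFIGURED_BIT) and enabled
 * for it (OFFLOAD_ENABLED_BIT); 0x01 means configured but not enabled.
 */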
2866
2867 /* Get the device id from inquiry page 0x83 */
2868 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2869         unsigned char *device_id, int buflen)
2870 {
2871         int rc;
2872         unsigned char *buf;
2873
2874         if (buflen > 16)
2875                 buflen = 16;
2876         buf = kzalloc(64, GFP_KERNEL);
2877         if (!buf)
2878                 return -ENOMEM;
2879         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2880         if (rc == 0)
2881                 memcpy(device_id, &buf[8], buflen);
2882         kfree(buf);
2883         return rc != 0;
2884 }
2885
2886 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2887                 void *buf, int bufsize,
2888                 int extended_response)
2889 {
2890         int rc = IO_OK;
2891         struct CommandList *c;
2892         unsigned char scsi3addr[8];
2893         struct ErrorInfo *ei;
2894
2895         c = cmd_alloc(h);
2896
2897         /* address the controller */
2898         memset(scsi3addr, 0, sizeof(scsi3addr));
2899         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2900                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2901                 rc = -1;
2902                 goto out;
2903         }
2904         if (extended_response)
2905                 c->Request.CDB[1] = extended_response;
2906         rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2907                                         PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2908         if (rc)
2909                 goto out;
2910         ei = c->err_info;
2911         if (ei->CommandStatus != 0 &&
2912             ei->CommandStatus != CMD_DATA_UNDERRUN) {
2913                 hpsa_scsi_interpret_error(h, c);
2914                 rc = -1;
2915         } else {
2916                 struct ReportLUNdata *rld = buf;
2917
2918                 if (rld->extended_response_flag != extended_response) {
2919                         dev_err(&h->pdev->dev,
2920                                 "report luns requested format %u, got %u\n",
2921                                 extended_response,
2922                                 rld->extended_response_flag);
2923                         rc = -1;
2924                 }
2925         }
2926 out:
2927         cmd_free(h, c);
2928         return rc;
2929 }
2930
2931 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2932                 struct ReportExtendedLUNdata *buf, int bufsize)
2933 {
2934         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2935                                                 HPSA_REPORT_PHYS_EXTENDED);
2936 }
2937
2938 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2939                 struct ReportLUNdata *buf, int bufsize)
2940 {
2941         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2942 }
2943
2944 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2945         int bus, int target, int lun)
2946 {
2947         device->bus = bus;
2948         device->target = target;
2949         device->lun = lun;
2950 }
2951
2952 /* Use VPD inquiry to get details of volume status */
2953 static int hpsa_get_volume_status(struct ctlr_info *h,
2954                                         unsigned char scsi3addr[])
2955 {
2956         int rc;
2957         int status;
2958         int size;
2959         unsigned char *buf;
2960
2961         buf = kzalloc(64, GFP_KERNEL);
2962         if (!buf)
2963                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2964
2965         /* Does controller have VPD for logical volume status? */
2966         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2967                 goto exit_failed;
2968
2969         /* Get the size of the VPD return buffer */
2970         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2971                                         buf, HPSA_VPD_HEADER_SZ);
2972         if (rc != 0)
2973                 goto exit_failed;
2974         size = buf[3];
2975
2976         /* Now get the whole VPD buffer */
2977         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2978                                         buf, size + HPSA_VPD_HEADER_SZ);
2979         if (rc != 0)
2980                 goto exit_failed;
2981         status = buf[4]; /* status byte */
2982
2983         kfree(buf);
2984         return status;
2985 exit_failed:
2986         kfree(buf);
2987         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2988 }
2989
2990 /* Determine offline status of a volume.
2991  * Return either:
2992  *  0 (not offline)
2993  *  0xff (offline for unknown reasons)
2994  *  # (integer code indicating one of several NOT READY states
2995  *     describing why a volume is to be kept offline)
2996  */
2997 static int hpsa_volume_offline(struct ctlr_info *h,
2998                                         unsigned char scsi3addr[])
2999 {
3000         struct CommandList *c;
3001         unsigned char *sense;
3002         u8 sense_key, asc, ascq;
3003         int sense_len;
3004         int rc, ldstat = 0;
3005         u16 cmd_status;
3006         u8 scsi_status;
3007 #define ASC_LUN_NOT_READY 0x04
3008 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3009 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3010
3011         c = cmd_alloc(h);
3012
3013         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3014         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3015         if (rc) {
3016                 cmd_free(h, c);
3017                 return 0;
3018         }
3019         sense = c->err_info->SenseInfo;
3020         if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3021                 sense_len = sizeof(c->err_info->SenseInfo);
3022         else
3023                 sense_len = c->err_info->SenseLen;
3024         decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3025         cmd_status = c->err_info->CommandStatus;
3026         scsi_status = c->err_info->ScsiStatus;
3027         cmd_free(h, c);
3028         /* Is the volume 'not ready'? */
3029         if (cmd_status != CMD_TARGET_STATUS ||
3030                 scsi_status != SAM_STAT_CHECK_CONDITION ||
3031                 sense_key != NOT_READY ||
3032                 asc != ASC_LUN_NOT_READY)  {
3033                 return 0;
3034         }
3035
3036         /* Determine the reason for not ready state */
3037         ldstat = hpsa_get_volume_status(h, scsi3addr);
3038
3039         /* Keep volume offline in certain cases: */
3040         switch (ldstat) {
3041         case HPSA_LV_UNDERGOING_ERASE:
3042         case HPSA_LV_UNDERGOING_RPI:
3043         case HPSA_LV_PENDING_RPI:
3044         case HPSA_LV_ENCRYPTED_NO_KEY:
3045         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3046         case HPSA_LV_UNDERGOING_ENCRYPTION:
3047         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3048         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3049                 return ldstat;
3050         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3051                 /* If VPD status page isn't available,
3052                  * use ASC/ASCQ to determine state
3053                  */
3054                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3055                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3056                         return ldstat;
3057                 break;
3058         default:
3059                 break;
3060         }
3061         return 0;
3062 }
3063
3064 /*
3065  * Find out if a logical device supports aborts by simply trying one.
3066  * Smart Array may claim not to support aborts on logical drives, but
3067  * if an MSA2000 is connected, the drives on that unit will be presented
3068  * by the Smart Array as logical drives, and aborts may be sent to
3069  * those devices successfully.  So the simplest way to find out is
3070  * to try an abort and see how the device responds.
3071  */
3072 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3073                                         unsigned char *scsi3addr)
3074 {
3075         struct CommandList *c;
3076         struct ErrorInfo *ei;
3077         int rc = 0;
3078
3079         u64 tag = (u64) -1; /* bogus tag */
3080
3081         /* Assume that physical devices support aborts */
3082         if (!is_logical_dev_addr_mode(scsi3addr))
3083                 return 1;
3084
3085         c = cmd_alloc(h);
3086
3087         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3088         (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3089         /* no unmap needed here because no data xfer. */
3090         ei = c->err_info;
3091         switch (ei->CommandStatus) {
3092         case CMD_INVALID:
3093                 rc = 0;
3094                 break;
3095         case CMD_UNABORTABLE:
3096         case CMD_ABORT_FAILED:
3097                 rc = 1;
3098                 break;
3099         case CMD_TMF_STATUS:
3100                 rc = hpsa_evaluate_tmf_status(h, c);
3101                 break;
3102         default:
3103                 rc = 0;
3104                 break;
3105         }
3106         cmd_free(h, c);
3107         return rc;
3108 }
3109
3110 static int hpsa_update_device_info(struct ctlr_info *h,
3111         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3112         unsigned char *is_OBDR_device)
3113 {
3114
3115 #define OBDR_SIG_OFFSET 43
3116 #define OBDR_TAPE_SIG "$DR-10"
3117 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3118 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3119
3120         unsigned char *inq_buff;
3121         unsigned char *obdr_sig;
3122
3123         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3124         if (!inq_buff)
3125                 goto bail_out;
3126
3127         /* Do an inquiry to the device to see what it is. */
3128         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3129                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3130                 /* Inquiry failed (msg printed already) */
3131                 dev_err(&h->pdev->dev,
3132                         "hpsa_update_device_info: inquiry failed\n");
3133                 goto bail_out;
3134         }
3135
3136         this_device->devtype = (inq_buff[0] & 0x1f);
3137         memcpy(this_device->scsi3addr, scsi3addr, 8);
3138         memcpy(this_device->vendor, &inq_buff[8],
3139                 sizeof(this_device->vendor));
3140         memcpy(this_device->model, &inq_buff[16],
3141                 sizeof(this_device->model));
3142         memset(this_device->device_id, 0,
3143                 sizeof(this_device->device_id));
3144         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3145                 sizeof(this_device->device_id));
3146
3147         if (this_device->devtype == TYPE_DISK &&
3148                 is_logical_dev_addr_mode(scsi3addr)) {
3149                 int volume_offline;
3150
3151                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3152                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3153                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3154                 volume_offline = hpsa_volume_offline(h, scsi3addr);
3155                 if (volume_offline < 0 || volume_offline > 0xff)
3156                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3157                 this_device->volume_offline = volume_offline & 0xff;
3158         } else {
3159                 this_device->raid_level = RAID_UNKNOWN;
3160                 this_device->offload_config = 0;
3161                 this_device->offload_enabled = 0;
3162                 this_device->offload_to_be_enabled = 0;
3163                 this_device->hba_ioaccel_enabled = 0;
3164                 this_device->volume_offline = 0;
3165                 this_device->queue_depth = h->nr_cmds;
3166         }
3167
3168         if (is_OBDR_device) {
3169                 /* See if this is a One-Button-Disaster-Recovery device
3170                  * by looking for "$DR-10" at offset 43 in inquiry data.
3171                  */
3172                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3173                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3174                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
3175                                                 OBDR_SIG_LEN) == 0);
3176         }
3177         kfree(inq_buff);
3178         return 0;
3179
3180 bail_out:
3181         kfree(inq_buff);
3182         return 1;
3183 }
3184
3185 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3186                         struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3187 {
3188         unsigned long flags;
3189         int rc, entry;
3190         /*
3191          * See if this device supports aborts.  If we already know
3192          * the device, we already know if it supports aborts, otherwise
3193          * we have to find out if it supports aborts by trying one.
3194          */
3195         spin_lock_irqsave(&h->devlock, flags);
3196         rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3197         if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3198                 entry >= 0 && entry < h->ndevices) {
3199                 dev->supports_aborts = h->dev[entry]->supports_aborts;
3200                 spin_unlock_irqrestore(&h->devlock, flags);
3201         } else {
3202                 spin_unlock_irqrestore(&h->devlock, flags);
3203                 dev->supports_aborts =
3204                                 hpsa_device_supports_aborts(h, scsi3addr);
3205                 if (dev->supports_aborts < 0)
3206                         dev->supports_aborts = 0;
3207         }
3208 }
3209
3210 static unsigned char *ext_target_model[] = {
3211         "MSA2012",
3212         "MSA2024",
3213         "MSA2312",
3214         "MSA2324",
3215         "P2000 G3 SAS",
3216         "MSA 2040 SAS",
3217         NULL,
3218 };
3219
3220 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
3221 {
3222         int i;
3223
3224         for (i = 0; ext_target_model[i]; i++)
3225                 if (strncmp(device->model, ext_target_model[i],
3226                         strlen(ext_target_model[i])) == 0)
3227                         return 1;
3228         return 0;
3229 }
3230
3231 /* Helper function to assign bus, target, lun mapping of devices.
3232  * Puts non-external target logical volumes on bus 0, external target logical
3233  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
3234  * Logical drive target and lun are assigned at this time, but
3235  * physical device lun and target assignment are deferred (assigned
3236  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3237  */
3238 static void figure_bus_target_lun(struct ctlr_info *h,
3239         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3240 {
3241         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
3242
3243         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3244                 /* physical device, target and lun filled in later */
3245                 if (is_hba_lunid(lunaddrbytes))
3246                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
3247                 else
3248                         /* defer target, lun assignment for physical devices */
3249                         hpsa_set_bus_target_lun(device, 2, -1, -1);
3250                 return;
3251         }
3252         /* It's a logical device */
3253         if (is_ext_target(h, device)) {
3254                 /* External target: put logicals on bus 1 and match the
3255                  * target/lun numbers the box reports; for other Smart
3256                  * Arrays use bus 0, target 0, and match the lunid.
3257                  */
3258                 hpsa_set_bus_target_lun(device,
3259                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3260                 return;
3261         }
3262         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
3263 }
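/*
 * Summary of the addressing produced above (lunid values illustrative):
 *
 *   HBA lunid                -> bus 3, target 0, lun = lunid & 0x3fff
 *   other physical device    -> bus 2, target/lun assigned later
 *   external target logical  -> bus 1, target = (lunid >> 16) & 0x3fff,
 *                                      lun = lunid & 0x00ff
 *   local logical volume     -> bus 0, target 0, lun = lunid & 0x3fff
 */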
3264
3265 /*
3266  * If there is no lun 0 on a target, Linux won't find any devices.
3267  * For the external targets (arrays), we have to manually detect the enclosure
3268  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3269  * it for some reason.  *tmpdevice is the target we're adding,
3270  * this_device is a pointer into the current element of currentsd[]
3271  * that we're building up in update_scsi_devices(), below.
3272  * lunzerobits is a bitmap that tracks which targets already have a
3273  * lun 0 assigned.
3274  * Returns 1 if an enclosure was added, 0 if not.
3275  */
3276 static int add_ext_target_dev(struct ctlr_info *h,
3277         struct hpsa_scsi_dev_t *tmpdevice,
3278         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3279         unsigned long lunzerobits[], int *n_ext_target_devs)
3280 {
3281         unsigned char scsi3addr[8];
3282
3283         if (test_bit(tmpdevice->target, lunzerobits))
3284                 return 0; /* There is already a lun 0 on this target. */
3285
3286         if (!is_logical_dev_addr_mode(lunaddrbytes))
3287                 return 0; /* It's the logical targets that may lack lun 0. */
3288
3289         if (!is_ext_target(h, tmpdevice))
3290                 return 0; /* Only external target devices have this problem. */
3291
3292         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3293                 return 0;
3294
3295         memset(scsi3addr, 0, 8);
3296         scsi3addr[3] = tmpdevice->target;
3297         if (is_hba_lunid(scsi3addr))
3298                 return 0; /* Don't add the RAID controller here. */
3299
3300         if (is_scsi_rev_5(h))
3301                 return 0; /* p1210m doesn't need to do this. */
3302
3303         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3304                 dev_warn(&h->pdev->dev, "Maximum number of external "
3305                         "target devices exceeded.  Check your hardware "
3306                         "configuration.\n");
3307                 return 0;
3308         }
3309
3310         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3311                 return 0;
3312         (*n_ext_target_devs)++;
3313         hpsa_set_bus_target_lun(this_device,
3314                                 tmpdevice->bus, tmpdevice->target, 0);
3315         hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3316         set_bit(tmpdevice->target, lunzerobits);
3317         return 1;
3318 }
3319
3320 /*
3321  * Get address of physical disk used for an ioaccel2 mode command:
3322  *      1. Extract ioaccel2 handle from the command.
3323  *      2. Find a matching ioaccel2 handle from list of physical disks.
3324  *      3. Return:
3325  *              1 and set scsi3addr to the address of the matching physical disk, or
3326  *              0 if no matching physical disk was found.
3327  */
3328 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3329         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3330 {
3331         struct io_accel2_cmd *c2 =
3332                         &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3333         unsigned long flags;
3334         int i;
3335
3336         spin_lock_irqsave(&h->devlock, flags);
3337         for (i = 0; i < h->ndevices; i++)
3338                 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3339                         memcpy(scsi3addr, h->dev[i]->scsi3addr,
3340                                 sizeof(h->dev[i]->scsi3addr));
3341                         spin_unlock_irqrestore(&h->devlock, flags);
3342                         return 1;
3343                 }
3344         spin_unlock_irqrestore(&h->devlock, flags);
3345         return 0;
3346 }
3347
3348 /*
3349  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
3350  * logdev.  The numbers of LUNs in physdev and logdev are returned in
3351  * *nphysicals and *nlogicals, respectively.
3352  * Returns 0 on success, -1 otherwise.
3353  */
3354 static int hpsa_gather_lun_info(struct ctlr_info *h,
3355         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3356         struct ReportLUNdata *logdev, u32 *nlogicals)
3357 {
3358         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3359                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3360                 return -1;
3361         }
3362         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3363         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3364                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3365                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3366                 *nphysicals = HPSA_MAX_PHYS_LUN;
3367         }
3368         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3369                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3370                 return -1;
3371         }
3372         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3373         /* Reject Logicals in excess of our max capability. */
3374         if (*nlogicals > HPSA_MAX_LUN) {
3375                 dev_warn(&h->pdev->dev,
3376                         "maximum logical LUNs (%d) exceeded.  "
3377                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
3378                         *nlogicals - HPSA_MAX_LUN);
3379                 *nlogicals = HPSA_MAX_LUN;
3380         }
3381         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3382                 dev_warn(&h->pdev->dev,
3383                         "maximum logical + physical LUNs (%d) exceeded. "
3384                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3385                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3386                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3387         }
3388         return 0;
3389 }
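/*
 * The divisors above come from the CISS report formats: LUNListLength is a
 * big-endian byte count, each extended physical entry (8-byte lunid plus
 * wwid, flags, and ioaccel handle) is 24 bytes, and each logical entry is
 * an 8-byte LUN address, hence the / 24 and / 8.
 */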
3390
3391 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3392         int i, int nphysicals, int nlogicals,
3393         struct ReportExtendedLUNdata *physdev_list,
3394         struct ReportLUNdata *logdev_list)
3395 {
3396         /* Helper function: figure out where the LUN ID info is coming from,
3397          * given index i, the lists of physical and logical devices, and where
3398          * in the list the RAID controller is supposed to appear (first or last).
3399          */
3400
3401         int logicals_start = nphysicals + (raid_ctlr_position == 0);
3402         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3403
3404         if (i == raid_ctlr_position)
3405                 return RAID_CTLR_LUNID;
3406
3407         if (i < logicals_start)
3408                 return &physdev_list->LUN[i -
3409                                 (raid_ctlr_position == 0)].lunid[0];
3410
3411         if (i < last_device)
3412                 return &logdev_list->LUN[i - nphysicals -
3413                         (raid_ctlr_position == 0)][0];
3414         BUG();
3415         return NULL;
3416 }
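/*
 * Index layout example (illustrative counts): with nphysicals == 2 and
 * nlogicals == 3, raid_ctlr_position == 0 gives the scan order
 *
 *   i == 0     RAID controller (RAID_CTLR_LUNID)
 *   i == 1..2  physical LUNs from physdev_list
 *   i == 3..5  logical LUNs from logdev_list
 *
 * otherwise the controller occupies the last slot (i == 5) instead.
 */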
3417
3418 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3419 {
3420         int rc;
3421         int hba_mode_enabled;
3422         struct bmic_controller_parameters *ctlr_params;
3423         ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3424                 GFP_KERNEL);
3425
3426         if (!ctlr_params)
3427                 return -ENOMEM;
3428         rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3429                 sizeof(struct bmic_controller_parameters));
3430         if (rc) {
3431                 kfree(ctlr_params);
3432                 return rc;
3433         }
3434
3435         hba_mode_enabled =
3436                 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3437         kfree(ctlr_params);
3438         return hba_mode_enabled;
3439 }
3440
3441 /* get physical drive ioaccel handle and queue depth */
3442 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3443                 struct hpsa_scsi_dev_t *dev,
3444                 u8 *lunaddrbytes,
3445                 struct bmic_identify_physical_device *id_phys)
3446 {
3447         int rc;
3448         struct ext_report_lun_entry *rle =
3449                 (struct ext_report_lun_entry *) lunaddrbytes;
3450
3451         dev->ioaccel_handle = rle->ioaccel_handle;
3452         if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3453                 dev->hba_ioaccel_enabled = 1;
3454         memset(id_phys, 0, sizeof(*id_phys));
3455         rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3456                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3457                         sizeof(*id_phys));
3458         if (!rc)
3459                 /* Reserve space for FW operations */
3460 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3461 #define DRIVE_QUEUE_DEPTH 7
3462                 dev->queue_depth =
3463                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3464                                 DRIVE_CMDS_RESERVED_FOR_FW;
3465         else
3466                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3467         atomic_set(&dev->ioaccel_cmds_out, 0);
3468 }
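/*
 * Example (illustrative numbers): a drive whose BMIC identify data reports
 * current_queue_depth_limit == 32 ends up with queue_depth == 30 after the
 * two commands reserved for firmware; if the BMIC query fails, the
 * conservative default of 7 is used instead.
 */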
3469
3470 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3471 {
3472         /* The idea here is that we could get notified
3473          * that some devices have changed, so we do a report
3474          * physical LUNs and a report logical LUNs command, and adjust
3475          * our list of devices accordingly.
3476          *
3477          * The scsi3addr's of devices won't change so long as the
3478          * adapter is not reset.  That means we can rescan and
3479          * tell which devices we already know about, vs. new
3480          * devices, vs.  disappearing devices.
3481          */
3482         struct ReportExtendedLUNdata *physdev_list = NULL;
3483         struct ReportLUNdata *logdev_list = NULL;
3484         struct bmic_identify_physical_device *id_phys = NULL;
3485         u32 nphysicals = 0;
3486         u32 nlogicals = 0;
3487         u32 ndev_allocated = 0;
3488         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3489         int ncurrent = 0;
3490         int i, n_ext_target_devs, ndevs_to_allocate;
3491         int raid_ctlr_position;
3492         int rescan_hba_mode;
3493         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3494
3495         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3496         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3497         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3498         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3499         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3500
3501         if (!currentsd || !physdev_list || !logdev_list ||
3502                 !tmpdevice || !id_phys) {
3503                 dev_err(&h->pdev->dev, "out of memory\n");
3504                 goto out;
3505         }
3506         memset(lunzerobits, 0, sizeof(lunzerobits));
3507
3508         rescan_hba_mode = hpsa_hba_mode_enabled(h);
3509         if (rescan_hba_mode < 0)
3510                 goto out;
3511
3512         if (!h->hba_mode_enabled && rescan_hba_mode)
3513                 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3514         else if (h->hba_mode_enabled && !rescan_hba_mode)
3515                 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3516
3517         h->hba_mode_enabled = rescan_hba_mode;
3518
3519         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3520                         logdev_list, &nlogicals))
3521                 goto out;
3522
3523         /* We might see up to the maximum number of logical and physical disks
3524          * plus external target devices, and a device for the local RAID
3525          * controller.
3526          */
3527         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3528
3529         /* Allocate the per device structures */
3530         for (i = 0; i < ndevs_to_allocate; i++) {
3531                 if (i >= HPSA_MAX_DEVICES) {
3532                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3533                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3534                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3535                         break;
3536                 }
3537
3538                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3539                 if (!currentsd[i]) {
3540                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3541                                 __FILE__, __LINE__);
3542                         goto out;
3543                 }
3544                 ndev_allocated++;
3545         }
3546
3547         if (is_scsi_rev_5(h))
3548                 raid_ctlr_position = 0;
3549         else
3550                 raid_ctlr_position = nphysicals + nlogicals;
3551
3552         /* adjust our table of devices */
3553         n_ext_target_devs = 0;
3554         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3555                 u8 *lunaddrbytes, is_OBDR = 0;
3556
3557                 /* Figure out where the LUN ID info is coming from */
3558                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3559                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3560
3561                 /* skip masked non-disk devices */
3562                 if (MASKED_DEVICE(lunaddrbytes))
3563                         if (i < nphysicals + (raid_ctlr_position == 0) &&
3564                                 NON_DISK_PHYS_DEV(lunaddrbytes))
3565                                 continue;
3566
3567                 /* Get device type, vendor, model, device id */
3568                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3569                                                         &is_OBDR))
3570                         continue; /* skip it if we can't talk to it. */
3571                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3572                 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3573                 this_device = currentsd[ncurrent];
3574
3575                 /*
3576                  * For external target devices, we have to insert a LUN 0 which
3577                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3578                  * is nonetheless an enclosure device there.  We have to
3579                  * present that, otherwise Linux won't find anything if
3580                  * there is no lun 0.
3581                  */
3582                 if (add_ext_target_dev(h, tmpdevice, this_device,
3583                                 lunaddrbytes, lunzerobits,
3584                                 &n_ext_target_devs)) {
3585                         ncurrent++;
3586                         this_device = currentsd[ncurrent];
3587                 }
3588
3589                 *this_device = *tmpdevice;
3590
3591                 /* do not expose masked devices */
3592                 if (MASKED_DEVICE(lunaddrbytes) &&
3593                         i < nphysicals + (raid_ctlr_position == 0)) {
3594                         if (h->hba_mode_enabled)
3595                                 dev_warn(&h->pdev->dev,
3596                                         "Masked physical device detected\n");
3597                         this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3598                 } else {
3599                         this_device->expose_state =
3600                                         HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3601                 }
3602
3603                 switch (this_device->devtype) {
3604                 case TYPE_ROM:
3605                         /* We don't *really* support actual CD-ROM devices,
3606                          * just "One Button Disaster Recovery" tape drive
3607                          * which temporarily pretends to be a CD-ROM drive.
3608                          * So we check that the device is really an OBDR tape
3609                          * device by checking for "$DR-10" in bytes 43-48 of
3610                          * the inquiry data.
3611                          */
3612                         if (is_OBDR)
3613                                 ncurrent++;
3614                         break;
3615                 case TYPE_DISK:
3616                         if (i >= nphysicals) {
3617                                 ncurrent++;
3618                                 break;
3619                         }
3620
3621                         if (h->hba_mode_enabled)
3622                                 /* never use raid mapper in HBA mode */
3623                                 this_device->offload_enabled = 0;
3624                         else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
3625                                 h->transMethod & CFGTBL_Trans_io_accel2))
3626                                 break;
3627
3628                         hpsa_get_ioaccel_drive_info(h, this_device,
3629                                                 lunaddrbytes, id_phys);
3630                         atomic_set(&this_device->ioaccel_cmds_out, 0);
3631                         ncurrent++;
3632                         break;
3633                 case TYPE_TAPE:
3634                 case TYPE_MEDIUM_CHANGER:
3635                         ncurrent++;
3636                         break;
3637                 case TYPE_ENCLOSURE:
3638                         if (h->hba_mode_enabled)
3639                                 ncurrent++;
3640                         break;
3641                 case TYPE_RAID:
3642                         /* Only present the Smart Array HBA as a RAID controller.
3643                          * If it's a RAID controller other than the HBA itself
3644                          * (an external RAID controller, MSA500 or similar)
3645                          * don't present it.
3646                          */
3647                         if (!is_hba_lunid(lunaddrbytes))
3648                                 break;
3649                         ncurrent++;
3650                         break;
3651                 default:
3652                         break;
3653                 }
3654                 if (ncurrent >= HPSA_MAX_DEVICES)
3655                         break;
3656         }
3657         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3658 out:
3659         kfree(tmpdevice);
3660         for (i = 0; i < ndev_allocated; i++)
3661                 kfree(currentsd[i]);
3662         kfree(currentsd);
3663         kfree(physdev_list);
3664         kfree(logdev_list);
3665         kfree(id_phys);
3666 }
3667
3668 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3669                                    struct scatterlist *sg)
3670 {
3671         u64 addr64 = (u64) sg_dma_address(sg);
3672         unsigned int len = sg_dma_len(sg);
3673
3674         desc->Addr = cpu_to_le64(addr64);
3675         desc->Len = cpu_to_le32(len);
3676         desc->Ext = 0;
3677 }
3678
3679 /*
3680  * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), performs the PCI
3681  * DMA mapping, and fills in the scatter-gather entries of the
3682  * hpsa command (cp).
3683  */
3684 static int hpsa_scatter_gather(struct ctlr_info *h,
3685                 struct CommandList *cp,
3686                 struct scsi_cmnd *cmd)
3687 {
3688         struct scatterlist *sg;
3689         int use_sg, i, sg_limit, chained, last_sg;
3690         struct SGDescriptor *curr_sg;
3691
3692         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3693
3694         use_sg = scsi_dma_map(cmd);
3695         if (use_sg < 0)
3696                 return use_sg;
3697
3698         if (!use_sg)
3699                 goto sglist_finished;
3700
3701         /*
3702          * If the number of entries is greater than the max for a single list,
3703          * then we have a chained list; we will set up all but one entry in the
3704          * first list (the last entry is saved for link information);
3705          * otherwise, we don't have a chained list and we'll set up each of
3706          * the entries in the one list.
3707          */
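        /*
         * Worked example (illustrative numbers): with max_cmd_sg_entries
         * of 32 and use_sg of 40, the first 31 descriptors land in cp->SG,
         * the 32nd slot carries the chain link, and the remaining 9
         * descriptors go into the chain block set up further below.
         */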
3708         curr_sg = cp->SG;
3709         chained = use_sg > h->max_cmd_sg_entries;
3710         sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3711         last_sg = scsi_sg_count(cmd) - 1;
3712         scsi_for_each_sg(cmd, sg, sg_limit, i) {
3713                 hpsa_set_sg_descriptor(curr_sg, sg);
3714                 curr_sg++;
3715         }
3716
3717         if (chained) {
3718                 /*
3719                  * Continue with the chained list.  Set curr_sg to the chained
3720                  * list.  Modify the limit to the total count less the entries
3721                  * we've already set up.  Resume the scan at the list entry
3722                  * where the previous loop left off.
3723                  */
3724                 curr_sg = h->cmd_sg_list[cp->cmdindex];
3725                 sg_limit = use_sg - sg_limit;
3726                 for_each_sg(sg, sg, sg_limit, i) {
3727                         hpsa_set_sg_descriptor(curr_sg, sg);
3728                         curr_sg++;
3729                 }
3730         }
3731
3732         /* Back the pointer up to the last entry and mark it as "last". */
3733         (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
3734
3735         if (use_sg + chained > h->maxSG)
3736                 h->maxSG = use_sg + chained;
3737
3738         if (chained) {
3739                 cp->Header.SGList = h->max_cmd_sg_entries;
3740                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3741                 if (hpsa_map_sg_chain_block(h, cp)) {
3742                         scsi_dma_unmap(cmd);
3743                         return -1;
3744                 }
3745                 return 0;
3746         }
3747
3748 sglist_finished:
3749
3750         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3751         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3752         return 0;
3753 }
3754
3755 #define IO_ACCEL_INELIGIBLE (1)
3756 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3757 {
3758         int is_write = 0;
3759         u32 block;
3760         u32 block_cnt;
3761
3762         /* Rewrite 6- and 12-byte read/write CDBs as 10-byte CDBs where needed */
3763         switch (cdb[0]) {
3764         case WRITE_6:
3765         case WRITE_12:
3766                 is_write = 1;
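                /* fall through */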
3767         case READ_6:
3768         case READ_12:
3769                 if (*cdb_len == 6) {
3770                         block = ((cdb[1] & 0x1f) << 16) | (((u32) cdb[2]) << 8) | cdb[3];
3771                         block_cnt = cdb[4] ? cdb[4] : 256; /* 0 means 256 blocks in a 6-byte CDB */
3772                 } else {
3773                         BUG_ON(*cdb_len != 12);
3774                         block = (((u32) cdb[2]) << 24) |
3775                                 (((u32) cdb[3]) << 16) |
3776                                 (((u32) cdb[4]) << 8) |
3777                                 cdb[5];
3778                         block_cnt =
3779                                 (((u32) cdb[6]) << 24) |
3780                                 (((u32) cdb[7]) << 16) |
3781                                 (((u32) cdb[8]) << 8) |
3782                                 cdb[9];
3783                 }
3784                 if (block_cnt > 0xffff)
3785                         return IO_ACCEL_INELIGIBLE;
3786
3787                 cdb[0] = is_write ? WRITE_10 : READ_10;
3788                 cdb[1] = 0;
3789                 cdb[2] = (u8) (block >> 24);
3790                 cdb[3] = (u8) (block >> 16);
3791                 cdb[4] = (u8) (block >> 8);
3792                 cdb[5] = (u8) (block);
3793                 cdb[6] = 0;
3794                 cdb[7] = (u8) (block_cnt >> 8);
3795                 cdb[8] = (u8) (block_cnt);
3796                 cdb[9] = 0;
3797                 *cdb_len = 10;
3798                 break;
3799         }
3800         return 0;
3801 }
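/*
 * Worked example (illustrative bytes): the READ_6 CDB 08 01 23 45 10 00
 * (LBA 0x12345, 16 blocks) is rewritten above into the READ_10 CDB
 * 28 00 00 01 23 45 00 00 10 00, i.e. the 21-bit LBA and 8-bit count are
 * widened into the 32-bit LBA and 16-bit count fields of the 10-byte form.
 */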
3802
3803 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3804         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3805         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3806 {
3807         struct scsi_cmnd *cmd = c->scsi_cmd;
3808         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3809         unsigned int len;
3810         unsigned int total_len = 0;
3811         struct scatterlist *sg;
3812         u64 addr64;
3813         int use_sg, i;
3814         struct SGDescriptor *curr_sg;
3815         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3816
3817         /* TODO: implement chaining support */
3818         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3819                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3820                 return IO_ACCEL_INELIGIBLE;
3821         }
3822
3823         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3824
3825         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3826                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3827                 return IO_ACCEL_INELIGIBLE;
3828         }
3829
3830         c->cmd_type = CMD_IOACCEL1;
3831
3832         /* Adjust the DMA address to point to the accelerated command buffer */
3833         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3834                                 (c->cmdindex * sizeof(*cp));
3835         BUG_ON(c->busaddr & 0x0000007F);
3836
3837         use_sg = scsi_dma_map(cmd);
3838         if (use_sg < 0) {
3839                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3840                 return use_sg;
3841         }
3842
3843         if (use_sg) {
3844                 curr_sg = cp->SG;
3845                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3846                         addr64 = (u64) sg_dma_address(sg);
3847                         len  = sg_dma_len(sg);
3848                         total_len += len;
3849                         curr_sg->Addr = cpu_to_le64(addr64);
3850                         curr_sg->Len = cpu_to_le32(len);
3851                         curr_sg->Ext = cpu_to_le32(0);
3852                         curr_sg++;
3853                 }
3854                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3855
3856                 switch (cmd->sc_data_direction) {
3857                 case DMA_TO_DEVICE:
3858                         control |= IOACCEL1_CONTROL_DATA_OUT;
3859                         break;
3860                 case DMA_FROM_DEVICE:
3861                         control |= IOACCEL1_CONTROL_DATA_IN;
3862                         break;
3863                 case DMA_NONE:
3864                         control |= IOACCEL1_CONTROL_NODATAXFER;
3865                         break;
3866                 default:
3867                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3868                         cmd->sc_data_direction);
3869                         BUG();
3870                         break;
3871                 }
3872         } else {
3873                 control |= IOACCEL1_CONTROL_NODATAXFER;
3874         }
3875
3876         c->Header.SGList = use_sg;
3877         /* Fill out the command structure to submit */
3878         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3879         cp->transfer_len = cpu_to_le32(total_len);
3880         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3881                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3882         cp->control = cpu_to_le32(control);
3883         memcpy(cp->CDB, cdb, cdb_len);
3884         memcpy(cp->CISS_LUN, scsi3addr, 8);
3885         /* Tag was already set at init time. */
3886         enqueue_cmd_and_start_io(h, c);
3887         return 0;
3888 }
3889
3890 /*
3891  * Queue a command directly to a device behind the controller using the
3892  * I/O accelerator path.
3893  */
3894 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3895         struct CommandList *c)
3896 {
3897         struct scsi_cmnd *cmd = c->scsi_cmd;
3898         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3899
3900         c->phys_disk = dev;
3901
3902         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3903                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3904 }
3905
3906 /*
3907  * Set encryption parameters for the ioaccel2 request
3908  */
3909 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3910         struct CommandList *c, struct io_accel2_cmd *cp)
3911 {
3912         struct scsi_cmnd *cmd = c->scsi_cmd;
3913         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3914         struct raid_map_data *map = &dev->raid_map;
3915         u64 first_block;
3916
3917         /* Are we doing encryption on this device? */
3918         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3919                 return;
3920         /* Set the data encryption key index. */
3921         cp->dekindex = map->dekindex;
3922
3923         /* Set the encryption enable flag, encoded into direction field. */
3924         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3925
3926         /* Set encryption tweak values based on the logical block address.
3927          * If the block size is 512, the tweak value is the LBA.
3928          * For other block sizes, the tweak is (LBA * block size) / 512.
3929          */
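        /*
         * Worked example (illustrative numbers): on a volume with a
         * 4096-byte block size, first_block 8 yields a tweak of
         * 8 * 4096 / 512 = 64.
         */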
3930         switch (cmd->cmnd[0]) {
3931         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3932         case WRITE_6:
3933         case READ_6:
3934                 first_block = ((cmd->cmnd[1] & 0x1f) << 16) | get_unaligned_be16(&cmd->cmnd[2]);
3935                 break;
3936         case WRITE_10:
3937         case READ_10:
3938         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3939         case WRITE_12:
3940         case READ_12:
3941                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3942                 break;
3943         case WRITE_16:
3944         case READ_16:
3945                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3946                 break;
3947         default:
3948                 dev_err(&h->pdev->dev,
3949                         "ERROR: %s: size (0x%x) not supported for encryption\n",
3950                         __func__, cmd->cmnd[0]);
3951                 BUG();
3952                 break;
3953         }
3954
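        /*
         * Worked example (assumed values): with a 4096-byte volume block
         * size, LBA 100 yields a tweak of 100 * 4096 / 512 = 800.
         */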
3955         if (le32_to_cpu(map->volume_blk_size) != 512)
3956                 first_block = first_block *
3957                                 le32_to_cpu(map->volume_blk_size)/512;
3958
3959         cp->tweak_lower = cpu_to_le32(first_block);
3960         cp->tweak_upper = cpu_to_le32(first_block >> 32);
3961 }
3962
3963 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3964         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3965         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3966 {
3967         struct scsi_cmnd *cmd = c->scsi_cmd;
3968         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3969         struct ioaccel2_sg_element *curr_sg;
3970         int use_sg, i;
3971         struct scatterlist *sg;
3972         u64 addr64;
3973         u32 len;
3974         u32 total_len = 0;
3975
3976         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3977
3978         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3979                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3980                 return IO_ACCEL_INELIGIBLE;
3981         }
3982
3983         c->cmd_type = CMD_IOACCEL2;
3984         /* Adjust the DMA address to point to the accelerated command buffer */
3985         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3986                                 (c->cmdindex * sizeof(*cp));
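        /* ioaccel2 command buffers are 128-byte aligned: the low 7 bits of busaddr must be clear. */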
3987         BUG_ON(c->busaddr & 0x0000007F);
3988
3989         memset(cp, 0, sizeof(*cp));
3990         cp->IU_type = IOACCEL2_IU_TYPE;
3991
3992         use_sg = scsi_dma_map(cmd);
3993         if (use_sg < 0) {
3994                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3995                 return use_sg;
3996         }
3997
3998         if (use_sg) {
3999                 curr_sg = cp->sg;
4000                 if (use_sg > h->ioaccel_maxsg) {
4001                         addr64 = le64_to_cpu(
4002                                 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4003                         curr_sg->address = cpu_to_le64(addr64);
4004                         curr_sg->length = 0;
4005                         curr_sg->reserved[0] = 0;
4006                         curr_sg->reserved[1] = 0;
4007                         curr_sg->reserved[2] = 0;
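                        /* 0x80 flags this element as a chain pointer to the external SG block. */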
4008                         curr_sg->chain_indicator = 0x80;
4009
4010                         curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4011                 }
4012                 scsi_for_each_sg(cmd, sg, use_sg, i) {
4013                         addr64 = (u64) sg_dma_address(sg);
4014                         len  = sg_dma_len(sg);
4015                         total_len += len;
4016                         curr_sg->address = cpu_to_le64(addr64);
4017                         curr_sg->length = cpu_to_le32(len);
4018                         curr_sg->reserved[0] = 0;
4019                         curr_sg->reserved[1] = 0;
4020                         curr_sg->reserved[2] = 0;
4021                         curr_sg->chain_indicator = 0;
4022                         curr_sg++;
4023                 }
4024
4025                 switch (cmd->sc_data_direction) {
4026                 case DMA_TO_DEVICE:
4027                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4028                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
4029                         break;
4030                 case DMA_FROM_DEVICE:
4031                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4032                         cp->direction |= IOACCEL2_DIR_DATA_IN;
4033                         break;
4034                 case DMA_NONE:
4035                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4036                         cp->direction |= IOACCEL2_DIR_NO_DATA;
4037                         break;
4038                 default:
4039                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4040                                 cmd->sc_data_direction);
4041                         BUG();
4042                         break;
4043                 }
4044         } else {
4045                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4046                 cp->direction |= IOACCEL2_DIR_NO_DATA;
4047         }
4048
4049         /* Set encryption parameters, if necessary */
4050         set_encrypt_ioaccel2(h, c, cp);
4051
4052         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4053         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4054         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4055
4056         cp->data_len = cpu_to_le32(total_len);
4057         cp->err_ptr = cpu_to_le64(c->busaddr +
4058                         offsetof(struct io_accel2_cmd, error_data));
4059         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4060
4061         /* fill in sg elements */
4062         if (use_sg > h->ioaccel_maxsg) {
4063                 cp->sg_count = 1;
4064                 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4065                         atomic_dec(&phys_disk->ioaccel_cmds_out);
4066                         scsi_dma_unmap(cmd);
4067                         return -1;
4068                 }
4069         } else
4070                 cp->sg_count = (u8) use_sg;
4071
4072         enqueue_cmd_and_start_io(h, c);
4073         return 0;
4074 }
4075
4076 /*
4077  * Queue a command to the correct I/O accelerator path.
4078  */
4079 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4080         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4081         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4082 {
4083         /* Try to honor the device's queue depth */
4084         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4085                                         phys_disk->queue_depth) {
4086                 atomic_dec(&phys_disk->ioaccel_cmds_out);
4087                 return IO_ACCEL_INELIGIBLE;
4088         }
4089         if (h->transMethod & CFGTBL_Trans_io_accel1)
4090                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4091                                                 cdb, cdb_len, scsi3addr,
4092                                                 phys_disk);
4093         else
4094                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4095                                                 cdb, cdb_len, scsi3addr,
4096                                                 phys_disk);
4097 }
4098
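/*
 * Helper for mirrored layouts: steer *map_index into the mirror group
 * selected by offload_to_mirror, stepping through groups until the
 * requested one is reached.
 */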
4099 static void raid_map_helper(struct raid_map_data *map,
4100                 int offload_to_mirror, u32 *map_index, u32 *current_group)
4101 {
4102         if (offload_to_mirror == 0) {
4103                 /* use physical disk in the first mirrored group. */
4104                 *map_index %= le16_to_cpu(map->data_disks_per_row);
4105                 return;
4106         }
4107         do {
4108                 /* determine mirror group that *map_index indicates */
4109                 *current_group = *map_index /
4110                         le16_to_cpu(map->data_disks_per_row);
4111                 if (offload_to_mirror == *current_group)
4112                         continue;
4113                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4114                         /* select map index from next group */
4115                         *map_index += le16_to_cpu(map->data_disks_per_row);
4116                         (*current_group)++;
4117                 } else {
4118                         /* select map index from first group */
4119                         *map_index %= le16_to_cpu(map->data_disks_per_row);
4120                         *current_group = 0;
4121                 }
4122         } while (offload_to_mirror != *current_group);
4123 }
4124
4125 /*
4126  * Attempt to perform offload RAID mapping for a logical volume I/O.
4127  */
4128 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4129         struct CommandList *c)
4130 {
4131         struct scsi_cmnd *cmd = c->scsi_cmd;
4132         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4133         struct raid_map_data *map = &dev->raid_map;
4134         struct raid_map_disk_data *dd = &map->data[0];
4135         int is_write = 0;
4136         u32 map_index;
4137         u64 first_block, last_block;
4138         u32 block_cnt;
4139         u32 blocks_per_row;
4140         u64 first_row, last_row;
4141         u32 first_row_offset, last_row_offset;
4142         u32 first_column, last_column;
4143         u64 r0_first_row, r0_last_row;
4144         u32 r5or6_blocks_per_row;
4145         u64 r5or6_first_row, r5or6_last_row;
4146         u32 r5or6_first_row_offset, r5or6_last_row_offset;
4147         u32 r5or6_first_column, r5or6_last_column;
4148         u32 total_disks_per_row;
4149         u32 stripesize;
4150         u32 first_group, last_group, current_group;
4151         u32 map_row;
4152         u32 disk_handle;
4153         u64 disk_block;
4154         u32 disk_block_cnt;
4155         u8 cdb[16];
4156         u8 cdb_len;
4157         u16 strip_size;
4158 #if BITS_PER_LONG == 32
4159         u64 tmpdiv;
4160 #endif
4161         int offload_to_mirror;
4162
4163         /* check for valid opcode, get LBA and block count */
4164         switch (cmd->cmnd[0]) {
4165         case WRITE_6:
4166                 is_write = 1;
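                /* fall through */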
4167         case READ_6:
4168                 first_block =
4169                         (((u64) cmd->cmnd[2]) << 8) |
4170                         cmd->cmnd[3];
4171                 block_cnt = cmd->cmnd[4];
4172                 if (block_cnt == 0)
4173                         block_cnt = 256;
4174                 break;
4175         case WRITE_10:
4176                 is_write = 1;
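                /* fall through */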
4177         case READ_10:
4178                 first_block =
4179                         (((u64) cmd->cmnd[2]) << 24) |
4180                         (((u64) cmd->cmnd[3]) << 16) |
4181                         (((u64) cmd->cmnd[4]) << 8) |
4182                         cmd->cmnd[5];
4183                 block_cnt =
4184                         (((u32) cmd->cmnd[7]) << 8) |
4185                         cmd->cmnd[8];
4186                 break;
4187         case WRITE_12:
4188                 is_write = 1;
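                /* fall through */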
4189         case READ_12:
4190                 first_block =
4191                         (((u64) cmd->cmnd[2]) << 24) |
4192                         (((u64) cmd->cmnd[3]) << 16) |
4193                         (((u64) cmd->cmnd[4]) << 8) |
4194                         cmd->cmnd[5];
4195                 block_cnt =
4196                         (((u32) cmd->cmnd[6]) << 24) |
4197                         (((u32) cmd->cmnd[7]) << 16) |
4198                         (((u32) cmd->cmnd[8]) << 8) |
4199                         cmd->cmnd[9];
4200                 break;
4201         case WRITE_16:
4202                 is_write = 1;
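                /* fall through */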
4203         case READ_16:
4204                 first_block =
4205                         (((u64) cmd->cmnd[2]) << 56) |
4206                         (((u64) cmd->cmnd[3]) << 48) |
4207                         (((u64) cmd->cmnd[4]) << 40) |
4208                         (((u64) cmd->cmnd[5]) << 32) |
4209                         (((u64) cmd->cmnd[6]) << 24) |
4210                         (((u64) cmd->cmnd[7]) << 16) |
4211                         (((u64) cmd->cmnd[8]) << 8) |
4212                         cmd->cmnd[9];
4213                 block_cnt =
4214                         (((u32) cmd->cmnd[10]) << 24) |
4215                         (((u32) cmd->cmnd[11]) << 16) |
4216                         (((u32) cmd->cmnd[12]) << 8) |
4217                         cmd->cmnd[13];
4218                 break;
4219         default:
4220                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4221         }
4222         last_block = first_block + block_cnt - 1;
4223
4224         /* check for write to non-RAID-0 */
4225         if (is_write && dev->raid_level != 0)
4226                 return IO_ACCEL_INELIGIBLE;
4227
4228         /* check for invalid block or wraparound */
4229         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4230                 last_block < first_block)
4231                 return IO_ACCEL_INELIGIBLE;
4232
4233         /* calculate stripe information for the request */
4234         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4235                                 le16_to_cpu(map->strip_size);
4236         strip_size = le16_to_cpu(map->strip_size);
4237 #if BITS_PER_LONG == 32
4238         tmpdiv = first_block;
4239         (void) do_div(tmpdiv, blocks_per_row);
4240         first_row = tmpdiv;
4241         tmpdiv = last_block;
4242         (void) do_div(tmpdiv, blocks_per_row);
4243         last_row = tmpdiv;
4244         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4245         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4246         tmpdiv = first_row_offset;
4247         (void) do_div(tmpdiv, strip_size);
4248         first_column = tmpdiv;
4249         tmpdiv = last_row_offset;
4250         (void) do_div(tmpdiv, strip_size);
4251         last_column = tmpdiv;
4252 #else
4253         first_row = first_block / blocks_per_row;
4254         last_row = last_block / blocks_per_row;
4255         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4256         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4257         first_column = first_row_offset / strip_size;
4258         last_column = last_row_offset / strip_size;
4259 #endif
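        /*
         * Worked example (assumed geometry): 3 data disks with a strip size
         * of 128 blocks give blocks_per_row = 384.  An I/O at LBA 500 lands
         * in row 500 / 384 = 1, at row offset 500 - 384 = 116, which is
         * column 116 / 128 = 0.
         */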
4260
4261         /* if this isn't a single row/column then give to the controller */
4262         if ((first_row != last_row) || (first_column != last_column))
4263                 return IO_ACCEL_INELIGIBLE;
4264
4265         /* proceeding with driver mapping */
4266         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4267                                 le16_to_cpu(map->metadata_disks_per_row);
4268         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4269                                 le16_to_cpu(map->row_cnt);
4270         map_index = (map_row * total_disks_per_row) + first_column;
4271
4272         switch (dev->raid_level) {
4273         case HPSA_RAID_0:
4274                 break; /* nothing special to do */
4275         case HPSA_RAID_1:
4276                 /* Handles load balance across RAID 1 members.
4277                  * (2-drive R1 and R10 with even # of drives.)
4278                  * Appropriate for SSDs, not optimal for HDDs
4279                  */
4280                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4281                 if (dev->offload_to_mirror)
4282                         map_index += le16_to_cpu(map->data_disks_per_row);
4283                 dev->offload_to_mirror = !dev->offload_to_mirror;
4284                 break;
4285         case HPSA_RAID_ADM:
4286                 /* Handles N-way mirrors (R1-ADM)
4287                  * and R10 with # of drives divisible by 3.
4288                  */
4289                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4290
4291                 offload_to_mirror = dev->offload_to_mirror;
4292                 raid_map_helper(map, offload_to_mirror,
4293                                 &map_index, &current_group);
4294                 /* set mirror group to use next time */
4295                 offload_to_mirror =
4296                         (offload_to_mirror >=
4297                         le16_to_cpu(map->layout_map_count) - 1)
4298                         ? 0 : offload_to_mirror + 1;
4299                 dev->offload_to_mirror = offload_to_mirror;
4300                 /* Avoid direct use of dev->offload_to_mirror within this
4301                  * function since multiple threads might simultaneously
4302                  * increment it beyond the range of map->layout_map_count - 1.
4303                  */
4304                 break;
4305         case HPSA_RAID_5:
4306         case HPSA_RAID_6:
4307                 if (le16_to_cpu(map->layout_map_count) <= 1)
4308                         break;
4309
4310                 /* Verify first and last block are in same RAID group */
4311                 r5or6_blocks_per_row =
4312                         le16_to_cpu(map->strip_size) *
4313                         le16_to_cpu(map->data_disks_per_row);
4314                 BUG_ON(r5or6_blocks_per_row == 0);
4315                 stripesize = r5or6_blocks_per_row *
4316                         le16_to_cpu(map->layout_map_count);
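                /*
                 * Example (assumed values): with layout_map_count = 2 and
                 * r5or6_blocks_per_row = 384, stripesize = 768, so LBA 500
                 * falls in group (500 % 768) / 384 = 1.
                 */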
4317 #if BITS_PER_LONG == 32
4318                 tmpdiv = first_block;
4319                 first_group = do_div(tmpdiv, stripesize);
4320                 tmpdiv = first_group;
4321                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4322                 first_group = tmpdiv;
4323                 tmpdiv = last_block;
4324                 last_group = do_div(tmpdiv, stripesize);
4325                 tmpdiv = last_group;
4326                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4327                 last_group = tmpdiv;
4328 #else
4329                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4330                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4331 #endif
4332                 if (first_group != last_group)
4333                         return IO_ACCEL_INELIGIBLE;
4334
4335                 /* Verify request is in a single row of RAID 5/6 */
4336 #if BITS_PER_LONG == 32
4337                 tmpdiv = first_block;
4338                 (void) do_div(tmpdiv, stripesize);
4339                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4340                 tmpdiv = last_block;
4341                 (void) do_div(tmpdiv, stripesize);
4342                 r5or6_last_row = r0_last_row = tmpdiv;
4343 #else
4344                 first_row = r5or6_first_row = r0_first_row =
4345                                                 first_block / stripesize;
4346                 r5or6_last_row = r0_last_row = last_block / stripesize;
4347 #endif
4348                 if (r5or6_first_row != r5or6_last_row)
4349                         return IO_ACCEL_INELIGIBLE;
4350
4351
4352                 /* Verify request is in a single column */
4353 #if BITS_PER_LONG == 32
4354                 tmpdiv = first_block;
4355                 first_row_offset = do_div(tmpdiv, stripesize);
4356                 tmpdiv = first_row_offset;
4357                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4358                 r5or6_first_row_offset = first_row_offset;
4359                 tmpdiv = last_block;
4360                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4361                 tmpdiv = r5or6_last_row_offset;
4362                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4363                 tmpdiv = r5or6_first_row_offset;
4364                 (void) do_div(tmpdiv, map->strip_size);
4365                 first_column = r5or6_first_column = tmpdiv;
4366                 tmpdiv = r5or6_last_row_offset;
4367                 (void) do_div(tmpdiv, map->strip_size);
4368                 r5or6_last_column = tmpdiv;
4369 #else
4370                 first_row_offset = r5or6_first_row_offset =
4371                         (u32)((first_block % stripesize) %
4372                                                 r5or6_blocks_per_row);
4373
4374                 r5or6_last_row_offset =
4375                         (u32)((last_block % stripesize) %
4376                                                 r5or6_blocks_per_row);
4377
4378                 first_column = r5or6_first_column =
4379                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4380                 r5or6_last_column =
4381                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4382 #endif
4383                 if (r5or6_first_column != r5or6_last_column)
4384                         return IO_ACCEL_INELIGIBLE;
4385
4386                 /* Request is eligible */
4387                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4388                         le16_to_cpu(map->row_cnt);
4389
4390                 map_index = (first_group *
4391                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4392                         (map_row * total_disks_per_row) + first_column;
4393                 break;
4394         default:
4395                 return IO_ACCEL_INELIGIBLE;
4396         }
4397
4398         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4399                 return IO_ACCEL_INELIGIBLE;
4400
4401         c->phys_disk = dev->phys_disk[map_index];
4402
4403         disk_handle = dd[map_index].ioaccel_handle;
4404         disk_block = le64_to_cpu(map->disk_starting_blk) +
4405                         first_row * le16_to_cpu(map->strip_size) +
4406                         (first_row_offset - first_column *
4407                         le16_to_cpu(map->strip_size));
4408         disk_block_cnt = block_cnt;
4409
4410         /* handle differing logical/physical block sizes */
4411         if (map->phys_blk_shift) {
4412                 disk_block <<= map->phys_blk_shift;
4413                 disk_block_cnt <<= map->phys_blk_shift;
4414         }
4415         BUG_ON(disk_block_cnt > 0xffff);
4416
4417         /* build the new CDB for the physical disk I/O */
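        /* READ/WRITE(10) carries only a 32-bit LBA, so use a 16-byte CDB beyond 0xffffffff. */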
4418         if (disk_block > 0xffffffff) {
4419                 cdb[0] = is_write ? WRITE_16 : READ_16;
4420                 cdb[1] = 0;
4421                 cdb[2] = (u8) (disk_block >> 56);
4422                 cdb[3] = (u8) (disk_block >> 48);
4423                 cdb[4] = (u8) (disk_block >> 40);
4424                 cdb[5] = (u8) (disk_block >> 32);
4425                 cdb[6] = (u8) (disk_block >> 24);
4426                 cdb[7] = (u8) (disk_block >> 16);
4427                 cdb[8] = (u8) (disk_block >> 8);
4428                 cdb[9] = (u8) (disk_block);
4429                 cdb[10] = (u8) (disk_block_cnt >> 24);
4430                 cdb[11] = (u8) (disk_block_cnt >> 16);
4431                 cdb[12] = (u8) (disk_block_cnt >> 8);
4432                 cdb[13] = (u8) (disk_block_cnt);
4433                 cdb[14] = 0;
4434                 cdb[15] = 0;
4435                 cdb_len = 16;
4436         } else {
4437                 cdb[0] = is_write ? WRITE_10 : READ_10;
4438                 cdb[1] = 0;
4439                 cdb[2] = (u8) (disk_block >> 24);
4440                 cdb[3] = (u8) (disk_block >> 16);
4441                 cdb[4] = (u8) (disk_block >> 8);
4442                 cdb[5] = (u8) (disk_block);
4443                 cdb[6] = 0;
4444                 cdb[7] = (u8) (disk_block_cnt >> 8);
4445                 cdb[8] = (u8) (disk_block_cnt);
4446                 cdb[9] = 0;
4447                 cdb_len = 10;
4448         }
4449         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4450                                                 dev->scsi3addr,
4451                                                 dev->phys_disk[map_index]);
4452 }
4453
4454 /*
4455  * Submit commands down the "normal" RAID stack path.
4456  * All callers to hpsa_ciss_submit must check lockup_detected
4457  * beforehand: optionally before, and always after, calling cmd_alloc.
4458  */
4459 static int hpsa_ciss_submit(struct ctlr_info *h,
4460         struct CommandList *c, struct scsi_cmnd *cmd,
4461         unsigned char scsi3addr[])
4462 {
4463         cmd->host_scribble = (unsigned char *) c;
4464         c->cmd_type = CMD_SCSI;
4465         c->scsi_cmd = cmd;
4466         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4467         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
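        /* Encode the command index in the tag so the completion path can look the command up directly. */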
4468         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4469
4470         /* Fill in the request block... */
4471
4472         c->Request.Timeout = 0;
4473         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4474         c->Request.CDBLen = cmd->cmd_len;
4475         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4476         switch (cmd->sc_data_direction) {
4477         case DMA_TO_DEVICE:
4478                 c->Request.type_attr_dir =
4479                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4480                 break;
4481         case DMA_FROM_DEVICE:
4482                 c->Request.type_attr_dir =
4483                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4484                 break;
4485         case DMA_NONE:
4486                 c->Request.type_attr_dir =
4487                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4488                 break;
4489         case DMA_BIDIRECTIONAL:
4490                 /* This can happen if a buggy application does a scsi passthru
4491                  * and sets both inlen and outlen to non-zero. (See
4492                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
4493                  */
4494
4495                 c->Request.type_attr_dir =
4496                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4497                 /* This is technically wrong, and hpsa controllers should
4498                  * reject it with CMD_INVALID, which is the most correct
4499                  * response, but non-fibre backends appear to let it
4500                  * slide by, and give the same results as if this field
4501                  * were set correctly.  Either way is acceptable for
4502                  * our purposes here.
4503                  */
4504
4505                 break;
4506
4507         default:
4508                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4509                         cmd->sc_data_direction);
4510                 BUG();
4511                 break;
4512         }
4513
4514         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4515                 hpsa_cmd_resolve_and_free(h, c);
4516                 return SCSI_MLQUEUE_HOST_BUSY;
4517         }
4518         enqueue_cmd_and_start_io(h, c);
4519         /* the cmd will come back via the intr handler in complete_scsi_command() */
4520         return 0;
4521 }
4522
4523 static void hpsa_cmd_init(struct ctlr_info *h, int index,
4524                                 struct CommandList *c)
4525 {
4526         dma_addr_t cmd_dma_handle, err_dma_handle;
4527
4528         /* Zero out all of the CommandList except the last field, refcount */
4529         memset(c, 0, offsetof(struct CommandList, refcount));
4530         c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4531         cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4532         c->err_info = h->errinfo_pool + index;
4533         memset(c->err_info, 0, sizeof(*c->err_info));
4534         err_dma_handle = h->errinfo_pool_dhandle
4535             + index * sizeof(*c->err_info);
4536         c->cmdindex = index;
4537         c->busaddr = (u32) cmd_dma_handle;
4538         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4539         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4540         c->h = h;
4541         c->scsi_cmd = SCSI_CMD_IDLE;
4542 }
4543
4544 static void hpsa_preinitialize_commands(struct ctlr_info *h)
4545 {
4546         int i;
4547
4548         for (i = 0; i < h->nr_cmds; i++) {
4549                 struct CommandList *c = h->cmd_pool + i;
4550
4551                 hpsa_cmd_init(h, i, c);
4552                 atomic_set(&c->refcount, 0);
4553         }
4554 }
4555
4556 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4557                                 struct CommandList *c)
4558 {
4559         dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4560
4561         BUG_ON(c->cmdindex != index);
4562
4563         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4564         memset(c->err_info, 0, sizeof(*c->err_info));
4565         c->busaddr = (u32) cmd_dma_handle;
4566 }
4567
4568 static int hpsa_ioaccel_submit(struct ctlr_info *h,
4569                 struct CommandList *c, struct scsi_cmnd *cmd,
4570                 unsigned char *scsi3addr)
4571 {
4572         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4573         int rc = IO_ACCEL_INELIGIBLE;
4574
4575         cmd->host_scribble = (unsigned char *) c;
4576
4577         if (dev->offload_enabled) {
4578                 hpsa_cmd_init(h, c->cmdindex, c);
4579                 c->cmd_type = CMD_SCSI;
4580                 c->scsi_cmd = cmd;
4581                 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4582                 if (rc < 0)     /* scsi_dma_map failed. */
4583                         rc = SCSI_MLQUEUE_HOST_BUSY;
4584         } else if (dev->hba_ioaccel_enabled) {
4585                 hpsa_cmd_init(h, c->cmdindex, c);
4586                 c->cmd_type = CMD_SCSI;
4587                 c->scsi_cmd = cmd;
4588                 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4589                 if (rc < 0)     /* scsi_dma_map failed. */
4590                         rc = SCSI_MLQUEUE_HOST_BUSY;
4591         }
4592         return rc;
4593 }
4594
4595 static void hpsa_command_resubmit_worker(struct work_struct *work)
4596 {
4597         struct scsi_cmnd *cmd;
4598         struct hpsa_scsi_dev_t *dev;
4599         struct CommandList *c = container_of(work, struct CommandList, work);
4600
4601         cmd = c->scsi_cmd;
4602         dev = cmd->device->hostdata;
4603         if (!dev) {
4604                 cmd->result = DID_NO_CONNECT << 16;
4605                 return hpsa_cmd_free_and_done(c->h, c, cmd);
4606         }
4607         if (c->abort_pending)
4608                 return hpsa_cmd_abort_and_free(c->h, c, cmd);
4609         if (c->cmd_type == CMD_IOACCEL2) {
4610                 struct ctlr_info *h = c->h;
4611                 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4612                 int rc;
4613
4614                 if (c2->error_data.serv_response ==
4615                                 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4616                         rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4617                         if (rc == 0)
4618                                 return;
4619                         if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4620                                 /*
4621                                  * If we get here, it means dma mapping failed.
4622                                  * Try again via scsi mid layer, which will
4623                                  * then get SCSI_MLQUEUE_HOST_BUSY.
4624                                  */
4625                                 cmd->result = DID_IMM_RETRY << 16;
4626                                 return hpsa_cmd_free_and_done(h, c, cmd);
4627                         }
4628                         /* else, fall thru and resubmit down CISS path */
4629                 }
4630         }
4631         hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4632         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4633                 /*
4634                  * If we get here, it means dma mapping failed. Try
4635                  * again via scsi mid layer, which will then get
4636                  * SCSI_MLQUEUE_HOST_BUSY.
4637                  *
4638                  * hpsa_ciss_submit will have already freed c
4639                  * if it encountered a dma mapping failure.
4640                  */
4641                 cmd->result = DID_IMM_RETRY << 16;
4642                 cmd->scsi_done(cmd);
4643         }
4644 }
4645
4646 /* Running in lockless mode: struct Scsi_Host->host_lock is not held */
4647 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4648 {
4649         struct ctlr_info *h;
4650         struct hpsa_scsi_dev_t *dev;
4651         unsigned char scsi3addr[8];
4652         struct CommandList *c;
4653         int rc = 0;
4654
4655         /* Get the ptr to our adapter structure out of cmd->host. */
4656         h = sdev_to_hba(cmd->device);
4657
4658         BUG_ON(cmd->request->tag < 0);
4659
4660         dev = cmd->device->hostdata;
4661         if (!dev) {
4662                 cmd->result = DID_NO_CONNECT << 16;
4663                 cmd->scsi_done(cmd);
4664                 return 0;
4665         }
4666
4667         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4668
4669         if (unlikely(lockup_detected(h))) {
4670                 cmd->result = DID_NO_CONNECT << 16;
4671                 cmd->scsi_done(cmd);
4672                 return 0;
4673         }
4674         c = cmd_tagged_alloc(h, cmd);
4675
4676         /*
4677          * Call alternate submit routine for I/O accelerated commands.
4678          * Retries always go down the normal I/O path.
4679          */
4680         if (likely(cmd->retries == 0 &&
4681                 cmd->request->cmd_type == REQ_TYPE_FS &&
4682                 h->acciopath_status)) {
4683                 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4684                 if (rc == 0)
4685                         return 0;
4686                 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4687                         hpsa_cmd_resolve_and_free(h, c);
4688                         return SCSI_MLQUEUE_HOST_BUSY;
4689                 }
4690         }
4691         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4692 }
4693
4694 static void hpsa_scan_complete(struct ctlr_info *h)
4695 {
4696         unsigned long flags;
4697
4698         spin_lock_irqsave(&h->scan_lock, flags);
4699         h->scan_finished = 1;
4700         wake_up_all(&h->scan_wait_queue);
4701         spin_unlock_irqrestore(&h->scan_lock, flags);
4702 }
4703
4704 static void hpsa_scan_start(struct Scsi_Host *sh)
4705 {
4706         struct ctlr_info *h = shost_to_hba(sh);
4707         unsigned long flags;
4708
4709         /*
4710          * Don't let rescans be initiated on a controller known to be locked
4711          * up.  If the controller locks up *during* a rescan, that thread is
4712          * probably hosed, but at least we can prevent new rescan threads from
4713          * piling up on a locked up controller.
4714          */
4715         if (unlikely(lockup_detected(h)))
4716                 return hpsa_scan_complete(h);
4717
4718         /* wait until any scan already in progress is finished. */
4719         while (1) {
4720                 spin_lock_irqsave(&h->scan_lock, flags);
4721                 if (h->scan_finished)
4722                         break;
4723                 spin_unlock_irqrestore(&h->scan_lock, flags);
4724                 wait_event(h->scan_wait_queue, h->scan_finished);
4725                 /* Note: We don't need to worry about a race between this
4726                  * thread and driver unload because the midlayer will
4727                  * have incremented the reference count, so unload won't
4728                  * happen if we're in here.
4729                  */
4730         }
4731         h->scan_finished = 0; /* mark scan as in progress */
4732         spin_unlock_irqrestore(&h->scan_lock, flags);
4733
4734         if (unlikely(lockup_detected(h)))
4735                 return hpsa_scan_complete(h);
4736
4737         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4738
4739         hpsa_scan_complete(h);
4740 }
4741
4742 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4743 {
4744         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4745
4746         if (!logical_drive)
4747                 return -ENODEV;
4748
4749         if (qdepth < 1)
4750                 qdepth = 1;
4751         else if (qdepth > logical_drive->queue_depth)
4752                 qdepth = logical_drive->queue_depth;
4753
4754         return scsi_change_queue_depth(sdev, qdepth);
4755 }
4756
4757 static int hpsa_scan_finished(struct Scsi_Host *sh,
4758         unsigned long elapsed_time)
4759 {
4760         struct ctlr_info *h = shost_to_hba(sh);
4761         unsigned long flags;
4762         int finished;
4763
4764         spin_lock_irqsave(&h->scan_lock, flags);
4765         finished = h->scan_finished;
4766         spin_unlock_irqrestore(&h->scan_lock, flags);
4767         return finished;
4768 }
4769
4770 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
4771 {
4772         struct Scsi_Host *sh;
4773         int error;
4774
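        /*
         * Note: sizeof(h), not sizeof(*h) -- only the ctlr_info pointer is
         * stashed in hostdata[0] below.
         */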
4775         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4776         if (sh == NULL) {
4777                 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
4778                 return -ENOMEM;
4779         }
4780
4781         sh->io_port = 0;
4782         sh->n_io_port = 0;
4783         sh->this_id = -1;
4784         sh->max_channel = 3;
4785         sh->max_cmd_len = MAX_COMMAND_SIZE;
4786         sh->max_lun = HPSA_MAX_LUN;
4787         sh->max_id = HPSA_MAX_LUN;
4788         sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
4789         sh->cmd_per_lun = sh->can_queue;
4790         sh->sg_tablesize = h->maxsgentries;
4791         sh->hostdata[0] = (unsigned long) h;
4792         sh->irq = h->intr[h->intr_mode];
4793         sh->unique_id = sh->irq;
4794         error = scsi_init_shared_tag_map(sh, sh->can_queue);
4795         if (error) {
4796                 dev_err(&h->pdev->dev,
4797                         "%s: scsi_init_shared_tag_map failed for controller %d\n",
4798                         __func__, h->ctlr);
4799                 scsi_host_put(sh);
4800                 return error;
4801         }
4802         h->scsi_host = sh;
4803         return 0;
4804 }
4805
4806 static int hpsa_scsi_add_host(struct ctlr_info *h)
4807 {
4808         int rv;
4809
4810         rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
4811         if (rv) {
4812                 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
4813                 return rv;
4814         }
4815         scsi_scan_host(h->scsi_host);
4816         return 0;
4817 }
4818
4819 /*
4820  * The block layer has already gone to the trouble of picking out a unique,
4821  * small-integer tag for this request.  We use an offset from that value as
4822  * an index to select our command block.  (The offset allows us to reserve the
4823  * low-numbered entries for our own uses.)
4824  */
4825 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
4826 {
4827         int idx = scmd->request->tag;
4828
4829         if (idx < 0)
4830                 return idx;
4831
4832         /* Offset to leave space for internal cmds. */
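        /* e.g. block-layer tag 0 maps to command block HPSA_NRESERVED_CMDS. */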
4833         return idx + HPSA_NRESERVED_CMDS;
4834 }
4835
4836 /*
4837  * Send a TEST_UNIT_READY command to the specified LUN using the specified
4838  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
4839  */
4840 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
4841                                 struct CommandList *c, unsigned char lunaddr[],
4842                                 int reply_queue)
4843 {
4844         int rc;
4845
4846         /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4847         (void) fill_cmd(c, TEST_UNIT_READY, h,
4848                         NULL, 0, 0, lunaddr, TYPE_CMD);
4849         rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4850         if (rc)
4851                 return rc;
4852         /* no unmap needed here because no data xfer. */
4853
4854         /* Check if the unit is already ready. */
4855         if (c->err_info->CommandStatus == CMD_SUCCESS)
4856                 return 0;
4857
4858         /*
4859          * The first command sent after reset will receive "unit attention" to
4860          * indicate that the LUN has been reset...this is actually what we're
4861          * looking for (but, success is good too).
4862          */
4863         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4864                 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4865                         (c->err_info->SenseInfo[2] == NO_SENSE ||
4866                          c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4867                 return 0;
4868
4869         return 1;
4870 }
4871
4872 /*
4873  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
4874  * returns zero when the unit is ready, and non-zero when giving up.
4875  */
4876 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
4877                                 struct CommandList *c,
4878                                 unsigned char lunaddr[], int reply_queue)
4879 {
4880         int rc;
4881         int count = 0;
4882         int waittime = 1; /* seconds */
4883
4884         /* Send test unit ready until device ready, or give up. */
4885         for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
4886
4887                 /*
4888                  * Wait for a bit.  do this first, because if we send
4889                  * the TUR right away, the reset will just abort it.
4890                  */
4891                 msleep(1000 * waittime);
4892
4893                 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
4894                 if (!rc)
4895                         break;
4896
4897                 /* Increase wait time with each try, up to a point. */
4898                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4899                         waittime *= 2;
4900
4901                 dev_warn(&h->pdev->dev,
4902                          "waiting %d secs for device to become ready.\n",
4903                          waittime);
4904         }
4905
4906         return rc;
4907 }
4908
4909 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4910                                            unsigned char lunaddr[],
4911                                            int reply_queue)
4912 {
4913         int first_queue;
4914         int last_queue;
4915         int rq;
4916         int rc = 0;
4917         struct CommandList *c;
4918
4919         c = cmd_alloc(h);
4920
4921         /*
4922          * If no specific reply queue was requested, then send the TUR
4923          * repeatedly, requesting a reply on each reply queue; otherwise execute
4924          * the loop exactly once using only the specified queue.
4925          */
4926         if (reply_queue == DEFAULT_REPLY_QUEUE) {
4927                 first_queue = 0;
4928                 last_queue = h->nreply_queues - 1;
4929         } else {
4930                 first_queue = reply_queue;
4931                 last_queue = reply_queue;
4932         }
4933
4934         for (rq = first_queue; rq <= last_queue; rq++) {
4935                 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
4936                 if (rc)
4937                         break;
4938         }
4939
4940         if (rc)
4941                 dev_warn(&h->pdev->dev, "giving up on device.\n");
4942         else
4943                 dev_warn(&h->pdev->dev, "device is ready.\n");
4944
4945         cmd_free(h, c);
4946         return rc;
4947 }
4948
4949 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4950  * complaining.  Doing a host- or bus-reset can't do anything good here.
4951  */
4952 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4953 {
4954         int rc;
4955         struct ctlr_info *h;
4956         struct hpsa_scsi_dev_t *dev;
4957         char msg[40];
4958
4959         /* find the controller to which the command to be aborted was sent */
4960         h = sdev_to_hba(scsicmd->device);
4961         if (h == NULL) /* paranoia */
4962                 return FAILED;
4963
4964         if (lockup_detected(h))
4965                 return FAILED;
4966
4967         dev = scsicmd->device->hostdata;
4968         if (!dev) {
4969                 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4970                         "device lookup failed.\n");
4971                 return FAILED;
4972         }
4973
4974         /* if controller locked up, we can guarantee command won't complete */
4975         if (lockup_detected(h)) {
4976                 snprintf(msg, sizeof(msg), "cmd %d RESET FAILED, lockup detected",
4977                                 hpsa_get_cmd_index(scsicmd));
4978                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
4979                 return FAILED;
4980         }
4981
4982         /* this reset request might be the result of a lockup; check */
4983         if (detect_controller_lockup(h)) {
4984                 snprintf(msg, sizeof(msg), "cmd %d RESET FAILED, new lockup detected",
4985                                 hpsa_get_cmd_index(scsicmd));
4986                 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
4987                 return FAILED;
4988         }
4989
4990         hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4991
4992         /* send a reset to the SCSI LUN which the command was sent to */
4993         rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4994                              DEFAULT_REPLY_QUEUE);
4995         if (rc == 0)
4996                 return SUCCESS;
4997
4998         dev_warn(&h->pdev->dev,
4999                 "scsi %d:%d:%d:%d reset failed\n",
5000                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
5001         return FAILED;
5002 }
5003
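/*
 * Some controllers expect the 8-byte abort tag with each 32-bit half
 * byte-reversed (see h->needs_abort_tags_swizzled).
 */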
5004 static void swizzle_abort_tag(u8 *tag)
5005 {
5006         u8 original_tag[8];
5007
5008         memcpy(original_tag, tag, 8);
5009         tag[0] = original_tag[3];
5010         tag[1] = original_tag[2];
5011         tag[2] = original_tag[1];
5012         tag[3] = original_tag[0];
5013         tag[4] = original_tag[7];
5014         tag[5] = original_tag[6];
5015         tag[6] = original_tag[5];
5016         tag[7] = original_tag[4];
5017 }
5018
5019 static void hpsa_get_tag(struct ctlr_info *h,
5020         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5021 {
5022         u64 tag;
5023         if (c->cmd_type == CMD_IOACCEL1) {
5024                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5025                         &h->ioaccel_cmd_pool[c->cmdindex];
5026                 tag = le64_to_cpu(cm1->tag);
5027                 *tagupper = cpu_to_le32(tag >> 32);
5028                 *taglower = cpu_to_le32(tag);
5029                 return;
5030         }
5031         if (c->cmd_type == CMD_IOACCEL2) {
5032                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5033                         &h->ioaccel2_cmd_pool[c->cmdindex];
5034                 /* upper tag not used in ioaccel2 mode */
5035                 memset(tagupper, 0, sizeof(*tagupper));
5036                 *taglower = cm2->Tag;
5037                 return;
5038         }
5039         tag = le64_to_cpu(c->Header.tag);
5040         *tagupper = cpu_to_le32(tag >> 32);
5041         *taglower = cpu_to_le32(tag);
5042 }
5043
5044 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5045         struct CommandList *abort, int reply_queue)
5046 {
5047         int rc = IO_OK;
5048         struct CommandList *c;
5049         struct ErrorInfo *ei;
5050         __le32 tagupper, taglower;
5051
5052         c = cmd_alloc(h);
5053
5054         /* fill_cmd can't fail here, no buffer to map */
5055         (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5056                 0, 0, scsi3addr, TYPE_MSG);
5057         if (h->needs_abort_tags_swizzled)
5058                 swizzle_abort_tag(&c->Request.CDB[4]);
5059         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5060         hpsa_get_tag(h, abort, &taglower, &tagupper);
5061         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5062                 __func__, tagupper, taglower);
5063         /* no unmap needed here because no data xfer. */
5064
5065         ei = c->err_info;
5066         switch (ei->CommandStatus) {
5067         case CMD_SUCCESS:
5068                 break;
5069         case CMD_TMF_STATUS:
5070                 rc = hpsa_evaluate_tmf_status(h, c);
5071                 break;
5072         case CMD_UNABORTABLE: /* Very common, don't make noise. */
5073                 rc = -1;
5074                 break;
5075         default:
5076                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5077                         __func__, tagupper, taglower);
5078                 hpsa_scsi_interpret_error(h, c);
5079                 rc = -1;
5080                 break;
5081         }
5082         cmd_free(h, c);
5083         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5084                 __func__, tagupper, taglower);
5085         return rc;
5086 }
5087
5088 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5089         struct CommandList *command_to_abort, int reply_queue)
5090 {
5091         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5092         struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5093         struct io_accel2_cmd *c2a =
5094                 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5095         struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5096         struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5097
5098         /*
5099          * We're overlaying struct hpsa_tmf_struct on top of something which
5100          * was allocated as a struct io_accel2_cmd, so we better be sure it
5101          * actually fits, and doesn't overrun the error info space.
5102          */
5103         BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5104                         sizeof(struct io_accel2_cmd));
5105         BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5106                         offsetof(struct hpsa_tmf_struct, error_len) +
5107                                 sizeof(ac->error_len));
5108
5109         c->cmd_type = IOACCEL2_TMF;
5110         c->scsi_cmd = SCSI_CMD_BUSY;
5111
5112         /* Adjust the DMA address to point to the accelerated command buffer */
5113         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5114                                 (c->cmdindex * sizeof(struct io_accel2_cmd));
5115         BUG_ON(c->busaddr & 0x0000007F);
5116
5117         memset(ac, 0, sizeof(*c2)); /* sizeof(*c2) is correct: zero the whole io_accel2_cmd */
5118         ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5119         ac->reply_queue = reply_queue;
5120         ac->tmf = IOACCEL2_TMF_ABORT;
5121         ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5122         memset(ac->lun_id, 0, sizeof(ac->lun_id));
5123         ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5124         ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5125         ac->error_ptr = cpu_to_le64(c->busaddr +
5126                         offsetof(struct io_accel2_cmd, error_data));
5127         ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5128 }
5129
5130 /* ioaccel2 path firmware cannot handle abort task requests.
5131  * Change abort requests to physical target resets, sent to the
5132  * address of the physical disk used for the ioaccel2 command.
5133  * Return 0 on success (IO_OK)
5134  *       -1 on failure
5135  */
5136
5137 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5138         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5139 {
5140         int rc = IO_OK;
5141         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5142         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5143         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5144         unsigned char *psa = &phys_scsi3addr[0];
5145
5146         /* Get a pointer to the hpsa logical device. */
5147         scmd = abort->scsi_cmd;
5148         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5149         if (dev == NULL) {
5150                 dev_warn(&h->pdev->dev,
5151                         "Cannot abort: no device pointer for command.\n");
5152                 return -1; /* not abortable */
5153         }
5154
5155         if (h->raid_offload_debug > 0)
5156                 dev_info(&h->pdev->dev,
5157                         "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5158                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5159                         "Reset as abort",
5160                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5161                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5162
5163         if (!dev->offload_enabled) {
5164                 dev_warn(&h->pdev->dev,
5165                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5166                 return -1; /* not abortable */
5167         }
5168
5169         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5170         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5171                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5172                 return -1; /* not abortable */
5173         }
5174
5175         /* send the reset */
5176         if (h->raid_offload_debug > 0)
5177                 dev_info(&h->pdev->dev,
5178                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5179                         psa[0], psa[1], psa[2], psa[3],
5180                         psa[4], psa[5], psa[6], psa[7]);
5181         rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5182         if (rc != 0) {
5183                 dev_warn(&h->pdev->dev,
5184                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5185                         psa[0], psa[1], psa[2], psa[3],
5186                         psa[4], psa[5], psa[6], psa[7]);
5187                 return rc; /* failed to reset */
5188         }
5189
5190         /* wait for device to recover */
5191         if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5192                 dev_warn(&h->pdev->dev,
5193                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5194                         psa[0], psa[1], psa[2], psa[3],
5195                         psa[4], psa[5], psa[6], psa[7]);
5196                 return -1;  /* failed to recover */
5197         }
5198
5199         /* device recovered */
5200         dev_info(&h->pdev->dev,
5201                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5202                 psa[0], psa[1], psa[2], psa[3],
5203                 psa[4], psa[5], psa[6], psa[7]);
5204
5205         return rc; /* success */
5206 }
5207
5208 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5209         struct CommandList *abort, int reply_queue)
5210 {
5211         int rc = IO_OK;
5212         struct CommandList *c;
5213         __le32 taglower, tagupper;
5214         struct hpsa_scsi_dev_t *dev;
5215         struct io_accel2_cmd *c2;
5216
5217         dev = abort->scsi_cmd->device->hostdata;
5218         if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5219                 return -1;
5220
5221         c = cmd_alloc(h);
5222         setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5223         c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5224         (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5225         hpsa_get_tag(h, abort, &taglower, &tagupper);
5226         dev_dbg(&h->pdev->dev,
5227                 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5228                 __func__, tagupper, taglower);
5229         /* no unmap needed here because no data xfer. */
5230
5231         dev_dbg(&h->pdev->dev,
5232                 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5233                 __func__, tagupper, taglower, c2->error_data.serv_response);
5234         switch (c2->error_data.serv_response) {
5235         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5236         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5237                 rc = 0;
5238                 break;
5239         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5240         case IOACCEL2_SERV_RESPONSE_FAILURE:
5241         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5242                 rc = -1;
5243                 break;
5244         default:
5245                 dev_warn(&h->pdev->dev,
5246                         "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5247                         __func__, tagupper, taglower,
5248                         c2->error_data.serv_response);
5249                 rc = -1;
5250         }
5251         cmd_free(h, c);
5252         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5253                 tagupper, taglower);
5254         return rc;
5255 }
5256
5257 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5258         unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5259 {
5260         /*
5261          * ioaccel2 mode commands should be aborted via the accelerated
5262          * path, since the RAID path is unaware of these commands,
5263          * but not all underlying firmware can handle abort TMF.
5264          * Change abort to physical device reset when abort TMF is unsupported.
5265          */
5266         if (abort->cmd_type == CMD_IOACCEL2) {
5267                 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5268                         return hpsa_send_abort_ioaccel2(h, abort,
5269                                                 reply_queue);
5270                 else
5271                         return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5272                                                         abort, reply_queue);
5273         }
5274         return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5275 }
5276
5277 /* Find out which reply queue a command was meant to return on */
5278 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5279                                         struct CommandList *c)
5280 {
5281         if (c->cmd_type == CMD_IOACCEL2)
5282                 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5283         return c->Header.ReplyQueue;
5284 }
5285
5286 /*
5287  * Limit concurrency of abort commands to prevent
5288  * over-subscription of commands
5289  */
5290 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5291 {
5292 #define ABORT_CMD_WAIT_MSECS 5000
5293         return !wait_event_timeout(h->abort_cmd_wait_queue,
5294                         atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5295                         msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5296 }
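
/*
 * A minimal sketch (not part of the driver; the helper name is
 * hypothetical) of the release side that pairs with
 * wait_for_available_abort_cmd() above.  hpsa_eh_abort_handler() below
 * open-codes exactly this once the abort has been sent.
 */
static inline void example_release_abort_cmd(struct ctlr_info *h)
{
        /* Return the slot, then wake anyone blocked in wait_event_timeout(). */
        atomic_inc(&h->abort_cmds_available);
        wake_up_all(&h->abort_cmd_wait_queue);
}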
5297
5298 /*
5299  * Send an abort for the specified command.  If the device and
5300  * controller support it, send a task abort request.
5301  */
5302 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5303 {
5304
5305         int rc;
5306         struct ctlr_info *h;
5307         struct hpsa_scsi_dev_t *dev;
5308         struct CommandList *abort; /* pointer to command to be aborted */
5309         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
5310         char msg[256];          /* For debug messaging. */
5311         int ml = 0;
5312         __le32 tagupper, taglower;
5313         int refcount, reply_queue;
5314
5315         if (sc == NULL)
5316                 return FAILED;
5317
5318         if (sc->device == NULL)
5319                 return FAILED;
5320
5321         /* Find the controller of the command to be aborted */
5322         h = sdev_to_hba(sc->device);
5323         if (h == NULL)
5324                 return FAILED;
5325
5326         /* Find the device of the command to be aborted */
5327         dev = sc->device->hostdata;
5328         if (!dev) {
5329                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5330                                 __func__);
5331                 return FAILED;
5332         }
5333
5334         /* If controller locked up, we can guarantee command won't complete */
5335         if (lockup_detected(h)) {
5336                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5337                                         "ABORT FAILED, lockup detected");
5338                 return FAILED;
5339         }
5340
5341         /* This is a good time to check if controller lockup has occurred */
5342         if (detect_controller_lockup(h)) {
5343                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5344                                         "ABORT FAILED, new lockup detected");
5345                 return FAILED;
5346         }
5347
5348         /* Check that controller supports some kind of task abort */
5349         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5350                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5351                 return FAILED;
5352
5353         memset(msg, 0, sizeof(msg));
5354         ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5355                 h->scsi_host->host_no, sc->device->channel,
5356                 sc->device->id, sc->device->lun,
5357                 "Aborting command", sc);
5358
5359         /* Get SCSI command to be aborted */
5360         abort = (struct CommandList *) sc->host_scribble;
5361         if (abort == NULL) {
5362                 /* This can happen if the command already completed. */
5363                 return SUCCESS;
5364         }
5365         refcount = atomic_inc_return(&abort->refcount);
5366         if (refcount == 1) { /* Command is done already. */
5367                 cmd_free(h, abort);
5368                 return SUCCESS;
5369         }
5370
5371         /* Don't bother trying the abort if we know it won't work. */
5372         if (abort->cmd_type != CMD_IOACCEL2 &&
5373                 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5374                 cmd_free(h, abort);
5375                 return FAILED;
5376         }
5377
5378         /*
5379          * Check that we're aborting the right command.
5380          * It's possible the CommandList already completed and got re-used.
5381          */
5382         if (abort->scsi_cmd != sc) {
5383                 cmd_free(h, abort);
5384                 return SUCCESS;
5385         }
5386
5387         abort->abort_pending = true;
5388         hpsa_get_tag(h, abort, &taglower, &tagupper);
5389         reply_queue = hpsa_extract_reply_queue(h, abort);
5390         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5391         as  = abort->scsi_cmd;
5392         if (as != NULL)
5393                 ml += sprintf(msg+ml,
5394                         "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5395                         as->cmd_len, as->cmnd[0], as->cmnd[1],
5396                         as->serial_number);
5397         dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5398         hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5399
5400         /*
5401          * Command is in flight, or possibly already completed
5402          * by the firmware (but not to the scsi mid layer) but we can't
5403          * distinguish which.  Send the abort down.
5404          */
5405         if (wait_for_available_abort_cmd(h)) {
5406                 dev_warn(&h->pdev->dev,
5407                         "%s FAILED, timeout waiting for an abort command to become available.\n",
5408                         msg);
5409                 cmd_free(h, abort);
5410                 return FAILED;
5411         }
5412         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5413         atomic_inc(&h->abort_cmds_available);
5414         wake_up_all(&h->abort_cmd_wait_queue);
5415         if (rc != 0) {
5416                 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
5417                 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5418                                 "FAILED to abort command");
5419                 cmd_free(h, abort);
5420                 return FAILED;
5421         }
5422         dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5423         wait_event(h->abort_sync_wait_queue,
5424                    abort->scsi_cmd != sc || lockup_detected(h));
5425         cmd_free(h, abort);
5426         return !lockup_detected(h) ? SUCCESS : FAILED;
5427 }
5428
5429 /*
5430  * For operations with an associated SCSI command, a command block is
5431  * allocated at init and handed out by cmd_tagged_alloc(), which uses
5432  * the block layer request tag as an index into a table of entries.
5433  * cmd_tagged_free() is the complement, although cmd_free() may be called instead.
5434  */
5435 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5436                                             struct scsi_cmnd *scmd)
5437 {
5438         int idx = hpsa_get_cmd_index(scmd);
5439         struct CommandList *c = h->cmd_pool + idx;
5440
5441         if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5442                 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5443                         idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5444                 /* The index value comes from the block layer, so if it's out of
5445                  * bounds, it's probably not our bug.
5446                  */
5447                 BUG();
5448         }
5449
5450         atomic_inc(&c->refcount);
5451         if (unlikely(!hpsa_is_cmd_idle(c))) {
5452                 /*
5453                  * We expect that the SCSI layer will hand us a unique tag
5454                  * value.  Thus, there should never be a collision here between
5455                  * two requests: if the selected command isn't idle, something
5456                  * has gone badly wrong.
5457                  */
5458                 dev_err(&h->pdev->dev,
5459                         "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5460                         idx);
5461                 if (c->scsi_cmd != NULL)
5462                         scsi_print_command(c->scsi_cmd);
5463                 scsi_print_command(scmd);
5464         }
5465
5466         hpsa_cmd_partial_init(h, idx, c);
5467         return c;
5468 }
5469
5470 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5471 {
5472         /*
5473          * Release our reference to the block.  We don't need to do anything
5474          * else to free it, because it is accessed by index.  (There's no point
5475          * in checking the result of the decrement, since we cannot guarantee
5476          * that there isn't a concurrent abort which is also accessing it.)
5477          */
5478         (void)atomic_dec(&c->refcount);
5479 }
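
/*
 * Illustrative sketch, assuming hpsa_get_cmd_index() (defined elsewhere
 * in this file) offsets the block layer request tag past the reserved
 * commands; that offset is what keeps cmd_tagged_alloc() and cmd_alloc()
 * from ever handing out the same slot.  The helper name is hypothetical.
 */
static inline int example_cmd_index(struct scsi_cmnd *scmd)
{
        /* Assumption: block layer tags are dense and start at 0. */
        return scmd->request->tag + HPSA_NRESERVED_CMDS;
}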
5480
5481 /*
5482  * For operations that cannot sleep, a command block is allocated at init,
5483  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5484  * which ones are free or in use.  Lock must be held when calling this.
5485  * cmd_free() is the complement.
5486  * This function never gives up and never returns NULL.  If it hangs,
5487  * another thread must call cmd_free() to free some tags.
5488  */
5489
5490 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5491 {
5492         struct CommandList *c;
5493         int refcount, i;
5494         int offset = 0;
5495
5496         /*
5497          * There is some *extremely* small but non-zero chance that
5498          * multiple threads could get in here, and one thread could
5499          * be scanning through the list of bits looking for a free
5500          * one, but the free ones are always behind it, and other
5501          * threads sneak in behind it and eat them before it can
5502          * get to them, so that while there is always a free one, a
5503          * very unlucky thread might be starved anyway, never able to
5504          * beat the other threads.  In reality, this happens so
5505          * infrequently as to be indistinguishable from never.
5506          *
5507          * Note that we start allocating commands before the SCSI host structure
5508          * is initialized.  Since the search starts at bit zero, this
5509          * all works, since we have at least one command structure available;
5510          * however, it means that the structures with the low indexes have to be
5511          * reserved for driver-initiated requests, while requests from the block
5512          * layer will use the higher indexes.
5513          */
5514
5515         for (;;) {
5516                 i = find_next_zero_bit(h->cmd_pool_bits,
5517                                         HPSA_NRESERVED_CMDS,
5518                                         offset);
5519                 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
5520                         offset = 0;
5521                         continue;
5522                 }
5523                 c = h->cmd_pool + i;
5524                 refcount = atomic_inc_return(&c->refcount);
5525                 if (unlikely(refcount > 1)) {
5526                         cmd_free(h, c); /* already in use */
5527                         offset = (i + 1) % HPSA_NRESERVED_CMDS;
5528                         continue;
5529                 }
5530                 set_bit(i & (BITS_PER_LONG - 1),
5531                         h->cmd_pool_bits + (i / BITS_PER_LONG));
5532                 break; /* it's ours now. */
5533         }
5534         hpsa_cmd_partial_init(h, i, c);
5535         return c;
5536 }
5537
5538 /*
5539  * This is the complementary operation to cmd_alloc().  Note, however, in some
5540  * corner cases it may also be used to free blocks allocated by
5541  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5542  * the clear-bit is harmless.
5543  */
5544 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5545 {
5546         if (atomic_dec_and_test(&c->refcount)) {
5547                 int i;
5548
5549                 i = c - h->cmd_pool;
5550                 clear_bit(i & (BITS_PER_LONG - 1),
5551                           h->cmd_pool_bits + (i / BITS_PER_LONG));
5552         }
5553 }
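
/*
 * Aside (illustrative): the open-coded word/bit arithmetic in cmd_alloc()
 * and cmd_free() above is equivalent to letting the bitops do the
 * indexing, since set_bit() and clear_bit() take a bit number relative
 * to the base address:
 *
 *      set_bit(i, h->cmd_pool_bits);
 *      clear_bit(i, h->cmd_pool_bits);
 */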
5554
5555 #ifdef CONFIG_COMPAT
5556
5557 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5558         void __user *arg)
5559 {
5560         IOCTL32_Command_struct __user *arg32 =
5561             (IOCTL32_Command_struct __user *) arg;
5562         IOCTL_Command_struct arg64;
5563         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5564         int err;
5565         u32 cp;
5566
5567         memset(&arg64, 0, sizeof(arg64));
5568         err = 0;
5569         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5570                            sizeof(arg64.LUN_info));
5571         err |= copy_from_user(&arg64.Request, &arg32->Request,
5572                            sizeof(arg64.Request));
5573         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5574                            sizeof(arg64.error_info));
5575         err |= get_user(arg64.buf_size, &arg32->buf_size);
5576         err |= get_user(cp, &arg32->buf);
5577         arg64.buf = compat_ptr(cp);
5578         err |= copy_to_user(p, &arg64, sizeof(arg64));
5579
5580         if (err)
5581                 return -EFAULT;
5582
5583         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5584         if (err)
5585                 return err;
5586         err |= copy_in_user(&arg32->error_info, &p->error_info,
5587                          sizeof(arg32->error_info));
5588         if (err)
5589                 return -EFAULT;
5590         return err;
5591 }
5592
5593 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5594         int cmd, void __user *arg)
5595 {
5596         BIG_IOCTL32_Command_struct __user *arg32 =
5597             (BIG_IOCTL32_Command_struct __user *) arg;
5598         BIG_IOCTL_Command_struct arg64;
5599         BIG_IOCTL_Command_struct __user *p =
5600             compat_alloc_user_space(sizeof(arg64));
5601         int err;
5602         u32 cp;
5603
5604         memset(&arg64, 0, sizeof(arg64));
5605         err = 0;
5606         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5607                            sizeof(arg64.LUN_info));
5608         err |= copy_from_user(&arg64.Request, &arg32->Request,
5609                            sizeof(arg64.Request));
5610         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5611                            sizeof(arg64.error_info));
5612         err |= get_user(arg64.buf_size, &arg32->buf_size);
5613         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5614         err |= get_user(cp, &arg32->buf);
5615         arg64.buf = compat_ptr(cp);
5616         err |= copy_to_user(p, &arg64, sizeof(arg64));
5617
5618         if (err)
5619                 return -EFAULT;
5620
5621         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
5622         if (err)
5623                 return err;
5624         err |= copy_in_user(&arg32->error_info, &p->error_info,
5625                          sizeof(arg32->error_info));
5626         if (err)
5627                 return -EFAULT;
5628         return err;
5629 }
5630
5631 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5632 {
5633         switch (cmd) {
5634         case CCISS_GETPCIINFO:
5635         case CCISS_GETINTINFO:
5636         case CCISS_SETINTINFO:
5637         case CCISS_GETNODENAME:
5638         case CCISS_SETNODENAME:
5639         case CCISS_GETHEARTBEAT:
5640         case CCISS_GETBUSTYPES:
5641         case CCISS_GETFIRMVER:
5642         case CCISS_GETDRIVVER:
5643         case CCISS_REVALIDVOLS:
5644         case CCISS_DEREGDISK:
5645         case CCISS_REGNEWDISK:
5646         case CCISS_REGNEWD:
5647         case CCISS_RESCANDISK:
5648         case CCISS_GETLUNINFO:
5649                 return hpsa_ioctl(dev, cmd, arg);
5650
5651         case CCISS_PASSTHRU32:
5652                 return hpsa_ioctl32_passthru(dev, cmd, arg);
5653         case CCISS_BIG_PASSTHRU32:
5654                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5655
5656         default:
5657                 return -ENOIOCTLCMD;
5658         }
5659 }
5660 #endif
5661
5662 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5663 {
5664         struct hpsa_pci_info pciinfo;
5665
5666         if (!argp)
5667                 return -EINVAL;
5668         pciinfo.domain = pci_domain_nr(h->pdev->bus);
5669         pciinfo.bus = h->pdev->bus->number;
5670         pciinfo.dev_fn = h->pdev->devfn;
5671         pciinfo.board_id = h->board_id;
5672         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5673                 return -EFAULT;
5674         return 0;
5675 }
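
/*
 * Illustrative user-space caller (a sketch: assumes a SCSI generic node
 * such as /dev/sg0 is bound to an hpsa device; error handling elided):
 *
 *      struct hpsa_pci_info info;
 *      int fd = open("/dev/sg0", O_RDWR);
 *
 *      if (ioctl(fd, CCISS_GETPCIINFO, &info) == 0)
 *              printf("%04x:%02x:%02x.%d board 0x%08x\n",
 *                     info.domain, info.bus, info.dev_fn >> 3,
 *                     info.dev_fn & 7, info.board_id);
 */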
5676
5677 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5678 {
5679         DriverVer_type DriverVer;
5680         unsigned char vmaj, vmin, vsubmin;
5681         int rc;
5682
5683         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5684                 &vmaj, &vmin, &vsubmin);
5685         if (rc != 3) {
5686                 dev_info(&h->pdev->dev, "driver version string '%s' "
5687                         "unrecognized.", HPSA_DRIVER_VERSION);
5688                 vmaj = 0;
5689                 vmin = 0;
5690                 vsubmin = 0;
5691         }
5692         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5693         if (!argp)
5694                 return -EINVAL;
5695         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5696                 return -EFAULT;
5697         return 0;
5698 }
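
/*
 * Worked example of the packing above: driver version "3.4.4" parses to
 * vmaj = 3, vmin = 4, vsubmin = 4, so DriverVer == 0x030404, which a
 * caller unpacks as:
 *
 *      vmaj    = (DriverVer >> 16) & 0xFF;
 *      vmin    = (DriverVer >>  8) & 0xFF;
 *      vsubmin =  DriverVer        & 0xFF;
 */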
5699
5700 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5701 {
5702         IOCTL_Command_struct iocommand;
5703         struct CommandList *c;
5704         char *buff = NULL;
5705         u64 temp64;
5706         int rc = 0;
5707
5708         if (!argp)
5709                 return -EINVAL;
5710         if (!capable(CAP_SYS_RAWIO))
5711                 return -EPERM;
5712         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5713                 return -EFAULT;
5714         if ((iocommand.buf_size < 1) &&
5715             (iocommand.Request.Type.Direction != XFER_NONE)) {
5716                 return -EINVAL;
5717         }
5718         if (iocommand.buf_size > 0) {
5719                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5720                 if (buff == NULL)
5721                         return -ENOMEM;
5722                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5723                         /* Copy the data into the buffer we created */
5724                         if (copy_from_user(buff, iocommand.buf,
5725                                 iocommand.buf_size)) {
5726                                 rc = -EFAULT;
5727                                 goto out_kfree;
5728                         }
5729                 } else {
5730                         memset(buff, 0, iocommand.buf_size);
5731                 }
5732         }
5733         c = cmd_alloc(h);
5734
5735         /* Fill in the command type */
5736         c->cmd_type = CMD_IOCTL_PEND;
5737         c->scsi_cmd = SCSI_CMD_BUSY;
5738         /* Fill in Command Header */
5739         c->Header.ReplyQueue = 0; /* unused in simple mode */
5740         if (iocommand.buf_size > 0) {   /* buffer to fill */
5741                 c->Header.SGList = 1;
5742                 c->Header.SGTotal = cpu_to_le16(1);
5743         } else  { /* no buffers to fill */
5744                 c->Header.SGList = 0;
5745                 c->Header.SGTotal = cpu_to_le16(0);
5746         }
5747         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
5748
5749         /* Fill in Request block */
5750         memcpy(&c->Request, &iocommand.Request,
5751                 sizeof(c->Request));
5752
5753         /* Fill in the scatter gather information */
5754         if (iocommand.buf_size > 0) {
5755                 temp64 = pci_map_single(h->pdev, buff,
5756                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5757                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5758                         c->SG[0].Addr = cpu_to_le64(0);
5759                         c->SG[0].Len = cpu_to_le32(0);
5760                         rc = -ENOMEM;
5761                         goto out;
5762                 }
5763                 c->SG[0].Addr = cpu_to_le64(temp64);
5764                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5765                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
5766         }
5767         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5768         if (iocommand.buf_size > 0)
5769                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5770         check_ioctl_unit_attention(h, c);
5771         if (rc) {
5772                 rc = -EIO;
5773                 goto out;
5774         }
5775
5776         /* Copy the error information out */
5777         memcpy(&iocommand.error_info, c->err_info,
5778                 sizeof(iocommand.error_info));
5779         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5780                 rc = -EFAULT;
5781                 goto out;
5782         }
5783         if ((iocommand.Request.Type.Direction & XFER_READ) &&
5784                 iocommand.buf_size > 0) {
5785                 /* Copy the data out of the buffer we created */
5786                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5787                         rc = -EFAULT;
5788                         goto out;
5789                 }
5790         }
5791 out:
5792         cmd_free(h, c);
5793 out_kfree:
5794         kfree(buff);
5795         return rc;
5796 }
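
/*
 * Illustrative user-space use of CCISS_PASSTHRU (a sketch: the CDB is a
 * standard 6-byte INQUIRY, fd is opened as in the CCISS_GETPCIINFO
 * example above, and real callers also fill io.LUN_info to address a
 * device; error handling elided):
 *
 *      IOCTL_Command_struct io = { 0 };
 *      unsigned char inq[96];
 *
 *      io.Request.CDBLen = 6;
 *      io.Request.Type.Direction = XFER_READ;
 *      io.Request.CDB[0] = 0x12;        (the INQUIRY opcode)
 *      io.Request.CDB[4] = sizeof(inq);
 *      io.buf_size = sizeof(inq);
 *      io.buf = inq;
 *      ioctl(fd, CCISS_PASSTHRU, &io);
 */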
5797
5798 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5799 {
5800         BIG_IOCTL_Command_struct *ioc;
5801         struct CommandList *c;
5802         unsigned char **buff = NULL;
5803         int *buff_size = NULL;
5804         u64 temp64;
5805         BYTE sg_used = 0;
5806         int status = 0;
5807         u32 left;
5808         u32 sz;
5809         BYTE __user *data_ptr;
5810
5811         if (!argp)
5812                 return -EINVAL;
5813         if (!capable(CAP_SYS_RAWIO))
5814                 return -EPERM;
5815         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5817         if (!ioc) {
5818                 status = -ENOMEM;
5819                 goto cleanup1;
5820         }
5821         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5822                 status = -EFAULT;
5823                 goto cleanup1;
5824         }
5825         if ((ioc->buf_size < 1) &&
5826             (ioc->Request.Type.Direction != XFER_NONE)) {
5827                 status = -EINVAL;
5828                 goto cleanup1;
5829         }
5830         /* Check kmalloc limits  using all SGs */
5831         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5832                 status = -EINVAL;
5833                 goto cleanup1;
5834         }
5835         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5836                 status = -EINVAL;
5837                 goto cleanup1;
5838         }
5839         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5840         if (!buff) {
5841                 status = -ENOMEM;
5842                 goto cleanup1;
5843         }
5844         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5845         if (!buff_size) {
5846                 status = -ENOMEM;
5847                 goto cleanup1;
5848         }
5849         left = ioc->buf_size;
5850         data_ptr = ioc->buf;
5851         while (left) {
5852                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5853                 buff_size[sg_used] = sz;
5854                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5855                 if (buff[sg_used] == NULL) {
5856                         status = -ENOMEM;
5857                         goto cleanup1;
5858                 }
5859                 if (ioc->Request.Type.Direction & XFER_WRITE) {
5860                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5861                                 status = -EFAULT;
5862                                 goto cleanup1;
5863                         }
5864                 } else
5865                         memset(buff[sg_used], 0, sz);
5866                 left -= sz;
5867                 data_ptr += sz;
5868                 sg_used++;
5869         }
5870         c = cmd_alloc(h);
5871
5872         c->cmd_type = CMD_IOCTL_PEND;
5873         c->scsi_cmd = SCSI_CMD_BUSY;
5874         c->Header.ReplyQueue = 0;
5875         c->Header.SGList = (u8) sg_used;
5876         c->Header.SGTotal = cpu_to_le16(sg_used);
5877         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5878         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5879         if (ioc->buf_size > 0) {
5880                 int i;
5881                 for (i = 0; i < sg_used; i++) {
5882                         temp64 = pci_map_single(h->pdev, buff[i],
5883                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
5884                         if (dma_mapping_error(&h->pdev->dev,
5885                                                         (dma_addr_t) temp64)) {
5886                                 c->SG[i].Addr = cpu_to_le64(0);
5887                                 c->SG[i].Len = cpu_to_le32(0);
5888                                 hpsa_pci_unmap(h->pdev, c, i,
5889                                         PCI_DMA_BIDIRECTIONAL);
5890                                 status = -ENOMEM;
5891                                 goto cleanup0;
5892                         }
5893                         c->SG[i].Addr = cpu_to_le64(temp64);
5894                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
5895                         c->SG[i].Ext = cpu_to_le32(0);
5896                 }
5897                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5898         }
5899         status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5900         if (sg_used)
5901                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5902         check_ioctl_unit_attention(h, c);
5903         if (status) {
5904                 status = -EIO;
5905                 goto cleanup0;
5906         }
5907
5908         /* Copy the error information out */
5909         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5910         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5911                 status = -EFAULT;
5912                 goto cleanup0;
5913         }
5914         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5915                 int i;
5916
5917                 /* Copy the data out of the buffer we created */
5918                 BYTE __user *ptr = ioc->buf;
5919                 for (i = 0; i < sg_used; i++) {
5920                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
5921                                 status = -EFAULT;
5922                                 goto cleanup0;
5923                         }
5924                         ptr += buff_size[i];
5925                 }
5926         }
5927         status = 0;
5928 cleanup0:
5929         cmd_free(h, c);
5930 cleanup1:
5931         if (buff) {
5932                 int i;
5933
5934                 for (i = 0; i < sg_used; i++)
5935                         kfree(buff[i]);
5936                 kfree(buff);
5937         }
5938         kfree(buff_size);
5939         kfree(ioc);
5940         return status;
5941 }
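
/*
 * Worked example of the chunking above: ioc->buf_size = 100 KiB with
 * ioc->malloc_size = 64 KiB is split into two kernel buffers of 64 KiB
 * and 36 KiB, mapped as SG[0] and SG[1], with HPSA_SG_LAST set on the
 * final entry.
 */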
5942
5943 static void check_ioctl_unit_attention(struct ctlr_info *h,
5944         struct CommandList *c)
5945 {
5946         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5947                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5948                 (void) check_for_unit_attention(h, c);
5949 }
5950
5951 /*
5952  * ioctl
5953  */
5954 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5955 {
5956         struct ctlr_info *h;
5957         void __user *argp = (void __user *)arg;
5958         int rc;
5959
5960         h = sdev_to_hba(dev);
5961
5962         switch (cmd) {
5963         case CCISS_DEREGDISK:
5964         case CCISS_REGNEWDISK:
5965         case CCISS_REGNEWD:
5966                 hpsa_scan_start(h->scsi_host);
5967                 return 0;
5968         case CCISS_GETPCIINFO:
5969                 return hpsa_getpciinfo_ioctl(h, argp);
5970         case CCISS_GETDRIVVER:
5971                 return hpsa_getdrivver_ioctl(h, argp);
5972         case CCISS_PASSTHRU:
5973                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5974                         return -EAGAIN;
5975                 rc = hpsa_passthru_ioctl(h, argp);
5976                 atomic_inc(&h->passthru_cmds_avail);
5977                 return rc;
5978         case CCISS_BIG_PASSTHRU:
5979                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5980                         return -EAGAIN;
5981                 rc = hpsa_big_passthru_ioctl(h, argp);
5982                 atomic_inc(&h->passthru_cmds_avail);
5983                 return rc;
5984         default:
5985                 return -ENOTTY;
5986         }
5987 }
5988
5989 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5990                                 u8 reset_type)
5991 {
5992         struct CommandList *c;
5993
5994         c = cmd_alloc(h);
5995
5996         /* fill_cmd can't fail here, no data buffer to map */
5997         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5998                 RAID_CTLR_LUNID, TYPE_MSG);
5999         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6000         c->waiting = NULL;
6001         enqueue_cmd_and_start_io(h, c);
6002         /* Don't wait for completion; the reset won't complete.  Don't free
6003          * the command either.  This is the last command we will send before
6004          * re-initializing everything, so it doesn't matter and won't leak.
6005          */
6006         return;
6007 }
6008
6009 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6010         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6011         int cmd_type)
6012 {
6013         int pci_dir = XFER_NONE;
6014         u64 tag; /* for commands to be aborted */
6015
6016         c->cmd_type = CMD_IOCTL_PEND;
6017         c->scsi_cmd = SCSI_CMD_BUSY;
6018         c->Header.ReplyQueue = 0;
6019         if (buff != NULL && size > 0) {
6020                 c->Header.SGList = 1;
6021                 c->Header.SGTotal = cpu_to_le16(1);
6022         } else {
6023                 c->Header.SGList = 0;
6024                 c->Header.SGTotal = cpu_to_le16(0);
6025         }
6026         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6027
6028         if (cmd_type == TYPE_CMD) {
6029                 switch (cmd) {
6030                 case HPSA_INQUIRY:
6031                         /* are we trying to read a vital product page */
6032                         if (page_code & VPD_PAGE) {
6033                                 c->Request.CDB[1] = 0x01;
6034                                 c->Request.CDB[2] = (page_code & 0xff);
6035                         }
6036                         c->Request.CDBLen = 6;
6037                         c->Request.type_attr_dir =
6038                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6039                         c->Request.Timeout = 0;
6040                         c->Request.CDB[0] = HPSA_INQUIRY;
6041                         c->Request.CDB[4] = size & 0xFF;
6042                         break;
6043                 case HPSA_REPORT_LOG:
6044                 case HPSA_REPORT_PHYS:
6045                         /* Talking to the controller, so it's a physical
6046                            command: mode = 00, target = 0.  Nothing to write.
6047                          */
6048                         c->Request.CDBLen = 12;
6049                         c->Request.type_attr_dir =
6050                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6051                         c->Request.Timeout = 0;
6052                         c->Request.CDB[0] = cmd;
6053                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6054                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6055                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6056                         c->Request.CDB[9] = size & 0xFF;
6057                         break;
6058                 case HPSA_CACHE_FLUSH:
6059                         c->Request.CDBLen = 12;
6060                         c->Request.type_attr_dir =
6061                                         TYPE_ATTR_DIR(cmd_type,
6062                                                 ATTR_SIMPLE, XFER_WRITE);
6063                         c->Request.Timeout = 0;
6064                         c->Request.CDB[0] = BMIC_WRITE;
6065                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6066                         c->Request.CDB[7] = (size >> 8) & 0xFF;
6067                         c->Request.CDB[8] = size & 0xFF;
6068                         break;
6069                 case TEST_UNIT_READY:
6070                         c->Request.CDBLen = 6;
6071                         c->Request.type_attr_dir =
6072                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6073                         c->Request.Timeout = 0;
6074                         break;
6075                 case HPSA_GET_RAID_MAP:
6076                         c->Request.CDBLen = 12;
6077                         c->Request.type_attr_dir =
6078                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6079                         c->Request.Timeout = 0;
6080                         c->Request.CDB[0] = HPSA_CISS_READ;
6081                         c->Request.CDB[1] = cmd;
6082                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6083                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6084                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6085                         c->Request.CDB[9] = size & 0xFF;
6086                         break;
6087                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6088                         c->Request.CDBLen = 10;
6089                         c->Request.type_attr_dir =
6090                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6091                         c->Request.Timeout = 0;
6092                         c->Request.CDB[0] = BMIC_READ;
6093                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6094                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6095                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6096                         break;
6097                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6098                         c->Request.CDBLen = 10;
6099                         c->Request.type_attr_dir =
6100                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6101                         c->Request.Timeout = 0;
6102                         c->Request.CDB[0] = BMIC_READ;
6103                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6104                         c->Request.CDB[7] = (size >> 16) & 0xFF;
6105                         c->Request.CDB[8] = (size >> 8) & 0xFF;
6106                         break;
6107                 default:
6108                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6109                         BUG();
6110                         return -1;
6111                 }
6112         } else if (cmd_type == TYPE_MSG) {
6113                 switch (cmd) {
6114
6115                 case  HPSA_DEVICE_RESET_MSG:
6116                         c->Request.CDBLen = 16;
6117                         c->Request.type_attr_dir =
6118                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6119                         c->Request.Timeout = 0; /* Don't time out */
6120                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6121                         c->Request.CDB[0] =  cmd;
6122                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6123                         /* If bytes 4-7 are zero, it means reset the */
6124                         /* LunID device */
6125                         c->Request.CDB[4] = 0x00;
6126                         c->Request.CDB[5] = 0x00;
6127                         c->Request.CDB[6] = 0x00;
6128                         c->Request.CDB[7] = 0x00;
6129                         break;
6130                 case  HPSA_ABORT_MSG:
6131                         memcpy(&tag, buff, sizeof(tag));
6132                         dev_dbg(&h->pdev->dev,
6133                                 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6134                                 tag, c->Header.tag);
6135                         c->Request.CDBLen = 16;
6136                         c->Request.type_attr_dir =
6137                                         TYPE_ATTR_DIR(cmd_type,
6138                                                 ATTR_SIMPLE, XFER_WRITE);
6139                         c->Request.Timeout = 0; /* Don't time out */
6140                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6141                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6142                         c->Request.CDB[2] = 0x00; /* reserved */
6143                         c->Request.CDB[3] = 0x00; /* reserved */
6144                         /* Tag to abort goes in CDB[4]-CDB[11] */
6145                         memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6146                         c->Request.CDB[12] = 0x00; /* reserved */
6147                         c->Request.CDB[13] = 0x00; /* reserved */
6148                         c->Request.CDB[14] = 0x00; /* reserved */
6149                         c->Request.CDB[15] = 0x00; /* reserved */
6150                         break;
6151                 default:
6152                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
6153                                 cmd);
6154                         BUG();
6155                 }
6156         } else {
6157                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6158                 BUG();
6159         }
6160
6161         switch (GET_DIR(c->Request.type_attr_dir)) {
6162         case XFER_READ:
6163                 pci_dir = PCI_DMA_FROMDEVICE;
6164                 break;
6165         case XFER_WRITE:
6166                 pci_dir = PCI_DMA_TODEVICE;
6167                 break;
6168         case XFER_NONE:
6169                 pci_dir = PCI_DMA_NONE;
6170                 break;
6171         default:
6172                 pci_dir = PCI_DMA_BIDIRECTIONAL;
6173         }
6174         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6175                 return -1;
6176         return 0;
6177 }
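
/*
 * Example call (illustrative; exercises the HPSA_INQUIRY arm above),
 * where buf and scsi3addr are assumed to come from the caller: build a
 * standard INQUIRY for the device at scsi3addr into c, letting fill_cmd()
 * map buf for the read, then issue it synchronously:
 *
 *      if (fill_cmd(c, HPSA_INQUIRY, h, buf, sizeof(buf), 0,
 *                   scsi3addr, TYPE_CMD))
 *              return -ENOMEM;
 *      rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
 */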
6178
6179 /*
6180  * Map (physical) PCI mem into (virtual) kernel space
6181  */
6182 static void __iomem *remap_pci_mem(ulong base, ulong size)
6183 {
6184         ulong page_base = ((ulong) base) & PAGE_MASK;
6185         ulong page_offs = ((ulong) base) - page_base;
6186         void __iomem *page_remapped = ioremap_nocache(page_base,
6187                 page_offs + size);
6188
6189         return page_remapped ? (page_remapped + page_offs) : NULL;
6190 }
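
/*
 * Worked example: with 4 KiB pages, base = 0xfe8f0250 and size = 0x100
 * give page_base = 0xfe8f0000 and page_offs = 0x250, so we map 0x350
 * bytes starting at the page boundary and return (mapping + 0x250),
 * keeping the caller's pointer byte-accurate.
 */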
6191
6192 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6193 {
6194         return h->access.command_completed(h, q);
6195 }
6196
6197 static inline bool interrupt_pending(struct ctlr_info *h)
6198 {
6199         return h->access.intr_pending(h);
6200 }
6201
6202 static inline long interrupt_not_for_us(struct ctlr_info *h)
6203 {
6204         return (h->access.intr_pending(h) == 0) ||
6205                 (h->interrupts_enabled == 0);
6206 }
6207
6208 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6209         u32 raw_tag)
6210 {
6211         if (unlikely(tag_index >= h->nr_cmds)) {
6212                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6213                 return 1;
6214         }
6215         return 0;
6216 }
6217
6218 static inline void finish_cmd(struct CommandList *c)
6219 {
6220         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6221         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6222                         || c->cmd_type == CMD_IOACCEL2))
6223                 complete_scsi_command(c);
6224         else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6225                 complete(c->waiting);
6226 }
6227
6228
6229 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
6230 {
6231 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
6232 #define HPSA_SIMPLE_ERROR_BITS 0x03
6233         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
6234                 return tag & ~HPSA_SIMPLE_ERROR_BITS;
6235         return tag & ~HPSA_PERF_ERROR_BITS;
6236 }
6237
6238 /* process completion of an indexed ("direct lookup") command */
6239 static inline void process_indexed_cmd(struct ctlr_info *h,
6240         u32 raw_tag)
6241 {
6242         u32 tag_index;
6243         struct CommandList *c;
6244
6245         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6246         if (!bad_tag(h, tag_index, raw_tag)) {
6247                 c = h->cmd_pool + tag_index;
6248                 finish_cmd(c);
6249         }
6250 }
6251
6252 /* Some controllers, like p400, will give us one interrupt
6253  * after a soft reset, even if we turned interrupts off.
6254  * Only need to check for this in the hpsa_xxx_discard_completions
6255  * functions.
6256  */
6257 static int ignore_bogus_interrupt(struct ctlr_info *h)
6258 {
6259         if (likely(!reset_devices))
6260                 return 0;
6261
6262         if (likely(h->interrupts_enabled))
6263                 return 0;
6264
6265         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6266                 "(known firmware bug).  Ignoring.\n");
6267
6268         return 1;
6269 }
6270
6271 /*
6272  * Convert &h->q[x] (passed to interrupt handlers) back to h.
6273  * Relies on (h->q[x] == x) being true for x such that
6274  * 0 <= x < MAX_REPLY_QUEUES.
6275  */
6276 static struct ctlr_info *queue_to_hba(u8 *queue)
6277 {
6278         return container_of((queue - *queue), struct ctlr_info, q[0]);
6279 }
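
/*
 * Illustrative sketch of the invariant described above: interrupt setup
 * (elsewhere in this file) stores each queue's own index in h->q[i], so
 * *queue recovers the index and (queue - *queue) is always &h->q[0]:
 *
 *      for (i = 0; i < MAX_REPLY_QUEUES; i++)
 *              h->q[i] = (u8) i;
 */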
6280
6281 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6282 {
6283         struct ctlr_info *h = queue_to_hba(queue);
6284         u8 q = *(u8 *) queue;
6285         u32 raw_tag;
6286
6287         if (ignore_bogus_interrupt(h))
6288                 return IRQ_NONE;
6289
6290         if (interrupt_not_for_us(h))
6291                 return IRQ_NONE;
6292         h->last_intr_timestamp = get_jiffies_64();
6293         while (interrupt_pending(h)) {
6294                 raw_tag = get_next_completion(h, q);
6295                 while (raw_tag != FIFO_EMPTY)
6296                         raw_tag = next_command(h, q);
6297         }
6298         return IRQ_HANDLED;
6299 }
6300
6301 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6302 {
6303         struct ctlr_info *h = queue_to_hba(queue);
6304         u32 raw_tag;
6305         u8 q = *(u8 *) queue;
6306
6307         if (ignore_bogus_interrupt(h))
6308                 return IRQ_NONE;
6309
6310         h->last_intr_timestamp = get_jiffies_64();
6311         raw_tag = get_next_completion(h, q);
6312         while (raw_tag != FIFO_EMPTY)
6313                 raw_tag = next_command(h, q);
6314         return IRQ_HANDLED;
6315 }
6316
6317 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6318 {
6319         struct ctlr_info *h = queue_to_hba((u8 *) queue);
6320         u32 raw_tag;
6321         u8 q = *(u8 *) queue;
6322
6323         if (interrupt_not_for_us(h))
6324                 return IRQ_NONE;
6325         h->last_intr_timestamp = get_jiffies_64();
6326         while (interrupt_pending(h)) {
6327                 raw_tag = get_next_completion(h, q);
6328                 while (raw_tag != FIFO_EMPTY) {
6329                         process_indexed_cmd(h, raw_tag);
6330                         raw_tag = next_command(h, q);
6331                 }
6332         }
6333         return IRQ_HANDLED;
6334 }
6335
6336 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6337 {
6338         struct ctlr_info *h = queue_to_hba(queue);
6339         u32 raw_tag;
6340         u8 q = *(u8 *) queue;
6341
6342         h->last_intr_timestamp = get_jiffies_64();
6343         raw_tag = get_next_completion(h, q);
6344         while (raw_tag != FIFO_EMPTY) {
6345                 process_indexed_cmd(h, raw_tag);
6346                 raw_tag = next_command(h, q);
6347         }
6348         return IRQ_HANDLED;
6349 }
6350
6351 /* Send a message CDB to the firmware.  Careful: this only works
6352  * in simple mode, not performant mode, due to the tag lookup.
6353  * We only ever use this immediately after a controller reset.
6354  */
6355 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6356                         unsigned char type)
6357 {
6358         struct Command {
6359                 struct CommandListHeader CommandHeader;
6360                 struct RequestBlock Request;
6361                 struct ErrDescriptor ErrorDescriptor;
6362         };
6363         struct Command *cmd;
6364         static const size_t cmd_sz = sizeof(*cmd) +
6365                                         sizeof(cmd->ErrorDescriptor);
6366         dma_addr_t paddr64;
6367         __le32 paddr32;
6368         u32 tag;
6369         void __iomem *vaddr;
6370         int i, err;
6371
6372         vaddr = pci_ioremap_bar(pdev, 0);
6373         if (vaddr == NULL)
6374                 return -ENOMEM;
6375
6376         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6377          * CCISS commands, so they must be allocated from the lower 4GiB of
6378          * memory.
6379          */
6380         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6381         if (err) {
6382                 iounmap(vaddr);
6383                 return err;
6384         }
6385
6386         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6387         if (cmd == NULL) {
6388                 iounmap(vaddr);
6389                 return -ENOMEM;
6390         }
6391
6392         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
6393          * although there's no guarantee, we assume that the address is at
6394          * least 4-byte aligned (most likely, it's page-aligned).
6395          */
6396         paddr32 = cpu_to_le32(paddr64);
6397
6398         cmd->CommandHeader.ReplyQueue = 0;
6399         cmd->CommandHeader.SGList = 0;
6400         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6401         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6402         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6403
6404         cmd->Request.CDBLen = 16;
6405         cmd->Request.type_attr_dir =
6406                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6407         cmd->Request.Timeout = 0; /* Don't time out */
6408         cmd->Request.CDB[0] = opcode;
6409         cmd->Request.CDB[1] = type;
6410         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6411         cmd->ErrorDescriptor.Addr =
6412                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6413         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
6414
6415         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6416
6417         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6418                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6419                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6420                         break;
6421                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6422         }
6423
6424         iounmap(vaddr);
6425
6426         /* we leak the DMA buffer here ... no choice since the controller could
6427          *  still complete the command.
6428          */
6429         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6430                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6431                         opcode, type);
6432                 return -ETIMEDOUT;
6433         }
6434
6435         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6436
6437         if (tag & HPSA_ERROR_BIT) {
6438                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6439                         opcode, type);
6440                 return -EIO;
6441         }
6442
6443         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6444                 opcode, type);
6445         return 0;
6446 }
6447
6448 #define hpsa_noop(p) hpsa_message(p, 3, 0)
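
/*
 * Example use (a sketch; the reset path elsewhere in this file retries
 * the equivalent): send the no-op message, opcode 3 / type 0, to check
 * that the controller answers at all after a reset.
 *
 *      if (hpsa_noop(pdev) != 0)
 *              dev_warn(&pdev->dev, "controller not responding to no-op\n");
 */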
6449
6450 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6451         void __iomem *vaddr, u32 use_doorbell)
6452 {
6453
6454         if (use_doorbell) {
6455                 /* For everything after the P600, the PCI power state method
6456                  * of resetting the controller doesn't work, so we have this
6457                  * other way using the doorbell register.
6458                  */
6459                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6460                 writel(use_doorbell, vaddr + SA5_DOORBELL);
6461
6462                 /* The PMC hardware engineers tell us we need a 10-second
6463                  * delay after a doorbell reset and before any attempt to talk
6464                  * to the board at all, to ensure that the reset actually works
6465                  * and doesn't fall over in some weird corner cases.
6466                  */
6467                 msleep(10000);
6468         } else { /* Try to do it the PCI power state way */
6469
6470                 /* Quoting from the Open CISS Specification: "The Power
6471                  * Management Control/Status Register (CSR) controls the power
6472                  * state of the device.  The normal operating state is D0,
6473                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
6474                  * the controller, place the interface device in D3 then to D0,
6475                  * this causes a secondary PCI reset which will reset the
6476                  * controller." */
6477
6478                 int rc = 0;
6479
6480                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6481
6482                 /* enter the D3hot power management state */
6483                 rc = pci_set_power_state(pdev, PCI_D3hot);
6484                 if (rc)
6485                         return rc;
6486
6487                 msleep(500);
6488
6489                 /* enter the D0 power management state */
6490                 rc = pci_set_power_state(pdev, PCI_D0);
6491                 if (rc)
6492                         return rc;
6493
6494                 /*
6495                  * The P600 requires a small delay when changing states.
6496                  * Otherwise we may think the board did not reset and we bail.
6497                  * This is for kdump only and is particular to the P600.
6498                  */
6499                 msleep(500);
6500         }
6501         return 0;
6502 }
6503
6504 static void init_driver_version(char *driver_version, int len)
6505 {
6506         memset(driver_version, 0, len);
6507         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6508 }
6509
6510 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6511 {
6512         char *driver_version;
6513         int i, size = sizeof(cfgtable->driver_version);
6514
6515         driver_version = kmalloc(size, GFP_KERNEL);
6516         if (!driver_version)
6517                 return -ENOMEM;
6518
6519         init_driver_version(driver_version, size);
6520         for (i = 0; i < size; i++)
6521                 writeb(driver_version[i], &cfgtable->driver_version[i]);
6522         kfree(driver_version);
6523         return 0;
6524 }
6525
6526 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6527                                           unsigned char *driver_ver)
6528 {
6529         int i;
6530
6531         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6532                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6533 }
6534
6535 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6536 {
6537
6538         char *driver_ver, *old_driver_ver;
6539         int rc, size = sizeof(cfgtable->driver_version);
6540
6541         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6542         if (!old_driver_ver)
6543                 return -ENOMEM;
6544         driver_ver = old_driver_ver + size;
6545
6546         /* After a reset, the 32 bytes of "driver version" in the cfgtable
6547          * should have been changed, otherwise we know the reset failed.
6548          */
6549         init_driver_version(old_driver_ver, size);
6550         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6551         rc = !memcmp(driver_ver, old_driver_ver, size);
6552         kfree(old_driver_ver);
6553         return rc;
6554 }
6555 /* This does a hard reset of the controller using PCI power management
6556  * states or the doorbell register.
6557  */
6558 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6559 {
6560         u64 cfg_offset;
6561         u32 cfg_base_addr;
6562         u64 cfg_base_addr_index;
6563         void __iomem *vaddr;
6564         unsigned long paddr;
6565         u32 misc_fw_support;
6566         int rc;
6567         struct CfgTable __iomem *cfgtable;
6568         u32 use_doorbell;
6569         u16 command_register;
6570
6571         /* For controllers as old as the P600, this is very nearly
6572          * the same thing as
6573          *
6574          * pci_save_state(pci_dev);
6575          * pci_set_power_state(pci_dev, PCI_D3hot);
6576          * pci_set_power_state(pci_dev, PCI_D0);
6577          * pci_restore_state(pci_dev);
6578          *
6579          * For controllers newer than the P600, the PCI power state
6580          * method of resetting doesn't work, so we have another way
6581          * using the doorbell register.
6582          */
6583
6584         if (!ctlr_is_resettable(board_id)) {
6585                 dev_warn(&pdev->dev, "Controller not resettable\n");
6586                 return -ENODEV;
6587         }
6588
6589         /* if controller is soft- but not hard resettable... */
6590         if (!ctlr_is_hard_resettable(board_id))
6591                 return -ENOTSUPP; /* try soft reset later. */
6592
6593         /* Save the PCI command register */
6594         pci_read_config_word(pdev, 4, &command_register);
6595         pci_save_state(pdev);
6596
6597         /* find the first memory BAR, so we can find the cfg table */
6598         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6599         if (rc)
6600                 return rc;
6601         vaddr = remap_pci_mem(paddr, 0x250);
6602         if (!vaddr)
6603                 return -ENOMEM;
6604
6605         /* find cfgtable in order to check if reset via doorbell is supported */
6606         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6607                                         &cfg_base_addr_index, &cfg_offset);
6608         if (rc)
6609                 goto unmap_vaddr;
6610         cfgtable = remap_pci_mem(pci_resource_start(pdev,
6611                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6612         if (!cfgtable) {
6613                 rc = -ENOMEM;
6614                 goto unmap_vaddr;
6615         }
6616         rc = write_driver_ver_to_cfgtable(cfgtable);
6617         if (rc)
6618                 goto unmap_cfgtable;
6619
6620         /* If reset via doorbell register is supported, use that.
6621          * There are two such methods.  Favor the newest method.
6622          */
6623         misc_fw_support = readl(&cfgtable->misc_fw_support);
6624         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6625         if (use_doorbell) {
6626                 use_doorbell = DOORBELL_CTLR_RESET2;
6627         } else {
6628                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6629                 if (use_doorbell) {
6630                         dev_warn(&pdev->dev,
6631                                 "Soft reset not supported. Firmware update is required.\n");
6632                         rc = -ENOTSUPP; /* try soft reset */
6633                         goto unmap_cfgtable;
6634                 }
6635         }
6636
6637         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6638         if (rc)
6639                 goto unmap_cfgtable;
6640
6641         pci_restore_state(pdev);
6642         pci_write_config_word(pdev, PCI_COMMAND, command_register);
6643
6644         /* Some devices (notably the HP Smart Array 5i Controller)
6645          * need a little pause here */
6646         msleep(HPSA_POST_RESET_PAUSE_MSECS);
6647
6648         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6649         if (rc) {
6650                 dev_warn(&pdev->dev,
6651                         "Failed waiting for board to become ready after hard reset\n");
6652                 goto unmap_cfgtable;
6653         }
6654
6655         rc = controller_reset_failed(cfgtable);
6656         if (rc < 0)
6657                 goto unmap_cfgtable;
6658         if (rc) {
6659                 dev_warn(&pdev->dev, "Unable to successfully reset "
6660                         "controller. Will try soft reset.\n");
6661                 rc = -ENOTSUPP;
6662         } else {
6663                 dev_info(&pdev->dev, "board ready after hard reset.\n");
6664         }
6665
6666 unmap_cfgtable:
6667         iounmap(cfgtable);
6668
6669 unmap_vaddr:
6670         iounmap(vaddr);
6671         return rc;
6672 }
6673
6674 /*
6675  * We cannot read the structure directly; for portability we must
6676  * use the I/O accessor functions.
6677  * This is for debug only.
6678  */
6679 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6680 {
6681 #ifdef HPSA_DEBUG
6682         int i;
6683         char temp_name[17];
6684
6685         dev_info(dev, "Controller Configuration information\n");
6686         dev_info(dev, "------------------------------------\n");
6687         for (i = 0; i < 4; i++)
6688                 temp_name[i] = readb(&(tb->Signature[i]));
6689         temp_name[4] = '\0';
6690         dev_info(dev, "   Signature = %s\n", temp_name);
6691         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
6692         dev_info(dev, "   Transport methods supported = 0x%x\n",
6693                readl(&(tb->TransportSupport)));
6694         dev_info(dev, "   Transport methods active = 0x%x\n",
6695                readl(&(tb->TransportActive)));
6696         dev_info(dev, "   Requested transport Method = 0x%x\n",
6697                readl(&(tb->HostWrite.TransportRequest)));
6698         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
6699                readl(&(tb->HostWrite.CoalIntDelay)));
6700         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
6701                readl(&(tb->HostWrite.CoalIntCount)));
6702         dev_info(dev, "   Max outstanding commands = %d\n",
6703                readl(&(tb->CmdsOutMax)));
6704         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6705         for (i = 0; i < 16; i++)
6706                 temp_name[i] = readb(&(tb->ServerName[i]));
6707         temp_name[16] = '\0';
6708         dev_info(dev, "   Server Name = %s\n", temp_name);
6709         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
6710                 readl(&(tb->HeartBeat)));
6711 #endif                          /* HPSA_DEBUG */
6712 }
6713
6714 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6715 {
6716         int i, offset, mem_type, bar_type;
6717
6718         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6719                 return 0;
6720         offset = 0;
6721         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6722                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6723                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6724                         offset += 4;
6725                 else {
6726                         mem_type = pci_resource_flags(pdev, i) &
6727                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6728                         switch (mem_type) {
6729                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
6730                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6731                                 offset += 4;    /* 32 bit */
6732                                 break;
6733                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
6734                                 offset += 8;
6735                                 break;
6736                         default:        /* reserved in PCI 2.2 */
6737                                 dev_warn(&pdev->dev,
6738                                        "base address is invalid\n");
6739                                 return -1;
6741                         }
6742                 }
6743                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6744                         return i + 1;
6745         }
6746         return -1;
6747 }
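
/*
 * Worked example (hypothetical layout): with a 32-bit memory BAR in
 * slot 0, the i == 0 iteration advances offset to 4, so a caller
 * passing pci_bar_addr == PCI_BASE_ADDRESS_1 (4 bytes past
 * PCI_BASE_ADDRESS_0 in config space) gets back i + 1 == 1, the
 * resource index of the BAR at that offset.
 */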
6748
6749 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6750 {
6751         if (h->msix_vector) {
6752                 if (h->pdev->msix_enabled)
6753                         pci_disable_msix(h->pdev);
6754                 h->msix_vector = 0;
6755         } else if (h->msi_vector) {
6756                 if (h->pdev->msi_enabled)
6757                         pci_disable_msi(h->pdev);
6758                 h->msi_vector = 0;
6759         }
6760 }
6761
6762 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6763  * controllers that are capable. If not, we use legacy INTx mode.
6764  */
6765 static void hpsa_interrupt_mode(struct ctlr_info *h)
6766 {
6767 #ifdef CONFIG_PCI_MSI
6768         int err, i;
6769         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6770
6771         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6772                 hpsa_msix_entries[i].vector = 0;
6773                 hpsa_msix_entries[i].entry = i;
6774         }
6775
6776         /* Some boards advertise MSI but don't really support it */
6777         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6778             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6779                 goto default_int_mode;
6780         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6781                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
6782                 h->msix_vector = MAX_REPLY_QUEUES;
6783                 if (h->msix_vector > num_online_cpus())
6784                         h->msix_vector = num_online_cpus();
6785                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6786                                             1, h->msix_vector);
6787                 if (err < 0) {
6788                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6789                         h->msix_vector = 0;
6790                         goto single_msi_mode;
6791                 } else if (err < h->msix_vector) {
6792                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6793                                "available\n", err);
6794                 }
6795                 h->msix_vector = err;
6796                 for (i = 0; i < h->msix_vector; i++)
6797                         h->intr[i] = hpsa_msix_entries[i].vector;
6798                 return;
6799         }
6800 single_msi_mode:
6801         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6802                 dev_info(&h->pdev->dev, "MSI capable controller\n");
6803                 if (!pci_enable_msi(h->pdev))
6804                         h->msi_vector = 1;
6805                 else
6806                         dev_warn(&h->pdev->dev, "MSI init failed\n");
6807         }
6808 default_int_mode:
6809 #endif                          /* CONFIG_PCI_MSI */
6810         /* if we get here we're going to use the default interrupt mode */
6811         h->intr[h->intr_mode] = h->pdev->irq;
6812 }
6813
6814 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6815 {
6816         int i;
6817         u32 subsystem_vendor_id, subsystem_device_id;
6818
6819         subsystem_vendor_id = pdev->subsystem_vendor;
6820         subsystem_device_id = pdev->subsystem_device;
6821         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6822                     subsystem_vendor_id;
6823
6824         for (i = 0; i < ARRAY_SIZE(products); i++)
6825                 if (*board_id == products[i].board_id)
6826                         return i;
6827
6828         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6829                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6830                 !hpsa_allow_any) {
6831                 dev_warn(&pdev->dev, "unrecognized board ID: "
6832                         "0x%08x, ignoring.\n", *board_id);
6833                 return -ENODEV;
6834         }
6835         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6836 }
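
/*
 * Packing example: a board with PCI subsystem device ID 0x3225 and
 * subsystem vendor ID 0x103C yields *board_id == 0x3225103C, the same
 * format compared against literals elsewhere in this file (see the
 * P600 check in hpsa_p600_dma_prefetch_quirk()).
 */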
6837
6838 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6839                                     unsigned long *memory_bar)
6840 {
6841         int i;
6842
6843         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6844                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6845                         /* addressing mode bits already removed */
6846                         *memory_bar = pci_resource_start(pdev, i);
6847                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6848                                 *memory_bar);
6849                         return 0;
6850                 }
6851         dev_warn(&pdev->dev, "no memory BAR found\n");
6852         return -ENODEV;
6853 }
6854
6855 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6856                                      int wait_for_ready)
6857 {
6858         int i, iterations;
6859         u32 scratchpad;
6860         if (wait_for_ready)
6861                 iterations = HPSA_BOARD_READY_ITERATIONS;
6862         else
6863                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6864
6865         for (i = 0; i < iterations; i++) {
6866                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6867                 if (wait_for_ready) {
6868                         if (scratchpad == HPSA_FIRMWARE_READY)
6869                                 return 0;
6870                 } else {
6871                         if (scratchpad != HPSA_FIRMWARE_READY)
6872                                 return 0;
6873                 }
6874                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6875         }
6876         dev_warn(&pdev->dev, "board not ready, timed out.\n");
6877         return -ENODEV;
6878 }
6879
6880 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6881                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6882                                u64 *cfg_offset)
6883 {
6884         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6885         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6886         *cfg_base_addr &= (u32) 0x0000ffff;
6887         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6888         if (*cfg_base_addr_index == -1) {
6889                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6890                 return -ENODEV;
6891         }
6892         return 0;
6893 }
6894
6895 static void hpsa_free_cfgtables(struct ctlr_info *h)
6896 {
6897         if (h->transtable) {
6898                 iounmap(h->transtable);
6899                 h->transtable = NULL;
6900         }
6901         if (h->cfgtable) {
6902                 iounmap(h->cfgtable);
6903                 h->cfgtable = NULL;
6904         }
6905 }
6906
6907 /* Find and map CISS config table and transfer table
6908  * several items must be unmapped (freed) later
6909  */
6910 static int hpsa_find_cfgtables(struct ctlr_info *h)
6911 {
6912         u64 cfg_offset;
6913         u32 cfg_base_addr;
6914         u64 cfg_base_addr_index;
6915         u32 trans_offset;
6916         int rc;
6917
6918         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6919                 &cfg_base_addr_index, &cfg_offset);
6920         if (rc)
6921                 return rc;
6922         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6923                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6924         if (!h->cfgtable) {
6925                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6926                 return -ENOMEM;
6927         }
6928         rc = write_driver_ver_to_cfgtable(h->cfgtable);
6929         if (rc)
6930                 return rc;
6931         /* Find performant mode table. */
6932         trans_offset = readl(&h->cfgtable->TransMethodOffset);
6933         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6934                                 cfg_base_addr_index)+cfg_offset+trans_offset,
6935                                 sizeof(*h->transtable));
6936         if (!h->transtable) {
6937                 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
6938                 hpsa_free_cfgtables(h);
6939                 return -ENOMEM;
6940         }
6941         return 0;
6942 }
6943
6944 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6945 {
6946 #define MIN_MAX_COMMANDS 16
6947         BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6948
6949         h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6950
6951         /* Limit commands in memory limited kdump scenario. */
6952         if (reset_devices && h->max_commands > 32)
6953                 h->max_commands = 32;
6954
6955         if (h->max_commands < MIN_MAX_COMMANDS) {
6956                 dev_warn(&h->pdev->dev,
6957                         "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
6958                         h->max_commands,
6959                         MIN_MAX_COMMANDS);
6960                 h->max_commands = MIN_MAX_COMMANDS;
6961         }
6962 }
6963
6964 /* If the controller reports that the total max sg entries is greater than 512,
6965  * then we know that chained SG blocks work.  (Original smart arrays did not
6966  * support chained SG blocks and would return zero for max sg entries.)
6967  */
6968 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6969 {
6970         return h->maxsgentries > 512;
6971 }
6972
6973 /* Interrogate the hardware for some limits:
6974  * max commands, max SG elements without chaining, and with chaining,
6975  * SG chain block size, etc.
6976  */
6977 static void hpsa_find_board_params(struct ctlr_info *h)
6978 {
6979         hpsa_get_max_perf_mode_cmds(h);
6980         h->nr_cmds = h->max_commands;
6981         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6982         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6983         if (hpsa_supports_chained_sg_blocks(h)) {
6984                 /* Limit in-command s/g elements to 32 to save dma'able memory. */
6985                 h->max_cmd_sg_entries = 32;
6986                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6987                 h->maxsgentries--; /* save one for chain pointer */
6988         } else {
6989                 /*
6990                  * Original smart arrays supported at most 31 s/g entries
6991                  * embedded inline in the command (trying to use more
6992                  * would lock up the controller)
6993                  */
6994                 h->max_cmd_sg_entries = 31;
6995                 h->maxsgentries = 31; /* default to traditional values */
6996                 h->chainsize = 0;
6997         }
6998
6999         /* Find out what task management functions are supported and cache */
7000         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7001         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7002                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7003         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7004                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7005         if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7006                 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7007 }
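
/*
 * Worked example (hypothetical value): a controller reporting
 * MaxScatterGatherElements == 1024 supports chaining (> 512), so the
 * code above sets max_cmd_sg_entries = 32, chainsize = 1024 - 32 = 992,
 * and maxsgentries = 1023 after reserving one entry for the chain
 * pointer.
 */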
7008
7009 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7010 {
7011         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7012                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7013                 return false;
7014         }
7015         return true;
7016 }
7017
7018 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7019 {
7020         u32 driver_support;
7021
7022         driver_support = readl(&(h->cfgtable->driver_support));
7023         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7024 #ifdef CONFIG_X86
7025         driver_support |= ENABLE_SCSI_PREFETCH;
7026 #endif
7027         driver_support |= ENABLE_UNIT_ATTN;
7028         writel(driver_support, &(h->cfgtable->driver_support));
7029 }
7030
7031 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7032  * in a prefetch beyond physical memory.
7033  */
7034 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7035 {
7036         u32 dma_prefetch;
7037
7038         if (h->board_id != 0x3225103C)
7039                 return;
7040         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7041         dma_prefetch |= 0x8000;
7042         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7043 }
7044
7045 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7046 {
7047         int i;
7048         u32 doorbell_value;
7049         unsigned long flags;
7050         /* wait until the clear_event_notify bit 6 is cleared by the controller. */
7051         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7052                 spin_lock_irqsave(&h->lock, flags);
7053                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7054                 spin_unlock_irqrestore(&h->lock, flags);
7055                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7056                         goto done;
7057                 /* delay and try again */
7058                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7059         }
7060         return -ENODEV;
7061 done:
7062         return 0;
7063 }
7064
7065 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7066 {
7067         int i;
7068         u32 doorbell_value;
7069         unsigned long flags;
7070
7071         /* under certain very rare conditions, this can take a while.
7072          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7073          * as we enter this code.)
7074          */
7075         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7076                 if (h->remove_in_progress)
7077                         goto done;
7078                 spin_lock_irqsave(&h->lock, flags);
7079                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7080                 spin_unlock_irqrestore(&h->lock, flags);
7081                 if (!(doorbell_value & CFGTBL_ChangeReq))
7082                         goto done;
7083                 /* delay and try again */
7084                 msleep(MODE_CHANGE_WAIT_INTERVAL);
7085         }
7086         return -ENODEV;
7087 done:
7088         return 0;
7089 }
7090
7091 /* return -ENODEV or other reason on error, 0 on success */
7092 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7093 {
7094         u32 trans_support;
7095
7096         trans_support = readl(&(h->cfgtable->TransportSupport));
7097         if (!(trans_support & SIMPLE_MODE))
7098                 return -ENOTSUPP;
7099
7100         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7101
7102         /* Update the field, and then ring the doorbell */
7103         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7104         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7105         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7106         if (hpsa_wait_for_mode_change_ack(h))
7107                 goto error;
7108         print_cfg_table(&h->pdev->dev, h->cfgtable);
7109         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7110                 goto error;
7111         h->transMethod = CFGTBL_Trans_Simple;
7112         return 0;
7113 error:
7114         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7115         return -ENODEV;
7116 }
7117
7118 /* free items allocated or mapped by hpsa_pci_init */
7119 static void hpsa_free_pci_init(struct ctlr_info *h)
7120 {
7121         hpsa_free_cfgtables(h);                 /* pci_init 4 */
7122         iounmap(h->vaddr);                      /* pci_init 3 */
7123         h->vaddr = NULL;
7124         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
7125         /*
7126          * call pci_disable_device before pci_release_regions per
7127          * Documentation/PCI/pci.txt
7128          */
7129         pci_disable_device(h->pdev);            /* pci_init 1 */
7130         pci_release_regions(h->pdev);           /* pci_init 2 */
7131 }
7132
7133 /* several items must be freed later */
7134 static int hpsa_pci_init(struct ctlr_info *h)
7135 {
7136         int prod_index, err;
7137
7138         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7139         if (prod_index < 0)
7140                 return prod_index;
7141         h->product_name = products[prod_index].product_name;
7142         h->access = *(products[prod_index].access);
7143
7144         h->needs_abort_tags_swizzled =
7145                 ctlr_needs_abort_tags_swizzled(h->board_id);
7146
7147         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7148                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7149
7150         err = pci_enable_device(h->pdev);
7151         if (err) {
7152                 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7153                 pci_disable_device(h->pdev);
7154                 return err;
7155         }
7156
7157         err = pci_request_regions(h->pdev, HPSA);
7158         if (err) {
7159                 dev_err(&h->pdev->dev,
7160                         "failed to obtain PCI resources\n");
7161                 pci_disable_device(h->pdev);
7162                 return err;
7163         }
7164
7165         pci_set_master(h->pdev);
7166
7167         hpsa_interrupt_mode(h);
7168         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7169         if (err)
7170                 goto clean2;    /* intmode+region, pci */
7171         h->vaddr = remap_pci_mem(h->paddr, 0x250);
7172         if (!h->vaddr) {
7173                 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7174                 err = -ENOMEM;
7175                 goto clean2;    /* intmode+region, pci */
7176         }
7177         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7178         if (err)
7179                 goto clean3;    /* vaddr, intmode+region, pci */
7180         err = hpsa_find_cfgtables(h);
7181         if (err)
7182                 goto clean3;    /* vaddr, intmode+region, pci */
7183         hpsa_find_board_params(h);
7184
7185         if (!hpsa_CISS_signature_present(h)) {
7186                 err = -ENODEV;
7187                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7188         }
7189         hpsa_set_driver_support_bits(h);
7190         hpsa_p600_dma_prefetch_quirk(h);
7191         err = hpsa_enter_simple_mode(h);
7192         if (err)
7193                 goto clean4;    /* cfgtables, vaddr, intmode+region, pci */
7194         return 0;
7195
7196 clean4: /* cfgtables, vaddr, intmode+region, pci */
7197         hpsa_free_cfgtables(h);
7198 clean3: /* vaddr, intmode+region, pci */
7199         iounmap(h->vaddr);
7200         h->vaddr = NULL;
7201 clean2: /* intmode+region, pci */
7202         hpsa_disable_interrupt_mode(h);
7203         /*
7204          * call pci_disable_device before pci_release_regions per
7205          * Documentation/PCI/pci.txt
7206          */
7207         pci_disable_device(h->pdev);
7208         pci_release_regions(h->pdev);
7209         return err;
7210 }
7211
7212 static void hpsa_hba_inquiry(struct ctlr_info *h)
7213 {
7214         int rc;
7215
7216 #define HBA_INQUIRY_BYTE_COUNT 64
7217         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7218         if (!h->hba_inquiry_data)
7219                 return;
7220         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7221                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7222         if (rc != 0) {
7223                 kfree(h->hba_inquiry_data);
7224                 h->hba_inquiry_data = NULL;
7225         }
7226 }
7227
7228 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7229 {
7230         int rc, i;
7231         void __iomem *vaddr;
7232
7233         if (!reset_devices)
7234                 return 0;
7235
7236         /* The kdump kernel is loading and we don't know what state the
7237          * PCI interface is in.  dev->enable_cnt is zero, so we call
7238          * enable+disable, wait a while, and then switch it back on.
7239          */
7240         rc = pci_enable_device(pdev);
7241         if (rc) {
7242                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7243                 return -ENODEV;
7244         }
7245         pci_disable_device(pdev);
7246         msleep(260);                    /* a randomly chosen number */
7247         rc = pci_enable_device(pdev);
7248         if (rc) {
7249                 dev_warn(&pdev->dev, "failed to enable device.\n");
7250                 return -ENODEV;
7251         }
7252
7253         pci_set_master(pdev);
7254
7255         vaddr = pci_ioremap_bar(pdev, 0);
7256         if (vaddr == NULL) {
7257                 rc = -ENOMEM;
7258                 goto out_disable;
7259         }
7260         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7261         iounmap(vaddr);
7262
7263         /* Reset the controller with a PCI power-cycle or via doorbell */
7264         rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7265
7266         /* -ENOTSUPP here means we cannot reset the controller
7267          * but it's already (and still) up and running in
7268          * "performant mode".  Or, it might be 640x, which can't reset
7269          * due to concerns about shared bbwc between 6402/6404 pair.
7270          */
7271         if (rc)
7272                 goto out_disable;
7273
7274         /* Now try to get the controller to respond to a no-op */
7275         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7276         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7277                 if (hpsa_noop(pdev) == 0)
7278                         break;
7279                 else
7280                         dev_warn(&pdev->dev, "no-op failed%s\n",
7281                                 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
7282         }
7283
7284 out_disable:
7285
7286         pci_disable_device(pdev);
7287         return rc;
7288 }
7289
7290 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7291 {
7292         kfree(h->cmd_pool_bits);
7293         h->cmd_pool_bits = NULL;
7294         if (h->cmd_pool) {
7295                 pci_free_consistent(h->pdev,
7296                                 h->nr_cmds * sizeof(struct CommandList),
7297                                 h->cmd_pool,
7298                                 h->cmd_pool_dhandle);
7299                 h->cmd_pool = NULL;
7300                 h->cmd_pool_dhandle = 0;
7301         }
7302         if (h->errinfo_pool) {
7303                 pci_free_consistent(h->pdev,
7304                                 h->nr_cmds * sizeof(struct ErrorInfo),
7305                                 h->errinfo_pool,
7306                                 h->errinfo_pool_dhandle);
7307                 h->errinfo_pool = NULL;
7308                 h->errinfo_pool_dhandle = 0;
7309         }
7310 }
7311
7312 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7313 {
7314         h->cmd_pool_bits = kzalloc(
7315                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7316                 sizeof(unsigned long), GFP_KERNEL);
7317         h->cmd_pool = pci_alloc_consistent(h->pdev,
7318                     h->nr_cmds * sizeof(*h->cmd_pool),
7319                     &(h->cmd_pool_dhandle));
7320         h->errinfo_pool = pci_alloc_consistent(h->pdev,
7321                     h->nr_cmds * sizeof(*h->errinfo_pool),
7322                     &(h->errinfo_pool_dhandle));
7323         if ((h->cmd_pool_bits == NULL)
7324             || (h->cmd_pool == NULL)
7325             || (h->errinfo_pool == NULL)) {
7326                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
7327                 goto clean_up;
7328         }
7329         hpsa_preinitialize_commands(h);
7330         return 0;
7331 clean_up:
7332         hpsa_free_cmd_pool(h);
7333         return -ENOMEM;
7334 }
7335
7336 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7337 {
7338         int i, cpu;
7339
7340         cpu = cpumask_first(cpu_online_mask);
7341         for (i = 0; i < h->msix_vector; i++) {
7342                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7343                 cpu = cpumask_next(cpu, cpu_online_mask);
7344         }
7345 }
7346
7347 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7348 static void hpsa_free_irqs(struct ctlr_info *h)
7349 {
7350         int i;
7351
7352         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7353                 /* Single reply queue, only one irq to free */
7354                 i = h->intr_mode;
7355                 irq_set_affinity_hint(h->intr[i], NULL);
7356                 free_irq(h->intr[i], &h->q[i]);
7357                 h->q[i] = 0;
7358                 return;
7359         }
7360
7361         for (i = 0; i < h->msix_vector; i++) {
7362                 irq_set_affinity_hint(h->intr[i], NULL);
7363                 free_irq(h->intr[i], &h->q[i]);
7364                 h->q[i] = 0;
7365         }
7366         for (; i < MAX_REPLY_QUEUES; i++)
7367                 h->q[i] = 0;
7368 }
7369
7370 /* returns 0 on success; cleans up and returns -Enn on error */
7371 static int hpsa_request_irqs(struct ctlr_info *h,
7372         irqreturn_t (*msixhandler)(int, void *),
7373         irqreturn_t (*intxhandler)(int, void *))
7374 {
7375         int rc, i;
7376
7377         /*
7378          * initialize h->q[x] = x so that interrupt handlers know which
7379          * queue to process.
7380          */
7381         for (i = 0; i < MAX_REPLY_QUEUES; i++)
7382                 h->q[i] = (u8) i;
7383
7384         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7385                 /* If performant mode and MSI-X, use multiple reply queues */
7386                 for (i = 0; i < h->msix_vector; i++) {
7387                         sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7388                         rc = request_irq(h->intr[i], msixhandler,
7389                                         0, h->intrname[i],
7390                                         &h->q[i]);
7391                         if (rc) {
7392                                 int j;
7393
7394                                 dev_err(&h->pdev->dev,
7395                                         "failed to get irq %d for %s\n",
7396                                        h->intr[i], h->devname);
7397                                 for (j = 0; j < i; j++) {
7398                                         free_irq(h->intr[j], &h->q[j]);
7399                                         h->q[j] = 0;
7400                                 }
7401                                 for (; j < MAX_REPLY_QUEUES; j++)
7402                                         h->q[j] = 0;
7403                                 return rc;
7404                         }
7405                 }
7406                 hpsa_irq_affinity_hints(h);
7407         } else {
7408                 /* Use single reply pool */
7409                 if (h->msix_vector > 0 || h->msi_vector) {
7410                         if (h->msix_vector)
7411                                 sprintf(h->intrname[h->intr_mode],
7412                                         "%s-msix", h->devname);
7413                         else
7414                                 sprintf(h->intrname[h->intr_mode],
7415                                         "%s-msi", h->devname);
7416                         rc = request_irq(h->intr[h->intr_mode],
7417                                 msixhandler, 0,
7418                                 h->intrname[h->intr_mode],
7419                                 &h->q[h->intr_mode]);
7420                 } else {
7421                         sprintf(h->intrname[h->intr_mode],
7422                                 "%s-intx", h->devname);
7423                         rc = request_irq(h->intr[h->intr_mode],
7424                                 intxhandler, IRQF_SHARED,
7425                                 h->intrname[h->intr_mode],
7426                                 &h->q[h->intr_mode]);
7427                 }
7428                 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7429         }
7430         if (rc) {
7431                 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7432                        h->intr[h->intr_mode], h->devname);
7433                 hpsa_free_irqs(h);
7434                 return -ENODEV;
7435         }
7436         return 0;
7437 }
7438
7439 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7440 {
7441         int rc;
7442         hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7443
7444         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7445         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7446         if (rc) {
7447                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7448                 return rc;
7449         }
7450
7451         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7452         rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7453         if (rc) {
7454                 dev_warn(&h->pdev->dev, "Board failed to become ready "
7455                         "after soft reset.\n");
7456                 return rc;
7457         }
7458
7459         return 0;
7460 }
7461
7462 static void hpsa_free_reply_queues(struct ctlr_info *h)
7463 {
7464         int i;
7465
7466         for (i = 0; i < h->nreply_queues; i++) {
7467                 if (!h->reply_queue[i].head)
7468                         continue;
7469                 pci_free_consistent(h->pdev,
7470                                         h->reply_queue_size,
7471                                         h->reply_queue[i].head,
7472                                         h->reply_queue[i].busaddr);
7473                 h->reply_queue[i].head = NULL;
7474                 h->reply_queue[i].busaddr = 0;
7475         }
7476         h->reply_queue_size = 0;
7477 }
7478
7479 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7480 {
7481         hpsa_free_performant_mode(h);           /* init_one 7 */
7482         hpsa_free_sg_chain_blocks(h);           /* init_one 6 */
7483         hpsa_free_cmd_pool(h);                  /* init_one 5 */
7484         hpsa_free_irqs(h);                      /* init_one 4 */
7485         scsi_host_put(h->scsi_host);            /* init_one 3 */
7486         h->scsi_host = NULL;                    /* init_one 3 */
7487         hpsa_free_pci_init(h);                  /* init_one 2_5 */
7488         free_percpu(h->lockup_detected);        /* init_one 2 */
7489         h->lockup_detected = NULL;              /* init_one 2 */
7490         if (h->resubmit_wq) {
7491                 destroy_workqueue(h->resubmit_wq);      /* init_one 1 */
7492                 h->resubmit_wq = NULL;
7493         }
7494         if (h->rescan_ctlr_wq) {
7495                 destroy_workqueue(h->rescan_ctlr_wq);
7496                 h->rescan_ctlr_wq = NULL;
7497         }
7498         kfree(h);                               /* init_one 1 */
7499 }
7500
7501 /* Called when controller lockup detected. */
7502 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7503 {
7504         int i, refcount;
7505         struct CommandList *c;
7506         int failcount = 0;
7507
7508         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7509         for (i = 0; i < h->nr_cmds; i++) {
7510                 c = h->cmd_pool + i;
7511                 refcount = atomic_inc_return(&c->refcount);
7512                 if (refcount > 1) {
7513                         c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7514                         finish_cmd(c);
7515                         atomic_dec(&h->commands_outstanding);
7516                         failcount++;
7517                 }
7518                 cmd_free(h, c);
7519         }
7520         dev_warn(&h->pdev->dev,
7521                 "failed %d commands in fail_all\n", failcount);
7522 }
7523
7524 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7525 {
7526         int cpu;
7527
7528         for_each_online_cpu(cpu) {
7529                 u32 *lockup_detected;
7530                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7531                 *lockup_detected = value;
7532         }
7533         wmb(); /* be sure the per-cpu variables are out to memory */
7534 }
7535
7536 static void controller_lockup_detected(struct ctlr_info *h)
7537 {
7538         unsigned long flags;
7539         u32 lockup_detected;
7540
7541         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7542         spin_lock_irqsave(&h->lock, flags);
7543         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7544         if (!lockup_detected) {
7545                 /* no heartbeat, but controller gave us a zero. */
7546                 dev_warn(&h->pdev->dev,
7547                         "lockup detected after %d seconds but scratchpad register is zero\n",
7548                         h->heartbeat_sample_interval / HZ);
7549                 lockup_detected = 0xffffffff;
7550         }
7551         set_lockup_detected_for_all_cpus(h, lockup_detected);
7552         spin_unlock_irqrestore(&h->lock, flags);
7553         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7554                         lockup_detected, h->heartbeat_sample_interval / HZ);
7555         pci_disable_device(h->pdev);
7556         fail_all_outstanding_cmds(h);
7557 }
7558
7559 static int detect_controller_lockup(struct ctlr_info *h)
7560 {
7561         u64 now;
7562         u32 heartbeat;
7563         unsigned long flags;
7564
7565         now = get_jiffies_64();
7566         /* If we've received an interrupt recently, we're ok. */
7567         if (time_after64(h->last_intr_timestamp +
7568                                 (h->heartbeat_sample_interval), now))
7569                 return false;
7570
7571         /*
7572          * If we've already checked the heartbeat recently, we're ok.
7573          * This could happen if someone sends us a signal. We
7574          * otherwise don't care about signals in this thread.
7575          */
7576         if (time_after64(h->last_heartbeat_timestamp +
7577                                 (h->heartbeat_sample_interval), now))
7578                 return false;
7579
7580         /* If heartbeat has not changed since we last looked, we're not ok. */
7581         spin_lock_irqsave(&h->lock, flags);
7582         heartbeat = readl(&h->cfgtable->HeartBeat);
7583         spin_unlock_irqrestore(&h->lock, flags);
7584         if (h->last_heartbeat == heartbeat) {
7585                 controller_lockup_detected(h);
7586                 return true;
7587         }
7588
7589         /* We're ok. */
7590         h->last_heartbeat = heartbeat;
7591         h->last_heartbeat_timestamp = now;
7592         return false;
7593 }
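
/*
 * Timing sketch (assuming HEARTBEAT_SAMPLE_INTERVAL is 30 * HZ; see
 * hpsa.h): a lockup is declared only when all three checks above fail,
 * i.e. no interrupt within the last interval, no heartbeat sample
 * within the last interval, and a HeartBeat counter that has not
 * advanced since the previous sample.
 */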
7594
7595 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7596 {
7597         int i;
7598         char *event_type;
7599
7600         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7601                 return;
7602
7603         /* Ask the controller to clear the events we're handling. */
7604         if ((h->transMethod & (CFGTBL_Trans_io_accel1
7605                         | CFGTBL_Trans_io_accel2)) &&
7606                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7607                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7608
7609                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7610                         event_type = "state change";
7611                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7612                         event_type = "configuration change";
7613                 /* Stop sending new RAID offload reqs via the IO accelerator */
7614                 scsi_block_requests(h->scsi_host);
7615                 for (i = 0; i < h->ndevices; i++)
7616                         h->dev[i]->offload_enabled = 0;
7617                 hpsa_drain_accel_commands(h);
7618                 /* Set 'accelerator path config change' bit */
7619                 dev_warn(&h->pdev->dev,
7620                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7621                         h->events, event_type);
7622                 writel(h->events, &(h->cfgtable->clear_event_notify));
7623                 /* Set the "clear event notify field update" bit 6 */
7624                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7625                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7626                 hpsa_wait_for_clear_event_notify_ack(h);
7627                 scsi_unblock_requests(h->scsi_host);
7628         } else {
7629                 /* Acknowledge controller notification events. */
7630                 writel(h->events, &(h->cfgtable->clear_event_notify));
7631                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7632                 hpsa_wait_for_clear_event_notify_ack(h);
7633 #if 0
7634                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7635                 hpsa_wait_for_mode_change_ack(h);
7636 #endif
7637         }
7639 }
7640
7641 /* Check a register on the controller to see if there are configuration
7642  * changes (added/changed/removed logical drives, etc.) which mean that
7643  * we should rescan the controller for devices.
7644  * Also check flag for driver-initiated rescan.
7645  */
7646 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7647 {
7648         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7649                 return 0;
7650
7651         h->events = readl(&(h->cfgtable->event_notify));
7652         return h->events & RESCAN_REQUIRED_EVENT_BITS;
7653 }
7654
7655 /*
7656  * Check if any of the offline devices have become ready
7657  */
7658 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7659 {
7660         unsigned long flags;
7661         struct offline_device_entry *d;
7662         struct list_head *this, *tmp;
7663
7664         spin_lock_irqsave(&h->offline_device_lock, flags);
7665         list_for_each_safe(this, tmp, &h->offline_device_list) {
7666                 d = list_entry(this, struct offline_device_entry,
7667                                 offline_list);
7668                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7669                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7670                         spin_lock_irqsave(&h->offline_device_lock, flags);
7671                         list_del(&d->offline_list);
7672                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7673                         return 1;
7674                 }
7675                 spin_lock_irqsave(&h->offline_device_lock, flags);
7676         }
7677         spin_unlock_irqrestore(&h->offline_device_lock, flags);
7678         return 0;
7679 }
7680
7681 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
7682 {
7683         unsigned long flags;
7684         struct ctlr_info *h = container_of(to_delayed_work(work),
7685                                         struct ctlr_info, rescan_ctlr_work);
7686
7688         if (h->remove_in_progress)
7689                 return;
7690
7691         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7692                 scsi_host_get(h->scsi_host);
7693                 hpsa_ack_ctlr_events(h);
7694                 hpsa_scan_start(h->scsi_host);
7695                 scsi_host_put(h->scsi_host);
7696         }
7697         spin_lock_irqsave(&h->lock, flags);
7698         if (!h->remove_in_progress)
7699                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7700                                 h->heartbeat_sample_interval);
7701         spin_unlock_irqrestore(&h->lock, flags);
7702 }
7703
7704 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7705 {
7706         unsigned long flags;
7707         struct ctlr_info *h = container_of(to_delayed_work(work),
7708                                         struct ctlr_info, monitor_ctlr_work);
7709
7710         detect_controller_lockup(h);
7711         if (lockup_detected(h))
7712                 return;
7713
7714         spin_lock_irqsave(&h->lock, flags);
7715         if (!h->remove_in_progress)
7716                 schedule_delayed_work(&h->monitor_ctlr_work,
7717                                 h->heartbeat_sample_interval);
7718         spin_unlock_irqrestore(&h->lock, flags);
7719 }
7720
7721 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7722                                                 char *name)
7723 {
7724         struct workqueue_struct *wq = NULL;
7725
7726         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
7727         if (!wq)
7728                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7729
7730         return wq;
7731 }
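
/*
 * Naming example: for the first controller (h->ctlr == 0) the two call
 * sites in hpsa_init_one() produce ordered workqueues named
 * "rescan_0_hpsa" and "resubmit_0_hpsa", per the "%s_%d_hpsa" format
 * above.
 */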
7732
7733 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7734 {
7735         int dac, rc;
7736         struct ctlr_info *h;
7737         int try_soft_reset = 0;
7738         unsigned long flags;
7739         u32 board_id;
7740
7741         if (number_of_controllers == 0)
7742                 printk(KERN_INFO DRIVER_NAME "\n");
7743
7744         rc = hpsa_lookup_board_id(pdev, &board_id);
7745         if (rc < 0) {
7746                 dev_warn(&pdev->dev, "Board ID not found\n");
7747                 return rc;
7748         }
7749
7750         rc = hpsa_init_reset_devices(pdev, board_id);
7751         if (rc) {
7752                 if (rc != -ENOTSUPP)
7753                         return rc;
7754                 /* If the reset fails in a particular way (it has no way to do
7755                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
7756                  * a soft reset once we get the controller configured up to the
7757                  * point that it can accept a command.
7758                  */
7759                 try_soft_reset = 1;
7760                 rc = 0;
7761         }
7762
7763 reinit_after_soft_reset:
7764
7765         /* Command structures must be aligned on a 32-byte boundary because
7766          * the 5 lower bits of the address are used by the hardware and by
7767          * the driver.  See comments in hpsa.h for more info.
7768          */
7769         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
7770         h = kzalloc(sizeof(*h), GFP_KERNEL);
7771         if (!h) {
7772                 dev_err(&pdev->dev, "Failed to allocate controller head\n");
7773                 return -ENOMEM;
7774         }
7775
7776         h->pdev = pdev;
7777
7778         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
7779         INIT_LIST_HEAD(&h->offline_device_list);
7780         spin_lock_init(&h->lock);
7781         spin_lock_init(&h->offline_device_lock);
7782         spin_lock_init(&h->scan_lock);
7783         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
7784         atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
7785
7786         /* Allocate and clear per-cpu variable lockup_detected */
7787         h->lockup_detected = alloc_percpu(u32);
7788         if (!h->lockup_detected) {
7789                 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
7790                 rc = -ENOMEM;
7791                 goto clean1;    /* aer/h */
7792         }
7793         set_lockup_detected_for_all_cpus(h, 0);
7794
7795         rc = hpsa_pci_init(h);
7796         if (rc)
7797                 goto clean2;    /* lu, aer/h */
7798
7799         /* relies on h-> settings made by hpsa_pci_init, including
7800          * h->intr_mode and the h->intr[] vector table */
7801         rc = hpsa_scsi_host_alloc(h);
7802         if (rc)
7803                 goto clean2_5;  /* pci, lu, aer/h */
7804
7805         sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
7806         h->ctlr = number_of_controllers;
7807         number_of_controllers++;
7808
7809         /* configure PCI DMA stuff */
7810         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7811         if (rc == 0) {
7812                 dac = 1;
7813         } else {
7814                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7815                 if (rc == 0) {
7816                         dac = 0;
7817                 } else {
7818                         dev_err(&pdev->dev, "no suitable DMA available\n");
7819                         goto clean3;    /* shost, pci, lu, aer/h */
7820                 }
7821         }
7822
7823         /* make sure the board interrupts are off */
7824         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7825
7826         rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
7827         if (rc)
7828                 goto clean3;    /* shost, pci, lu, aer/h */
7829         rc = hpsa_alloc_cmd_pool(h);
7830         if (rc)
7831                 goto clean4;    /* irq, shost, pci, lu, aer/h */
7832         rc = hpsa_alloc_sg_chain_blocks(h);
7833         if (rc)
7834                 goto clean5;    /* cmd, irq, shost, pci, lu, aer/h */
7835         init_waitqueue_head(&h->scan_wait_queue);
7836         init_waitqueue_head(&h->abort_cmd_wait_queue);
7837         init_waitqueue_head(&h->abort_sync_wait_queue);
7838         h->scan_finished = 1; /* no scan currently in progress */
7839
7840         pci_set_drvdata(pdev, h);
7841         h->ndevices = 0;
7842         h->hba_mode_enabled = 0;
7843
7844         spin_lock_init(&h->devlock);
7845         rc = hpsa_put_ctlr_into_performant_mode(h);
7846         if (rc)
7847                 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7848
7849         /* hook into SCSI subsystem */
7850         rc = hpsa_scsi_add_host(h);
7851         if (rc)
7852                 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
7853
7854         /* create the rescan and resubmit workqueues */
7855         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7856         if (!h->rescan_ctlr_wq) {
7857                 rc = -ENOMEM;
7858                 goto clean7;
7859         }
7860
7861         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7862         if (!h->resubmit_wq) {
7863                 rc = -ENOMEM;
7864                 goto clean7;    /* aer/h */
7865         }
7866
7867         /*
7868          * At this point, the controller is ready to take commands.
7869          * Now, if reset_devices was set and the hard reset didn't work, try
7870          * the soft reset and see if that works.
7871          */
7872         if (try_soft_reset) {
7873
7874                 /* This is kind of gross.  We may or may not get a completion
7875                  * from the soft reset command, and if we do, then the value
7876                  * from the fifo may or may not be valid.  So, we wait 10 secs
7877                  * after the reset throwing away any completions we get during
7878                  * that time.  Unregister the interrupt handler and register
7879                  * fake ones to scoop up any residual completions.
7880                  */
7881                 spin_lock_irqsave(&h->lock, flags);
7882                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7883                 spin_unlock_irqrestore(&h->lock, flags);
7884                 hpsa_free_irqs(h);
7885                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
7886                                         hpsa_intx_discard_completions);
7887                 if (rc) {
7888                         dev_warn(&h->pdev->dev,
7889                                 "Failed to request_irq after soft reset.\n");
7890                         /*
7891                          * cannot goto clean7 or free_irqs will be called
7892                          * again. Instead, do its work
7893                          */
7894                         hpsa_free_performant_mode(h);   /* clean7 */
7895                         hpsa_free_sg_chain_blocks(h);   /* clean6 */
7896                         hpsa_free_cmd_pool(h);          /* clean5 */
7897                         /*
7898                          * skip hpsa_free_irqs(h) clean4 since that
7899                          * was just called before request_irqs failed
7900                          */
7901                         goto clean3;
7902                 }
7903
7904                 rc = hpsa_kdump_soft_reset(h);
7905                 if (rc)
7906                         /* Neither hard nor soft reset worked, we're hosed. */
7907                         goto clean9;
7908
7909                 dev_info(&h->pdev->dev, "Board READY.\n");
7910                 dev_info(&h->pdev->dev,
7911                         "Waiting for stale completions to drain.\n");
7912                 h->access.set_intr_mask(h, HPSA_INTR_ON);
7913                 msleep(10000);
7914                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7915
7916                 rc = controller_reset_failed(h->cfgtable);
7917                 if (rc)
7918                         dev_info(&h->pdev->dev,
7919                                 "Soft reset appears to have failed.\n");
7920
7921                 /* since the controller's reset, we have to go back and re-init
7922                  * everything.  Easiest to just forget what we've done and do it
7923                  * all over again.
7924                  */
7925                 hpsa_undo_allocations_after_kdump_soft_reset(h);
7926                 try_soft_reset = 0;
7927                 if (rc)
7928                         /* don't goto clean, we already unallocated */
7929                         return -ENODEV;
7930
7931                 goto reinit_after_soft_reset;
7932         }
7933
7934         /* Enable Accelerated IO path at driver layer */
7935         h->acciopath_status = 1;
7936
7938         /* Turn the interrupts on so we can service requests */
7939         h->access.set_intr_mask(h, HPSA_INTR_ON);
7940
7941         hpsa_hba_inquiry(h);
7942
7943         /* Monitor the controller for firmware lockups */
7944         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7945         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7946         schedule_delayed_work(&h->monitor_ctlr_work,
7947                                 h->heartbeat_sample_interval);
7948         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7949         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7950                                 h->heartbeat_sample_interval);
7951         return 0;
7952
clean9: /* wq, perf, sg, cmd, irq, shost, pci, lu, aer/h */
7954         kfree(h->hba_inquiry_data);
7955 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
7956         hpsa_free_performant_mode(h);
7957         h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
7959         hpsa_free_sg_chain_blocks(h);
7960 clean5: /* cmd, irq, shost, pci, lu, aer/h */
7961         hpsa_free_cmd_pool(h);
7962 clean4: /* irq, shost, pci, lu, aer/h */
7963         hpsa_free_irqs(h);
7964 clean3: /* shost, pci, lu, aer/h */
7965         scsi_host_put(h->scsi_host);
7966         h->scsi_host = NULL;
7967 clean2_5: /* pci, lu, aer/h */
7968         hpsa_free_pci_init(h);
7969 clean2: /* lu, aer/h */
7970         if (h->lockup_detected) {
7971                 free_percpu(h->lockup_detected);
7972                 h->lockup_detected = NULL;
7973         }
7974 clean1: /* wq/aer/h */
7975         if (h->resubmit_wq) {
7976                 destroy_workqueue(h->resubmit_wq);
7977                 h->resubmit_wq = NULL;
7978         }
7979         if (h->rescan_ctlr_wq) {
7980                 destroy_workqueue(h->rescan_ctlr_wq);
7981                 h->rescan_ctlr_wq = NULL;
7982         }
7983         kfree(h);
7984         return rc;
7985 }
7986
static void hpsa_flush_cache(struct ctlr_info *h)
{
        char *flush_buf;
        struct CommandList *c;
        int rc;

        if (unlikely(lockup_detected(h)))
                return;
        flush_buf = kzalloc(4, GFP_KERNEL);
        if (!flush_buf)
                return;

        c = cmd_alloc(h);

        if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
                RAID_CTLR_LUNID, TYPE_CMD))
                goto out;
        rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
                                        PCI_DMA_TODEVICE, NO_TIMEOUT);
        if (rc)
                goto out;
        if (c->err_info->CommandStatus == 0)
                goto done;
out:
        dev_warn(&h->pdev->dev,
                "error flushing cache on controller\n");
done:
        cmd_free(h, c);
        kfree(flush_buf);
}
8016
8017 static void hpsa_shutdown(struct pci_dev *pdev)
8018 {
8019         struct ctlr_info *h;
8020
8021         h = pci_get_drvdata(pdev);
        /*
         * Turn board interrupts off and send the flush cache command to
         * write all data in the battery-backed cache out to disk.
         */
8026         hpsa_flush_cache(h);
8027         h->access.set_intr_mask(h, HPSA_INTR_OFF);
8028         hpsa_free_irqs(h);                      /* init_one 4 */
8029         hpsa_disable_interrupt_mode(h);         /* pci_init 2 */
8030 }
8031
8032 static void hpsa_free_device_info(struct ctlr_info *h)
8033 {
8034         int i;
8035
8036         for (i = 0; i < h->ndevices; i++) {
8037                 kfree(h->dev[i]);
8038                 h->dev[i] = NULL;
8039         }
8040 }
8041
8042 static void hpsa_remove_one(struct pci_dev *pdev)
8043 {
8044         struct ctlr_info *h;
8045         unsigned long flags;
8046
        h = pci_get_drvdata(pdev);
        if (h == NULL) {
                dev_err(&pdev->dev, "unable to remove device\n");
                return;
        }
8052
8053         /* Get rid of any controller monitoring work items */
8054         spin_lock_irqsave(&h->lock, flags);
8055         h->remove_in_progress = 1;
8056         spin_unlock_irqrestore(&h->lock, flags);
8057         cancel_delayed_work_sync(&h->monitor_ctlr_work);
8058         cancel_delayed_work_sync(&h->rescan_ctlr_work);
8059         destroy_workqueue(h->rescan_ctlr_wq);
8060         destroy_workqueue(h->resubmit_wq);
8061
8062         /* includes hpsa_free_irqs - init_one 4 */
8063         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8064         hpsa_shutdown(pdev);
8065
8066         hpsa_free_device_info(h);               /* scan */
8067
8068         kfree(h->hba_inquiry_data);                     /* init_one 10 */
8069         h->hba_inquiry_data = NULL;                     /* init_one 10 */
8070         if (h->scsi_host)
8071                 scsi_remove_host(h->scsi_host);         /* init_one 8 */
8072         hpsa_free_ioaccel2_sg_chain_blocks(h);
8073         hpsa_free_performant_mode(h);                   /* init_one 7 */
8074         hpsa_free_sg_chain_blocks(h);                   /* init_one 6 */
8075         hpsa_free_cmd_pool(h);                          /* init_one 5 */
8076
8077         /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8078
8079         scsi_host_put(h->scsi_host);                    /* init_one 3 */
8080         h->scsi_host = NULL;                            /* init_one 3 */
8081
8082         /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8083         hpsa_free_pci_init(h);                          /* init_one 2.5 */
8084
8085         free_percpu(h->lockup_detected);                /* init_one 2 */
8086         h->lockup_detected = NULL;                      /* init_one 2 */
8087         /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
8088         kfree(h);                                       /* init_one 1 */
8089 }
8090
8091 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8092         __attribute__((unused)) pm_message_t state)
8093 {
8094         return -ENOSYS;
8095 }
8096
8097 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8098 {
8099         return -ENOSYS;
8100 }
8101
8102 static struct pci_driver hpsa_pci_driver = {
8103         .name = HPSA,
8104         .probe = hpsa_init_one,
8105         .remove = hpsa_remove_one,
        .id_table = hpsa_pci_device_id,
8107         .shutdown = hpsa_shutdown,
8108         .suspend = hpsa_suspend,
8109         .resume = hpsa_resume,
8110 };
8111
/*
 * Fill in bucket_map[], given nsgs (the max number of scatter gather
 * elements supported) and bucket[], an array of num_buckets integers
 * (8 entries for the standard and ioaccel1 transports, 16 for ioaccel2).
 * The bucket[] array contains the DMA transfer sizes (in 16-byte
 * increments) which the controller uses to fetch commands.  This
 * function fills in bucket_map[], which maps a given number of scatter
 * gather elements to one of those DMA transfer sizes.  The point of it
 * is to allow the controller to do only as much DMA as needed to fetch
 * the command, with the DMA transfer size encoded in the lower bits of
 * the command address.
 */
8124 static void  calc_bucket_map(int bucket[], int num_buckets,
8125         int nsgs, int min_blocks, u32 *bucket_map)
8126 {
8127         int i, j, b, size;
8128
8129         /* Note, bucket_map must have nsgs+1 entries. */
8130         for (i = 0; i <= nsgs; i++) {
8131                 /* Compute size of a command with i SG entries */
8132                 size = i + min_blocks;
8133                 b = num_buckets; /* Assume the biggest bucket */
8134                 /* Find the bucket that is just big enough */
8135                 for (j = 0; j < num_buckets; j++) {
8136                         if (bucket[j] >= size) {
8137                                 b = j;
8138                                 break;
8139                         }
8140                 }
8141                 /* for a command with i SG entries, use bucket b. */
8142                 bucket_map[i] = b;
8143         }
8144 }
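/*
 * Worked example (illustrative numbers only, using the default
 * bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and the
 * min_blocks = 4 passed in by hpsa_enter_performant_mode): a command
 * with i = 3 SG entries has size = 3 + 4 = 7 blocks.  The first bucket
 * >= 7 is bucket[2] = 8, so bucket_map[3] = 2, and the controller
 * fetches only 8 * 16 = 128 bytes for that command.
 */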
8145
/*
 * Return -ENODEV on error, 0 on success (or no action).
 * Allocates numerous items that must be freed later.
 */
8150 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
8151 {
8152         int i;
8153         unsigned long register_value;
8154         unsigned long transMethod = CFGTBL_Trans_Performant |
8155                         (trans_support & CFGTBL_Trans_use_short_tags) |
8156                                 CFGTBL_Trans_enable_directed_msix |
8157                         (trans_support & (CFGTBL_Trans_io_accel1 |
8158                                 CFGTBL_Trans_io_accel2));
8159         struct access_method access = SA5_performant_access;
8160
        /*
         * This is a bit complicated.  There are 8 registers on the
         * controller which we write to tell it the 8 different command
         * sizes it may see; this reduces the DMA done to fetch each
         * command.  Encoded into each command's tag are 3 bits which
         * communicate to the controller which of the eight sizes that
         * command fits within.  The size of each command depends on how
         * many scatter gather entries there are.  Each SG entry requires
         * 16 bytes.  The eight registers are programmed with the number
         * of 16-byte blocks a command of that size requires.  The
         * smallest command possible requires 5 such 16-byte blocks; the
         * largest requires SG_ENTRIES_IN_CMD + 4 of them.  Note, this
         * only extends to the SG entries contained within the command
         * block, and does not extend to chained blocks of SG elements.
         * bft[] contains the eight values we write to the registers.
         * They are not evenly distributed, but have more sizes for small
         * commands, and fewer sizes for larger commands.
         */
8178         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
8179 #define MIN_IOACCEL2_BFT_ENTRY 5
8180 #define HPSA_IOACCEL2_HEADER_SZ 4
8181         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8182                         13, 14, 15, 16, 17, 18, 19,
8183                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8184         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8185         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8186         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8187                                  16 * MIN_IOACCEL2_BFT_ENTRY);
8188         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
8189         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
        /* Decoding the first few bft[] entries:
         *  5 = 1 s/g entry or 4k
         *  6 = 2 s/g entry or 8k
         *  8 = 4 s/g entry or 16k
         * 10 = 6 s/g entry or 24k
         */
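        /*
         * Applying the bft[] above (illustrative arithmetic, not from
         * the spec): a command with 6 SG entries needs 6 + 4 = 10
         * 16-byte blocks, which lands in bucket 3 (bft[3] == 10), so 3
         * is the 3-bit value encoded into that command's tag.
         */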
8195
8196         /* If the controller supports either ioaccel method then
8197          * we can also use the RAID stack submit path that does not
8198          * perform the superfluous readl() after each command submission.
8199          */
8200         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8201                 access = SA5_performant_access_no_read;
8202
8203         /* Controller spec: zero out this buffer. */
8204         for (i = 0; i < h->nreply_queues; i++)
8205                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
8206
8207         bft[7] = SG_ENTRIES_IN_CMD + 4;
8208         calc_bucket_map(bft, ARRAY_SIZE(bft),
8209                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
8210         for (i = 0; i < 8; i++)
8211                 writel(bft[i], &h->transtable->BlockFetch[i]);
8212
8213         /* size of controller ring buffer */
8214         writel(h->max_commands, &h->transtable->RepQSize);
8215         writel(h->nreply_queues, &h->transtable->RepQCount);
8216         writel(0, &h->transtable->RepQCtrAddrLow32);
8217         writel(0, &h->transtable->RepQCtrAddrHigh32);
8218
8219         for (i = 0; i < h->nreply_queues; i++) {
8220                 writel(0, &h->transtable->RepQAddr[i].upper);
8221                 writel(h->reply_queue[i].busaddr,
8222                         &h->transtable->RepQAddr[i].lower);
8223         }
8224
8225         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
8226         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
        /* Enable outbound interrupt coalescing in accelerator mode. */
        if (trans_support & CFGTBL_Trans_io_accel1) {
                access = SA5_ioaccel_mode1_access;
                writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
                writel(4, &h->cfgtable->HostWrite.CoalIntCount);
        } else if (trans_support & CFGTBL_Trans_io_accel2) {
                access = SA5_ioaccel_mode2_access;
                writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
                writel(4, &h->cfgtable->HostWrite.CoalIntCount);
        }
8241         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8242         if (hpsa_wait_for_mode_change_ack(h)) {
8243                 dev_err(&h->pdev->dev,
8244                         "performant mode problem - doorbell timeout\n");
8245                 return -ENODEV;
8246         }
8247         register_value = readl(&(h->cfgtable->TransportActive));
8248         if (!(register_value & CFGTBL_Trans_Performant)) {
8249                 dev_err(&h->pdev->dev,
8250                         "performant mode problem - transport not active\n");
8251                 return -ENODEV;
8252         }
8253         /* Change the access methods to the performant access methods */
8254         h->access = access;
8255         h->transMethod = transMethod;
8256
8257         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8258                 (trans_support & CFGTBL_Trans_io_accel2)))
8259                 return 0;
8260
8261         if (trans_support & CFGTBL_Trans_io_accel1) {
8262                 /* Set up I/O accelerator mode */
8263                 for (i = 0; i < h->nreply_queues; i++) {
8264                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8265                         h->reply_queue[i].current_entry =
8266                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8267                 }
8268                 bft[7] = h->ioaccel_maxsg + 8;
8269                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8270                                 h->ioaccel1_blockFetchTable);
8271
8272                 /* initialize all reply queue entries to unused */
8273                 for (i = 0; i < h->nreply_queues; i++)
8274                         memset(h->reply_queue[i].head,
8275                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8276                                 h->reply_queue_size);
8277
8278                 /* set all the constant fields in the accelerator command
8279                  * frames once at init time to save CPU cycles later.
8280                  */
8281                 for (i = 0; i < h->nr_cmds; i++) {
8282                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8283
8284                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
8285                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
8286                                         (i * sizeof(struct ErrorInfo)));
8287                         cp->err_info_len = sizeof(struct ErrorInfo);
8288                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
8289                         cp->host_context_flags =
8290                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
8291                         cp->timeout_sec = 0;
8292                         cp->ReplyQueue = 0;
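                        /*
                         * The tag embeds the command's pool index,
                         * shifted up so the hardware can use the low
                         * bits; completions can then recover the index
                         * straight from the tag (hence "direct lookup").
                         */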
8293                         cp->tag =
8294                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
8295                         cp->host_addr =
8296                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
8297                                         (i * sizeof(struct io_accel1_cmd)));
8298                 }
8299         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8300                 u64 cfg_offset, cfg_base_addr_index;
8301                 u32 bft2_offset, cfg_base_addr;
8302                 int rc;
8303
                rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
                        &cfg_base_addr_index, &cfg_offset);
                if (rc)
                        return rc;
8306                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8307                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8308                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8309                                 4, h->ioaccel2_blockFetchTable);
8310                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8311                 BUILD_BUG_ON(offsetof(struct CfgTable,
8312                                 io_accel_request_size_offset) != 0xb8);
                h->ioaccel2_bft2_regs =
                        remap_pci_mem(pci_resource_start(h->pdev,
                                        cfg_base_addr_index) +
                                        cfg_offset + bft2_offset,
                                        ARRAY_SIZE(bft2) *
                                        sizeof(*h->ioaccel2_bft2_regs));
                if (!h->ioaccel2_bft2_regs) {
                        dev_err(&h->pdev->dev,
                                "Could not remap the BFT2 registers\n");
                        return -ENODEV;
                }
8319                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8320                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
8321         }
8322         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8323         if (hpsa_wait_for_mode_change_ack(h)) {
8324                 dev_err(&h->pdev->dev,
8325                         "performant mode problem - enabling ioaccel mode\n");
8326                 return -ENODEV;
8327         }
8328         return 0;
8329 }
8330
8331 /* Free ioaccel1 mode command blocks and block fetch table */
8332 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8333 {
8334         if (h->ioaccel_cmd_pool) {
8335                 pci_free_consistent(h->pdev,
8336                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8337                         h->ioaccel_cmd_pool,
8338                         h->ioaccel_cmd_pool_dhandle);
8339                 h->ioaccel_cmd_pool = NULL;
8340                 h->ioaccel_cmd_pool_dhandle = 0;
8341         }
8342         kfree(h->ioaccel1_blockFetchTable);
8343         h->ioaccel1_blockFetchTable = NULL;
8344 }
8345
8346 /* Allocate ioaccel1 mode command blocks and block fetch table */
8347 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8348 {
8349         h->ioaccel_maxsg =
8350                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8351         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8352                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8353
8354         /* Command structures must be aligned on a 128-byte boundary
8355          * because the 7 lower bits of the address are used by the
8356          * hardware.
8357          */
8358         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8359                         IOACCEL1_COMMANDLIST_ALIGNMENT);
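        /*
         * Illustrative consequence of the alignment: with 128-byte
         * alignment the low 7 bits of each command's bus address are
         * always zero, so the hardware can reuse them for per-command
         * fetch information without corrupting the address itself.
         */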
8360         h->ioaccel_cmd_pool =
8361                 pci_alloc_consistent(h->pdev,
8362                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8363                         &(h->ioaccel_cmd_pool_dhandle));
8364
8365         h->ioaccel1_blockFetchTable =
8366                 kmalloc(((h->ioaccel_maxsg + 1) *
8367                                 sizeof(u32)), GFP_KERNEL);
8368
8369         if ((h->ioaccel_cmd_pool == NULL) ||
8370                 (h->ioaccel1_blockFetchTable == NULL))
8371                 goto clean_up;
8372
8373         memset(h->ioaccel_cmd_pool, 0,
8374                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8375         return 0;
8376
8377 clean_up:
8378         hpsa_free_ioaccel1_cmd_and_bft(h);
8379         return -ENOMEM;
8380 }
8381
8382 /* Free ioaccel2 mode command blocks and block fetch table */
8383 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8384 {
8385         hpsa_free_ioaccel2_sg_chain_blocks(h);
8386
8387         if (h->ioaccel2_cmd_pool) {
8388                 pci_free_consistent(h->pdev,
8389                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8390                         h->ioaccel2_cmd_pool,
8391                         h->ioaccel2_cmd_pool_dhandle);
8392                 h->ioaccel2_cmd_pool = NULL;
8393                 h->ioaccel2_cmd_pool_dhandle = 0;
8394         }
8395         kfree(h->ioaccel2_blockFetchTable);
8396         h->ioaccel2_blockFetchTable = NULL;
8397 }
8398
8399 /* Allocate ioaccel2 mode command blocks and block fetch table */
8400 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8401 {
8402         int rc;
8403
8406         h->ioaccel_maxsg =
8407                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8408         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8409                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8410
8411         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8412                         IOACCEL2_COMMANDLIST_ALIGNMENT);
8413         h->ioaccel2_cmd_pool =
8414                 pci_alloc_consistent(h->pdev,
8415                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8416                         &(h->ioaccel2_cmd_pool_dhandle));
8417
8418         h->ioaccel2_blockFetchTable =
8419                 kmalloc(((h->ioaccel_maxsg + 1) *
8420                                 sizeof(u32)), GFP_KERNEL);
8421
8422         if ((h->ioaccel2_cmd_pool == NULL) ||
8423                 (h->ioaccel2_blockFetchTable == NULL)) {
8424                 rc = -ENOMEM;
8425                 goto clean_up;
8426         }
8427
8428         rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8429         if (rc)
8430                 goto clean_up;
8431
8432         memset(h->ioaccel2_cmd_pool, 0,
8433                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8434         return 0;
8435
8436 clean_up:
8437         hpsa_free_ioaccel2_cmd_and_bft(h);
8438         return rc;
8439 }
8440
8441 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8442 static void hpsa_free_performant_mode(struct ctlr_info *h)
8443 {
8444         kfree(h->blockFetchTable);
8445         h->blockFetchTable = NULL;
8446         hpsa_free_reply_queues(h);
8447         hpsa_free_ioaccel1_cmd_and_bft(h);
8448         hpsa_free_ioaccel2_cmd_and_bft(h);
8449 }
8450
/*
 * Return -ENODEV on error, 0 on success (or no action).
 * Allocates numerous items that must be freed later.
 */
8454 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
8455 {
8456         u32 trans_support;
8457         unsigned long transMethod = CFGTBL_Trans_Performant |
8458                                         CFGTBL_Trans_use_short_tags;
8459         int i, rc;
8460
8461         if (hpsa_simple_mode)
8462                 return 0;
8463
8464         trans_support = readl(&(h->cfgtable->TransportSupport));
8465         if (!(trans_support & PERFORMANT_MODE))
8466                 return 0;
8467
8468         /* Check for I/O accelerator mode support */
8469         if (trans_support & CFGTBL_Trans_io_accel1) {
8470                 transMethod |= CFGTBL_Trans_io_accel1 |
8471                                 CFGTBL_Trans_enable_directed_msix;
8472                 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8473                 if (rc)
8474                         return rc;
8475         } else if (trans_support & CFGTBL_Trans_io_accel2) {
8476                 transMethod |= CFGTBL_Trans_io_accel2 |
8477                                 CFGTBL_Trans_enable_directed_msix;
8478                 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8479                 if (rc)
8480                         return rc;
8481         }
8482
8483         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
8484         hpsa_get_max_perf_mode_cmds(h);
8485         /* Performant mode ring buffer and supporting data structures */
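        /*
         * Example sizing (numbers illustrative): each reply queue entry
         * is a single 64-bit tag, so a controller allowing 256
         * outstanding commands needs a 256 * 8 = 2048-byte ring per
         * reply queue.
         */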
8486         h->reply_queue_size = h->max_commands * sizeof(u64);
8487
8488         for (i = 0; i < h->nreply_queues; i++) {
8489                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8490                                                 h->reply_queue_size,
8491                                                 &(h->reply_queue[i].busaddr));
8492                 if (!h->reply_queue[i].head) {
8493                         rc = -ENOMEM;
8494                         goto clean1;    /* rq, ioaccel */
8495                 }
8496                 h->reply_queue[i].size = h->max_commands;
8497                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
8498                 h->reply_queue[i].current_entry = 0;
8499         }
8500
8501         /* Need a block fetch table for performant mode */
8502         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
8503                                 sizeof(u32)), GFP_KERNEL);
8504         if (!h->blockFetchTable) {
8505                 rc = -ENOMEM;
8506                 goto clean1;    /* rq, ioaccel */
8507         }
8508
8509         rc = hpsa_enter_performant_mode(h, trans_support);
8510         if (rc)
8511                 goto clean2;    /* bft, rq, ioaccel */
8512         return 0;
8513
8514 clean2: /* bft, rq, ioaccel */
8515         kfree(h->blockFetchTable);
8516         h->blockFetchTable = NULL;
8517 clean1: /* rq, ioaccel */
8518         hpsa_free_reply_queues(h);
8519         hpsa_free_ioaccel1_cmd_and_bft(h);
8520         hpsa_free_ioaccel2_cmd_and_bft(h);
8521         return rc;
8522 }
8523
8524 static int is_accelerated_cmd(struct CommandList *c)
8525 {
8526         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8527 }
8528
8529 static void hpsa_drain_accel_commands(struct ctlr_info *h)
8530 {
8531         struct CommandList *c = NULL;
8532         int i, accel_cmds_out;
8533         int refcount;
8534
8535         do { /* wait for all outstanding ioaccel commands to drain out */
8536                 accel_cmds_out = 0;
8537                 for (i = 0; i < h->nr_cmds; i++) {
8538                         c = h->cmd_pool + i;
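                        /*
                         * Bump the refcount purely as a probe: a result
                         * greater than 1 means another path already
                         * holds this command, i.e. it may still be in
                         * flight.  The cmd_free() below merely drops
                         * our probe reference.
                         */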
8539                         refcount = atomic_inc_return(&c->refcount);
8540                         if (refcount > 1) /* Command is allocated */
8541                                 accel_cmds_out += is_accelerated_cmd(c);
8542                         cmd_free(h, c);
8543                 }
8544                 if (accel_cmds_out <= 0)
8545                         break;
8546                 msleep(100);
8547         } while (1);
8548 }
8549
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
8554 static int __init hpsa_init(void)
8555 {
8556         return pci_register_driver(&hpsa_pci_driver);
8557 }
8558
8559 static void __exit hpsa_cleanup(void)
8560 {
8561         pci_unregister_driver(&hpsa_pci_driver);
8562 }
8563
8564 static void __attribute__((unused)) verify_offsets(void)
8565 {
8566 #define VERIFY_OFFSET(member, offset) \
8567         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8568
8569         VERIFY_OFFSET(structure_size, 0);
8570         VERIFY_OFFSET(volume_blk_size, 4);
8571         VERIFY_OFFSET(volume_blk_cnt, 8);
8572         VERIFY_OFFSET(phys_blk_shift, 16);
8573         VERIFY_OFFSET(parity_rotation_shift, 17);
8574         VERIFY_OFFSET(strip_size, 18);
8575         VERIFY_OFFSET(disk_starting_blk, 20);
8576         VERIFY_OFFSET(disk_blk_cnt, 28);
8577         VERIFY_OFFSET(data_disks_per_row, 36);
8578         VERIFY_OFFSET(metadata_disks_per_row, 38);
8579         VERIFY_OFFSET(row_cnt, 40);
8580         VERIFY_OFFSET(layout_map_count, 42);
8581         VERIFY_OFFSET(flags, 44);
8582         VERIFY_OFFSET(dekindex, 46);
        /* VERIFY_OFFSET(reserved, 48); */
8584         VERIFY_OFFSET(data, 64);
8585
8586 #undef VERIFY_OFFSET
8587
8588 #define VERIFY_OFFSET(member, offset) \
8589         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8590
8591         VERIFY_OFFSET(IU_type, 0);
8592         VERIFY_OFFSET(direction, 1);
8593         VERIFY_OFFSET(reply_queue, 2);
8594         /* VERIFY_OFFSET(reserved1, 3);  */
8595         VERIFY_OFFSET(scsi_nexus, 4);
8596         VERIFY_OFFSET(Tag, 8);
8597         VERIFY_OFFSET(cdb, 16);
8598         VERIFY_OFFSET(cciss_lun, 32);
8599         VERIFY_OFFSET(data_len, 40);
8600         VERIFY_OFFSET(cmd_priority_task_attr, 44);
8601         VERIFY_OFFSET(sg_count, 45);
8602         /* VERIFY_OFFSET(reserved3 */
8603         VERIFY_OFFSET(err_ptr, 48);
8604         VERIFY_OFFSET(err_len, 56);
8605         /* VERIFY_OFFSET(reserved4  */
8606         VERIFY_OFFSET(sg, 64);
8607
8608 #undef VERIFY_OFFSET
8609
8610 #define VERIFY_OFFSET(member, offset) \
8611         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8612
8613         VERIFY_OFFSET(dev_handle, 0x00);
8614         VERIFY_OFFSET(reserved1, 0x02);
8615         VERIFY_OFFSET(function, 0x03);
8616         VERIFY_OFFSET(reserved2, 0x04);
8617         VERIFY_OFFSET(err_info, 0x0C);
8618         VERIFY_OFFSET(reserved3, 0x10);
8619         VERIFY_OFFSET(err_info_len, 0x12);
8620         VERIFY_OFFSET(reserved4, 0x13);
8621         VERIFY_OFFSET(sgl_offset, 0x14);
8622         VERIFY_OFFSET(reserved5, 0x15);
8623         VERIFY_OFFSET(transfer_len, 0x1C);
8624         VERIFY_OFFSET(reserved6, 0x20);
8625         VERIFY_OFFSET(io_flags, 0x24);
8626         VERIFY_OFFSET(reserved7, 0x26);
8627         VERIFY_OFFSET(LUN, 0x34);
8628         VERIFY_OFFSET(control, 0x3C);
8629         VERIFY_OFFSET(CDB, 0x40);
8630         VERIFY_OFFSET(reserved8, 0x50);
8631         VERIFY_OFFSET(host_context_flags, 0x60);
8632         VERIFY_OFFSET(timeout_sec, 0x62);
8633         VERIFY_OFFSET(ReplyQueue, 0x64);
8634         VERIFY_OFFSET(reserved9, 0x65);
8635         VERIFY_OFFSET(tag, 0x68);
8636         VERIFY_OFFSET(host_addr, 0x70);
8637         VERIFY_OFFSET(CISS_LUN, 0x78);
8638         VERIFY_OFFSET(SG, 0x78 + 8);
8639 #undef VERIFY_OFFSET
8640 }
8641
8642 module_init(hpsa_init);
8643 module_exit(hpsa_cleanup);