1 // SPDX-License-Identifier: GPL-2.0
3 * driver for Microchip PQI-based storage controllers
4 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
5 * Copyright (c) 2016-2018 Microsemi Corporation
6 * Copyright (c) 2016 PMC-Sierra, Inc.
8 * Questions/Comments/Bugfixes to storagedev@microchip.com
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
30 #include "smartpqi_sis.h"
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
36 #define DRIVER_VERSION "2.1.22-040"
37 #define DRIVER_MAJOR 2
38 #define DRIVER_MINOR 1
39 #define DRIVER_RELEASE 22
40 #define DRIVER_REVISION 40
42 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \
43 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT "smartpqi"
46 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
48 #define PQI_POST_RESET_DELAY_SECS 5
49 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
51 MODULE_AUTHOR("Microchip");
52 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
61 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
63 return scsi_cmd_priv(cmd);
66 static void pqi_verify_structures(void);
67 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
68 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
69 static void pqi_ctrl_offline_worker(struct work_struct *work);
70 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_scan_start(struct Scsi_Host *shost);
72 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
73 struct pqi_queue_group *queue_group, enum pqi_io_path path,
74 struct pqi_io_request *io_request);
75 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
76 struct pqi_iu_header *request, unsigned int flags,
77 struct pqi_raid_error_info *error_info);
78 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
79 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
80 unsigned int cdb_length, struct pqi_queue_group *queue_group,
81 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
82 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
83 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
84 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
85 struct pqi_scsi_dev_raid_map_data *rmd);
86 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
87 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
88 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
89 struct pqi_scsi_dev_raid_map_data *rmd);
90 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
91 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
92 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
93 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
94 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
95 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
96 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
97 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
98 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
100 /* for flags argument to pqi_submit_raid_request_synchronous() */
101 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
103 static struct scsi_transport_template *pqi_sas_transport_template;
105 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
107 enum pqi_lockup_action {
113 static enum pqi_lockup_action pqi_lockup_action = NONE;
116 enum pqi_lockup_action action;
118 } pqi_lockup_actions[] = {
133 static unsigned int pqi_supported_event_types[] = {
134 PQI_EVENT_TYPE_HOTPLUG,
135 PQI_EVENT_TYPE_HARDWARE,
136 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
137 PQI_EVENT_TYPE_LOGICAL_DEVICE,
139 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
140 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
143 static int pqi_disable_device_id_wildcards;
144 module_param_named(disable_device_id_wildcards,
145 pqi_disable_device_id_wildcards, int, 0644);
146 MODULE_PARM_DESC(disable_device_id_wildcards,
147 "Disable device ID wildcards.");
149 static int pqi_disable_heartbeat;
150 module_param_named(disable_heartbeat,
151 pqi_disable_heartbeat, int, 0644);
152 MODULE_PARM_DESC(disable_heartbeat,
153 "Disable heartbeat.");
155 static int pqi_disable_ctrl_shutdown;
156 module_param_named(disable_ctrl_shutdown,
157 pqi_disable_ctrl_shutdown, int, 0644);
158 MODULE_PARM_DESC(disable_ctrl_shutdown,
159 "Disable controller shutdown when controller locked up.");
161 static char *pqi_lockup_action_param;
162 module_param_named(lockup_action,
163 pqi_lockup_action_param, charp, 0644);
164 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
165 "\t\tSupported: none, reboot, panic\n"
166 "\t\tDefault: none");
168 static int pqi_expose_ld_first;
169 module_param_named(expose_ld_first,
170 pqi_expose_ld_first, int, 0644);
171 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
173 static int pqi_hide_vsep;
174 module_param_named(hide_vsep,
175 pqi_hide_vsep, int, 0644);
176 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
178 static int pqi_disable_managed_interrupts;
179 module_param_named(disable_managed_interrupts,
180 pqi_disable_managed_interrupts, int, 0644);
181 MODULE_PARM_DESC(disable_managed_interrupts,
182 "Disable the kernel automatically assigning SMP affinity to IRQs.");
184 static unsigned int pqi_ctrl_ready_timeout_secs;
185 module_param_named(ctrl_ready_timeout,
186 pqi_ctrl_ready_timeout_secs, uint, 0644);
187 MODULE_PARM_DESC(ctrl_ready_timeout,
188 "Timeout in seconds for driver to wait for controller ready.");
190 static char *raid_levels[] = {
200 static char *pqi_raid_level_to_string(u8 raid_level)
202 if (raid_level < ARRAY_SIZE(raid_levels))
203 return raid_levels[raid_level];
205 return "RAID UNKNOWN";
210 #define SA_RAID_1 2 /* also used for RAID 10 */
211 #define SA_RAID_5 3 /* also used for RAID 50 */
213 #define SA_RAID_6 5 /* also used for RAID 60 */
214 #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
215 #define SA_RAID_MAX SA_RAID_TRIPLE
216 #define SA_RAID_UNKNOWN 0xff
218 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
220 pqi_prep_for_scsi_done(scmd);
224 static inline void pqi_disable_write_same(struct scsi_device *sdev)
226 sdev->no_write_same = 1;
229 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
231 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
234 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
236 return !device->is_physical_device;
239 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
241 return scsi3addr[2] != 0;
244 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
246 return !ctrl_info->controller_online;
249 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
251 if (ctrl_info->controller_online)
252 if (!sis_is_firmware_running(ctrl_info))
253 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
256 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
258 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
261 #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
262 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
264 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
266 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
269 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
270 enum pqi_ctrl_mode mode)
274 driver_scratch = sis_read_driver_scratch(ctrl_info);
276 if (mode == PQI_MODE)
277 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
279 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
281 sis_write_driver_scratch(ctrl_info, driver_scratch);
284 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
286 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
289 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
293 driver_scratch = sis_read_driver_scratch(ctrl_info);
296 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
298 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
300 sis_write_driver_scratch(ctrl_info, driver_scratch);
303 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
305 ctrl_info->scan_blocked = true;
306 mutex_lock(&ctrl_info->scan_mutex);
309 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
311 ctrl_info->scan_blocked = false;
312 mutex_unlock(&ctrl_info->scan_mutex);
315 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
317 return ctrl_info->scan_blocked;
320 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
322 mutex_lock(&ctrl_info->lun_reset_mutex);
325 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
327 mutex_unlock(&ctrl_info->lun_reset_mutex);
330 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
332 struct Scsi_Host *shost;
333 unsigned int num_loops;
336 shost = ctrl_info->scsi_host;
338 scsi_block_requests(shost);
342 while (scsi_host_busy(shost)) {
350 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
352 scsi_unblock_requests(ctrl_info->scsi_host);
355 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
357 atomic_inc(&ctrl_info->num_busy_threads);
360 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
362 atomic_dec(&ctrl_info->num_busy_threads);
365 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
367 return ctrl_info->block_requests;
370 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
372 ctrl_info->block_requests = true;
375 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
377 ctrl_info->block_requests = false;
378 wake_up_all(&ctrl_info->block_requests_wait);
381 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
383 if (!pqi_ctrl_blocked(ctrl_info))
386 atomic_inc(&ctrl_info->num_blocked_threads);
387 wait_event(ctrl_info->block_requests_wait,
388 !pqi_ctrl_blocked(ctrl_info));
389 atomic_dec(&ctrl_info->num_blocked_threads);
392 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
394 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
396 unsigned long start_jiffies;
397 unsigned long warning_timeout;
398 bool displayed_warning;
400 displayed_warning = false;
401 start_jiffies = jiffies;
402 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
404 while (atomic_read(&ctrl_info->num_busy_threads) >
405 atomic_read(&ctrl_info->num_blocked_threads)) {
406 if (time_after(jiffies, warning_timeout)) {
407 dev_warn(&ctrl_info->pci_dev->dev,
408 "waiting %u seconds for driver activity to quiesce\n",
409 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
410 displayed_warning = true;
411 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
413 usleep_range(1000, 2000);
416 if (displayed_warning)
417 dev_warn(&ctrl_info->pci_dev->dev,
418 "driver activity quiesced after waiting for %u seconds\n",
419 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
422 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
424 return device->device_offline;
427 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
429 mutex_lock(&ctrl_info->ofa_mutex);
432 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
434 mutex_unlock(&ctrl_info->ofa_mutex);
437 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
439 mutex_lock(&ctrl_info->ofa_mutex);
440 mutex_unlock(&ctrl_info->ofa_mutex);
443 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
445 return mutex_is_locked(&ctrl_info->ofa_mutex);
448 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
450 device->in_remove = true;
453 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
455 return device->in_remove;
458 static inline int pqi_event_type_to_event_index(unsigned int event_type)
462 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
463 if (event_type == pqi_supported_event_types[index])
469 static inline bool pqi_is_supported_event(unsigned int event_type)
471 return pqi_event_type_to_event_index(event_type) != -1;
474 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
477 if (pqi_ctrl_offline(ctrl_info))
480 schedule_delayed_work(&ctrl_info->rescan_work, delay);
483 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
485 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
488 #define PQI_RESCAN_WORK_DELAY (10 * HZ)
490 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
492 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
495 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
497 cancel_delayed_work_sync(&ctrl_info->rescan_work);
500 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
502 if (!ctrl_info->heartbeat_counter)
505 return readl(ctrl_info->heartbeat_counter);
508 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
510 return readb(ctrl_info->soft_reset_status);
513 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
517 status = pqi_read_soft_reset_status(ctrl_info);
518 status &= ~PQI_SOFT_RESET_ABORT;
519 writeb(status, ctrl_info->soft_reset_status);
522 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
527 io_high_prio = false;
529 if (device->ncq_prio_enable) {
531 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
532 if (priority_class == IOPRIO_CLASS_RT) {
533 /* Set NCQ priority for read/write commands. */
534 switch (scmd->cmnd[0]) {
552 static int pqi_map_single(struct pci_dev *pci_dev,
553 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
554 size_t buffer_length, enum dma_data_direction data_direction)
556 dma_addr_t bus_address;
558 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
561 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
563 if (dma_mapping_error(&pci_dev->dev, bus_address))
566 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
567 put_unaligned_le32(buffer_length, &sg_descriptor->length);
568 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
573 static void pqi_pci_unmap(struct pci_dev *pci_dev,
574 struct pqi_sg_descriptor *descriptors, int num_descriptors,
575 enum dma_data_direction data_direction)
579 if (data_direction == DMA_NONE)
582 for (i = 0; i < num_descriptors; i++)
583 dma_unmap_single(&pci_dev->dev,
584 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
585 get_unaligned_le32(&descriptors[i].length),
589 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
590 struct pqi_raid_path_request *request, u8 cmd,
591 u8 *scsi3addr, void *buffer, size_t buffer_length,
592 u16 vpd_page, enum dma_data_direction *dir)
595 size_t cdb_length = buffer_length;
597 memset(request, 0, sizeof(*request));
599 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
600 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
601 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
602 &request->header.iu_length);
603 put_unaligned_le32(buffer_length, &request->buffer_length);
604 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
605 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
606 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
612 request->data_direction = SOP_READ_FLAG;
614 if (vpd_page & VPD_PAGE) {
616 cdb[2] = (u8)vpd_page;
618 cdb[4] = (u8)cdb_length;
620 case CISS_REPORT_LOG:
621 case CISS_REPORT_PHYS:
622 request->data_direction = SOP_READ_FLAG;
624 if (cmd == CISS_REPORT_PHYS) {
625 if (ctrl_info->rpl_extended_format_4_5_supported)
626 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
628 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
630 cdb[1] = ctrl_info->ciss_report_log_flags;
632 put_unaligned_be32(cdb_length, &cdb[6]);
634 case CISS_GET_RAID_MAP:
635 request->data_direction = SOP_READ_FLAG;
637 cdb[1] = CISS_GET_RAID_MAP;
638 put_unaligned_be32(cdb_length, &cdb[6]);
641 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
642 request->data_direction = SOP_WRITE_FLAG;
644 cdb[6] = BMIC_FLUSH_CACHE;
645 put_unaligned_be16(cdb_length, &cdb[7]);
647 case BMIC_SENSE_DIAG_OPTIONS:
650 case BMIC_IDENTIFY_CONTROLLER:
651 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
652 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
653 case BMIC_SENSE_FEATURE:
654 request->data_direction = SOP_READ_FLAG;
657 put_unaligned_be16(cdb_length, &cdb[7]);
659 case BMIC_SET_DIAG_OPTIONS:
662 case BMIC_WRITE_HOST_WELLNESS:
663 request->data_direction = SOP_WRITE_FLAG;
666 put_unaligned_be16(cdb_length, &cdb[7]);
668 case BMIC_CSMI_PASSTHRU:
669 request->data_direction = SOP_BIDIRECTIONAL;
671 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
673 put_unaligned_be16(cdb_length, &cdb[7]);
676 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
680 switch (request->data_direction) {
682 *dir = DMA_FROM_DEVICE;
685 *dir = DMA_TO_DEVICE;
687 case SOP_NO_DIRECTION_FLAG:
691 *dir = DMA_BIDIRECTIONAL;
695 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
696 buffer, buffer_length, *dir);
699 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
701 io_request->scmd = NULL;
702 io_request->status = 0;
703 io_request->error_info = NULL;
704 io_request->raid_bypass = false;
707 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
709 struct pqi_io_request *io_request;
712 if (scmd) { /* SML I/O request */
713 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
715 i = blk_mq_unique_tag_to_tag(blk_tag);
716 io_request = &ctrl_info->io_request_pool[i];
717 if (atomic_inc_return(&io_request->refcount) > 1) {
718 atomic_dec(&io_request->refcount);
721 } else { /* IOCTL or driver internal request */
723 * benignly racy - may have to wait for an open slot.
724 * command slot range is scsi_ml_can_queue -
725 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
729 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
730 if (atomic_inc_return(&io_request->refcount) == 1)
732 atomic_dec(&io_request->refcount);
733 i = (i + 1) % PQI_RESERVED_IO_SLOTS;
738 pqi_reinit_io_request(io_request);
743 static void pqi_free_io_request(struct pqi_io_request *io_request)
745 atomic_dec(&io_request->refcount);
748 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
749 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
750 struct pqi_raid_error_info *error_info)
753 struct pqi_raid_path_request request;
754 enum dma_data_direction dir;
756 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
757 buffer, buffer_length, vpd_page, &dir);
761 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
763 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
768 /* helper functions for pqi_send_scsi_raid_request */
770 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
771 u8 cmd, void *buffer, size_t buffer_length)
773 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
774 buffer, buffer_length, 0, NULL);
777 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
778 u8 cmd, void *buffer, size_t buffer_length,
779 struct pqi_raid_error_info *error_info)
781 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
782 buffer, buffer_length, 0, error_info);
785 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
786 struct bmic_identify_controller *buffer)
788 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
789 buffer, sizeof(*buffer));
792 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
793 struct bmic_sense_subsystem_info *sense_info)
795 return pqi_send_ctrl_raid_request(ctrl_info,
796 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
797 sizeof(*sense_info));
800 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
801 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
803 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
804 buffer, buffer_length, vpd_page, NULL);
807 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
808 struct pqi_scsi_dev *device,
809 struct bmic_identify_physical_device *buffer, size_t buffer_length)
812 enum dma_data_direction dir;
813 u16 bmic_device_index;
814 struct pqi_raid_path_request request;
816 rc = pqi_build_raid_path_request(ctrl_info, &request,
817 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
818 buffer_length, 0, &dir);
822 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
823 request.cdb[2] = (u8)bmic_device_index;
824 request.cdb[9] = (u8)(bmic_device_index >> 8);
826 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
828 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
833 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
837 bytes = get_unaligned_le16(limit);
848 struct bmic_sense_feature_buffer {
849 struct bmic_sense_feature_buffer_header header;
850 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
855 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
856 offsetofend(struct bmic_sense_feature_buffer, \
857 aio_subpage.max_write_raid_1_10_3drive)
859 #define MINIMUM_AIO_SUBPAGE_LENGTH \
860 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
861 max_write_raid_1_10_3drive) - \
862 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
864 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
867 enum dma_data_direction dir;
868 struct pqi_raid_path_request request;
869 struct bmic_sense_feature_buffer *buffer;
871 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
875 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
876 buffer, sizeof(*buffer), 0, &dir);
880 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
881 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
883 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
885 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
890 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
891 buffer->header.subpage_code !=
892 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
893 get_unaligned_le16(&buffer->header.buffer_length) <
894 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
895 buffer->aio_subpage.header.page_code !=
896 BMIC_SENSE_FEATURE_IO_PAGE ||
897 buffer->aio_subpage.header.subpage_code !=
898 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
899 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
900 MINIMUM_AIO_SUBPAGE_LENGTH) {
904 ctrl_info->max_transfer_encrypted_sas_sata =
905 pqi_aio_limit_to_bytes(
906 &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
908 ctrl_info->max_transfer_encrypted_nvme =
909 pqi_aio_limit_to_bytes(
910 &buffer->aio_subpage.max_transfer_encrypted_nvme);
912 ctrl_info->max_write_raid_5_6 =
913 pqi_aio_limit_to_bytes(
914 &buffer->aio_subpage.max_write_raid_5_6);
916 ctrl_info->max_write_raid_1_10_2drive =
917 pqi_aio_limit_to_bytes(
918 &buffer->aio_subpage.max_write_raid_1_10_2drive);
920 ctrl_info->max_write_raid_1_10_3drive =
921 pqi_aio_limit_to_bytes(
922 &buffer->aio_subpage.max_write_raid_1_10_3drive);
930 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
931 enum bmic_flush_cache_shutdown_event shutdown_event)
934 struct bmic_flush_cache *flush_cache;
936 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
940 flush_cache->shutdown_event = shutdown_event;
942 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
943 sizeof(*flush_cache));
950 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
951 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
952 struct pqi_raid_error_info *error_info)
954 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
955 buffer, buffer_length, error_info);
958 #define PQI_FETCH_PTRAID_DATA (1 << 31)
960 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
963 struct bmic_diag_options *diag;
965 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
969 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
970 diag, sizeof(*diag));
974 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
976 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
985 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
986 void *buffer, size_t buffer_length)
988 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
989 buffer, buffer_length);
994 struct bmic_host_wellness_driver_version {
996 u8 driver_version_tag[2];
997 __le16 driver_version_length;
998 char driver_version[32];
999 u8 dont_write_tag[2];
1005 static int pqi_write_driver_version_to_host_wellness(
1006 struct pqi_ctrl_info *ctrl_info)
1009 struct bmic_host_wellness_driver_version *buffer;
1010 size_t buffer_length;
1012 buffer_length = sizeof(*buffer);
1014 buffer = kmalloc(buffer_length, GFP_KERNEL);
1018 buffer->start_tag[0] = '<';
1019 buffer->start_tag[1] = 'H';
1020 buffer->start_tag[2] = 'W';
1021 buffer->start_tag[3] = '>';
1022 buffer->driver_version_tag[0] = 'D';
1023 buffer->driver_version_tag[1] = 'V';
1024 put_unaligned_le16(sizeof(buffer->driver_version),
1025 &buffer->driver_version_length);
1026 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
1027 sizeof(buffer->driver_version) - 1);
1028 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
1029 buffer->dont_write_tag[0] = 'D';
1030 buffer->dont_write_tag[1] = 'W';
1031 buffer->end_tag[0] = 'Z';
1032 buffer->end_tag[1] = 'Z';
1034 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1043 struct bmic_host_wellness_time {
1048 u8 dont_write_tag[2];
1054 static int pqi_write_current_time_to_host_wellness(
1055 struct pqi_ctrl_info *ctrl_info)
1058 struct bmic_host_wellness_time *buffer;
1059 size_t buffer_length;
1060 time64_t local_time;
1064 buffer_length = sizeof(*buffer);
1066 buffer = kmalloc(buffer_length, GFP_KERNEL);
1070 buffer->start_tag[0] = '<';
1071 buffer->start_tag[1] = 'H';
1072 buffer->start_tag[2] = 'W';
1073 buffer->start_tag[3] = '>';
1074 buffer->time_tag[0] = 'T';
1075 buffer->time_tag[1] = 'D';
1076 put_unaligned_le16(sizeof(buffer->time),
1077 &buffer->time_length);
1079 local_time = ktime_get_real_seconds();
1080 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
1081 year = tm.tm_year + 1900;
1083 buffer->time[0] = bin2bcd(tm.tm_hour);
1084 buffer->time[1] = bin2bcd(tm.tm_min);
1085 buffer->time[2] = bin2bcd(tm.tm_sec);
1086 buffer->time[3] = 0;
1087 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1088 buffer->time[5] = bin2bcd(tm.tm_mday);
1089 buffer->time[6] = bin2bcd(year / 100);
1090 buffer->time[7] = bin2bcd(year % 100);
1092 buffer->dont_write_tag[0] = 'D';
1093 buffer->dont_write_tag[1] = 'W';
1094 buffer->end_tag[0] = 'Z';
1095 buffer->end_tag[1] = 'Z';
1097 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1104 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
1106 static void pqi_update_time_worker(struct work_struct *work)
1109 struct pqi_ctrl_info *ctrl_info;
1111 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1114 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1116 dev_warn(&ctrl_info->pci_dev->dev,
1117 "error updating time on controller\n");
1119 schedule_delayed_work(&ctrl_info->update_time_work,
1120 PQI_UPDATE_TIME_WORK_INTERVAL);
1123 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1125 schedule_delayed_work(&ctrl_info->update_time_work, 0);
1128 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1130 cancel_delayed_work_sync(&ctrl_info->update_time_work);
1133 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1134 size_t buffer_length)
1136 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1139 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1142 size_t lun_list_length;
1143 size_t lun_data_length;
1144 size_t new_lun_list_length;
1145 void *lun_data = NULL;
1146 struct report_lun_header *report_lun_header;
1148 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1149 if (!report_lun_header) {
1154 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1158 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1161 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1163 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1169 if (lun_list_length == 0) {
1170 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1174 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1178 new_lun_list_length =
1179 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1181 if (new_lun_list_length > lun_list_length) {
1182 lun_list_length = new_lun_list_length;
1188 kfree(report_lun_header);
1200 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1204 u8 rpl_response_format;
1206 size_t rpl_16byte_wwid_list_length;
1208 struct report_lun_header *rpl_header;
1209 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1210 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1212 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1216 if (ctrl_info->rpl_extended_format_4_5_supported) {
1217 rpl_header = rpl_list;
1218 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1219 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1222 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1223 dev_err(&ctrl_info->pci_dev->dev,
1224 "RPL returned unsupported data format %u\n",
1225 rpl_response_format);
1228 dev_warn(&ctrl_info->pci_dev->dev,
1229 "RPL returned extended format 2 instead of 4\n");
1233 rpl_8byte_wwid_list = rpl_list;
1234 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1235 rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
1237 rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
1238 if (!rpl_16byte_wwid_list)
1241 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1242 &rpl_16byte_wwid_list->header.list_length);
1243 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1245 for (i = 0; i < num_physicals; i++) {
1246 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
1247 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1248 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
1249 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1250 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1251 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1252 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1253 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1256 kfree(rpl_8byte_wwid_list);
1257 *buffer = rpl_16byte_wwid_list;
1262 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1264 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1267 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1268 struct report_phys_lun_16byte_wwid_list **physdev_list,
1269 struct report_log_lun_list **logdev_list)
1272 size_t logdev_list_length;
1273 size_t logdev_data_length;
1274 struct report_log_lun_list *internal_logdev_list;
1275 struct report_log_lun_list *logdev_data;
1276 struct report_lun_header report_lun_header;
1278 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1280 dev_err(&ctrl_info->pci_dev->dev,
1281 "report physical LUNs failed\n");
1283 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1285 dev_err(&ctrl_info->pci_dev->dev,
1286 "report logical LUNs failed\n");
1289 * Tack the controller itself onto the end of the logical device list
1290 * by adding a list entry that is all zeros.
1293 logdev_data = *logdev_list;
1296 logdev_list_length =
1297 get_unaligned_be32(&logdev_data->header.list_length);
1299 memset(&report_lun_header, 0, sizeof(report_lun_header));
1301 (struct report_log_lun_list *)&report_lun_header;
1302 logdev_list_length = 0;
1305 logdev_data_length = sizeof(struct report_lun_header) +
1308 internal_logdev_list = kmalloc(logdev_data_length +
1309 sizeof(struct report_log_lun), GFP_KERNEL);
1310 if (!internal_logdev_list) {
1311 kfree(*logdev_list);
1312 *logdev_list = NULL;
1316 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1317 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1318 sizeof(struct report_log_lun));
1319 put_unaligned_be32(logdev_list_length +
1320 sizeof(struct report_log_lun),
1321 &internal_logdev_list->header.list_length);
1323 kfree(*logdev_list);
1324 *logdev_list = internal_logdev_list;
1329 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1330 int bus, int target, int lun)
1333 device->target = target;
1337 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1345 scsi3addr = device->scsi3addr;
1346 lunid = get_unaligned_le32(scsi3addr);
1348 if (pqi_is_hba_lunid(scsi3addr)) {
1349 /* The specified device is the controller. */
1350 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1351 device->target_lun_valid = true;
1355 if (pqi_is_logical_device(device)) {
1356 if (device->is_external_raid_device) {
1357 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1358 target = (lunid >> 16) & 0x3fff;
1361 bus = PQI_RAID_VOLUME_BUS;
1363 lun = lunid & 0x3fff;
1365 pqi_set_bus_target_lun(device, bus, target, lun);
1366 device->target_lun_valid = true;
1371 * Defer target and LUN assignment for non-controller physical devices
1372 * because the SAS transport layer will make these assignments later.
1374 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1377 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1378 struct pqi_scsi_dev *device)
1384 raid_level = SA_RAID_UNKNOWN;
1386 buffer = kmalloc(64, GFP_KERNEL);
1388 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1389 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1391 raid_level = buffer[8];
1392 if (raid_level > SA_RAID_MAX)
1393 raid_level = SA_RAID_UNKNOWN;
1398 device->raid_level = raid_level;
1401 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1402 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1406 u32 r5or6_blocks_per_row;
1408 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1410 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1411 err_msg = "RAID map too small";
1415 if (device->raid_level == SA_RAID_1) {
1416 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1417 err_msg = "invalid RAID-1 map";
1420 } else if (device->raid_level == SA_RAID_TRIPLE) {
1421 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1422 err_msg = "invalid RAID-1(Triple) map";
1425 } else if ((device->raid_level == SA_RAID_5 ||
1426 device->raid_level == SA_RAID_6) &&
1427 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1429 r5or6_blocks_per_row =
1430 get_unaligned_le16(&raid_map->strip_size) *
1431 get_unaligned_le16(&raid_map->data_disks_per_row);
1432 if (r5or6_blocks_per_row == 0) {
1433 err_msg = "invalid RAID-5 or RAID-6 map";
1441 dev_warn(&ctrl_info->pci_dev->dev,
1442 "logical device %08x%08x %s\n",
1443 *((u32 *)&device->scsi3addr),
1444 *((u32 *)&device->scsi3addr[4]), err_msg);
1449 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1450 struct pqi_scsi_dev *device)
1454 struct raid_map *raid_map;
1456 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1460 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1461 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1465 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1467 if (raid_map_size > sizeof(*raid_map)) {
1471 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1475 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1476 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1480 if (get_unaligned_le32(&raid_map->structure_size)
1482 dev_warn(&ctrl_info->pci_dev->dev,
1483 "requested %u bytes, received %u bytes\n",
1485 get_unaligned_le32(&raid_map->structure_size));
1491 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1495 device->raid_map = raid_map;
1505 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1506 struct pqi_scsi_dev *device)
1508 if (!ctrl_info->lv_drive_type_mix_valid) {
1509 device->max_transfer_encrypted = ~0;
1513 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1514 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1515 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1516 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1517 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1518 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1519 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1520 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1521 device->max_transfer_encrypted =
1522 ctrl_info->max_transfer_encrypted_sas_sata;
1524 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1525 device->max_transfer_encrypted =
1526 ctrl_info->max_transfer_encrypted_nvme;
1528 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1529 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1531 device->max_transfer_encrypted =
1532 min(ctrl_info->max_transfer_encrypted_sas_sata,
1533 ctrl_info->max_transfer_encrypted_nvme);
1538 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1539 struct pqi_scsi_dev *device)
1545 buffer = kmalloc(64, GFP_KERNEL);
1549 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1550 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1554 #define RAID_BYPASS_STATUS 4
1555 #define RAID_BYPASS_CONFIGURED 0x1
1556 #define RAID_BYPASS_ENABLED 0x2
1558 bypass_status = buffer[RAID_BYPASS_STATUS];
1559 device->raid_bypass_configured =
1560 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1561 if (device->raid_bypass_configured &&
1562 (bypass_status & RAID_BYPASS_ENABLED) &&
1563 pqi_get_raid_map(ctrl_info, device) == 0) {
1564 device->raid_bypass_enabled = true;
1565 if (get_unaligned_le16(&device->raid_map->flags) &
1566 RAID_MAP_ENCRYPTION_ENABLED)
1567 pqi_set_max_transfer_encrypted(ctrl_info, device);
1575 * Use vendor-specific VPD to determine online/offline status of a volume.
1578 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1579 struct pqi_scsi_dev *device)
1583 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1584 bool volume_offline = true;
1586 struct ciss_vpd_logical_volume_status *vpd;
1588 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1592 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1593 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1597 if (vpd->page_code != CISS_VPD_LV_STATUS)
1600 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1601 volume_status) + vpd->page_length;
1602 if (page_length < sizeof(*vpd))
1605 volume_status = vpd->volume_status;
1606 volume_flags = get_unaligned_be32(&vpd->flags);
1607 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1612 device->volume_status = volume_status;
1613 device->volume_offline = volume_offline;
1616 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
1617 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1618 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
1620 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1621 struct pqi_scsi_dev *device,
1622 struct bmic_identify_physical_device *id_phys)
1626 memset(id_phys, 0, sizeof(*id_phys));
1628 rc = pqi_identify_physical_device(ctrl_info, device,
1629 id_phys, sizeof(*id_phys));
1631 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1635 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1636 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1638 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1639 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1641 device->box_index = id_phys->box_index;
1642 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1643 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1644 device->queue_depth =
1645 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1646 device->active_path_index = id_phys->active_path_number;
1647 device->path_map = id_phys->redundant_path_present_map;
1648 memcpy(&device->box,
1649 &id_phys->alternate_paths_phys_box_on_port,
1650 sizeof(device->box));
1651 memcpy(&device->phys_connector,
1652 &id_phys->alternate_paths_phys_connector,
1653 sizeof(device->phys_connector));
1654 device->bay = id_phys->phys_bay_in_box;
1655 device->lun_count = id_phys->multi_lun_device_lun_count;
1656 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1659 id_phys->phy_to_phy_map[device->active_path_index];
1661 device->phy_id = 0xFF;
1663 device->ncq_prio_support =
1664 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1665 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1667 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1672 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1673 struct pqi_scsi_dev *device)
1678 buffer = kmalloc(64, GFP_KERNEL);
1682 /* Send an inquiry to the device to see what it is. */
1683 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1687 scsi_sanitize_inquiry_string(&buffer[8], 8);
1688 scsi_sanitize_inquiry_string(&buffer[16], 16);
1690 device->devtype = buffer[0] & 0x1f;
1691 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1692 memcpy(device->model, &buffer[16], sizeof(device->model));
1694 if (device->devtype == TYPE_DISK) {
1695 if (device->is_external_raid_device) {
1696 device->raid_level = SA_RAID_UNKNOWN;
1697 device->volume_status = CISS_LV_OK;
1698 device->volume_offline = false;
1700 pqi_get_raid_level(ctrl_info, device);
1701 pqi_get_raid_bypass_status(ctrl_info, device);
1702 pqi_get_volume_status(ctrl_info, device);
1713 * Prevent adding drive to OS for some corner cases such as a drive
1714 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1715 * the drive until the sanitize completes, which can take hours,
1716 * resulting in long bootup delays. Commands such as TUR, READ_CAP
1717 * are allowed, but READ/WRITE cause check condition. So the OS
1718 * cannot check/read the partition table.
1719 * Note: devices that have completed sanitize must be re-enabled
1720 * using the management utility.
1722 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1724 return device->erase_in_progress;
1727 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1728 struct pqi_scsi_dev *device,
1729 struct bmic_identify_physical_device *id_phys)
1733 if (device->is_expander_smp_device)
1736 if (pqi_is_logical_device(device))
1737 rc = pqi_get_logical_device_info(ctrl_info, device);
1739 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1744 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1745 struct pqi_scsi_dev *device,
1746 struct bmic_identify_physical_device *id_phys)
1750 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1752 if (rc == 0 && device->lun_count == 0)
1753 device->lun_count = 1;
1758 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1759 struct pqi_scsi_dev *device)
1762 static const char unknown_state_str[] =
1763 "Volume is in an unknown state (%u)";
1764 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1766 switch (device->volume_status) {
1768 status = "Volume online";
1770 case CISS_LV_FAILED:
1771 status = "Volume failed";
1773 case CISS_LV_NOT_CONFIGURED:
1774 status = "Volume not configured";
1776 case CISS_LV_DEGRADED:
1777 status = "Volume degraded";
1779 case CISS_LV_READY_FOR_RECOVERY:
1780 status = "Volume ready for recovery operation";
1782 case CISS_LV_UNDERGOING_RECOVERY:
1783 status = "Volume undergoing recovery";
1785 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1786 status = "Wrong physical drive was replaced";
1788 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1789 status = "A physical drive not properly connected";
1791 case CISS_LV_HARDWARE_OVERHEATING:
1792 status = "Hardware is overheating";
1794 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1795 status = "Hardware has overheated";
1797 case CISS_LV_UNDERGOING_EXPANSION:
1798 status = "Volume undergoing expansion";
1800 case CISS_LV_NOT_AVAILABLE:
1801 status = "Volume waiting for transforming volume";
1803 case CISS_LV_QUEUED_FOR_EXPANSION:
1804 status = "Volume queued for expansion";
1806 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1807 status = "Volume disabled due to SCSI ID conflict";
1809 case CISS_LV_EJECTED:
1810 status = "Volume has been ejected";
1812 case CISS_LV_UNDERGOING_ERASE:
1813 status = "Volume undergoing background erase";
1815 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1816 status = "Volume ready for predictive spare rebuild";
1818 case CISS_LV_UNDERGOING_RPI:
1819 status = "Volume undergoing rapid parity initialization";
1821 case CISS_LV_PENDING_RPI:
1822 status = "Volume queued for rapid parity initialization";
1824 case CISS_LV_ENCRYPTED_NO_KEY:
1825 status = "Encrypted volume inaccessible - key not present";
1827 case CISS_LV_UNDERGOING_ENCRYPTION:
1828 status = "Volume undergoing encryption process";
1830 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1831 status = "Volume undergoing encryption re-keying process";
1833 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1834 status = "Volume encrypted but encryption is disabled";
1836 case CISS_LV_PENDING_ENCRYPTION:
1837 status = "Volume pending migration to encrypted state";
1839 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1840 status = "Volume pending encryption rekeying";
1842 case CISS_LV_NOT_SUPPORTED:
1843 status = "Volume not supported on this controller";
1845 case CISS_LV_STATUS_UNAVAILABLE:
1846 status = "Volume status not available";
1849 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1850 unknown_state_str, device->volume_status);
1851 status = unknown_state_buffer;
1855 dev_info(&ctrl_info->pci_dev->dev,
1856 "scsi %d:%d:%d:%d %s\n",
1857 ctrl_info->scsi_host->host_no,
1858 device->bus, device->target, device->lun, status);
1861 static void pqi_rescan_worker(struct work_struct *work)
1863 struct pqi_ctrl_info *ctrl_info;
1865 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1868 pqi_scan_scsi_devices(ctrl_info);
1871 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1872 struct pqi_scsi_dev *device)
1876 if (pqi_is_logical_device(device))
1877 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1878 device->target, device->lun);
1880 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1885 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1887 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1892 for (lun = 0; lun < device->lun_count; lun++) {
1893 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1894 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1896 dev_err(&ctrl_info->pci_dev->dev,
1897 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1898 ctrl_info->scsi_host->host_no, device->bus,
1899 device->target, lun,
1900 atomic_read(&device->scsi_cmds_outstanding[lun]));
1903 if (pqi_is_logical_device(device))
1904 scsi_remove_device(device->sdev);
1906 pqi_remove_sas_device(device);
1908 pqi_device_remove_start(device);
1911 /* Assumes the SCSI device list lock is held. */
1913 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1914 int bus, int target, int lun)
1916 struct pqi_scsi_dev *device;
1918 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1919 if (device->bus == bus && device->target == target && device->lun == lun)
1925 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1927 if (dev1->is_physical_device != dev2->is_physical_device)
1930 if (dev1->is_physical_device)
1931 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1933 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1936 enum pqi_find_result {
1942 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1943 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1945 struct pqi_scsi_dev *device;
1947 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1948 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1949 *matching_device = device;
1950 if (pqi_device_equal(device_to_find, device)) {
1951 if (device_to_find->volume_offline)
1952 return DEVICE_CHANGED;
1955 return DEVICE_CHANGED;
1959 return DEVICE_NOT_FOUND;
1962 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1964 if (device->is_expander_smp_device)
1965 return "Enclosure SMP ";
1967 return scsi_device_type(device->devtype);
1970 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1972 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1973 char *action, struct pqi_scsi_dev *device)
1976 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1978 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1979 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1981 if (device->target_lun_valid)
1982 count += scnprintf(buffer + count,
1983 PQI_DEV_INFO_BUFFER_LENGTH - count,
1988 count += scnprintf(buffer + count,
1989 PQI_DEV_INFO_BUFFER_LENGTH - count,
1992 if (pqi_is_logical_device(device))
1993 count += scnprintf(buffer + count,
1994 PQI_DEV_INFO_BUFFER_LENGTH - count,
1996 *((u32 *)&device->scsi3addr),
1997 *((u32 *)&device->scsi3addr[4]));
1999 count += scnprintf(buffer + count,
2000 PQI_DEV_INFO_BUFFER_LENGTH - count,
2002 get_unaligned_be64(&device->wwid[0]),
2003 get_unaligned_be64(&device->wwid[8]));
2005 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2007 pqi_device_type(device),
2011 if (pqi_is_logical_device(device)) {
2012 if (device->devtype == TYPE_DISK)
2013 count += scnprintf(buffer + count,
2014 PQI_DEV_INFO_BUFFER_LENGTH - count,
2015 "SSDSmartPathCap%c En%c %-12s",
2016 device->raid_bypass_configured ? '+' : '-',
2017 device->raid_bypass_enabled ? '+' : '-',
2018 pqi_raid_level_to_string(device->raid_level));
2020 count += scnprintf(buffer + count,
2021 PQI_DEV_INFO_BUFFER_LENGTH - count,
2022 "AIO%c", device->aio_enabled ? '+' : '-');
2023 if (device->devtype == TYPE_DISK ||
2024 device->devtype == TYPE_ZBC)
2025 count += scnprintf(buffer + count,
2026 PQI_DEV_INFO_BUFFER_LENGTH - count,
2027 " qd=%-6d", device->queue_depth);
2030 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2033 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2038 if (raid_map1 == NULL || raid_map2 == NULL)
2039 return raid_map1 == raid_map2;
2041 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2042 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2044 if (raid_map1_size != raid_map2_size)
2047 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2050 /* Assumes the SCSI device list lock is held. */
2052 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2053 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2055 existing_device->device_type = new_device->device_type;
2056 existing_device->bus = new_device->bus;
2057 if (new_device->target_lun_valid) {
2058 existing_device->target = new_device->target;
2059 existing_device->lun = new_device->lun;
2060 existing_device->target_lun_valid = true;
2063 /* By definition, the scsi3addr and wwid fields are already the same. */
2065 existing_device->is_physical_device = new_device->is_physical_device;
2066 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2067 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2068 existing_device->sas_address = new_device->sas_address;
2069 existing_device->queue_depth = new_device->queue_depth;
2070 existing_device->device_offline = false;
2071 existing_device->lun_count = new_device->lun_count;
2073 if (pqi_is_logical_device(existing_device)) {
2074 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2076 if (existing_device->devtype == TYPE_DISK) {
2077 existing_device->raid_level = new_device->raid_level;
2078 existing_device->volume_status = new_device->volume_status;
2079 if (ctrl_info->logical_volume_rescan_needed)
2080 existing_device->rescan = true;
2081 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2082 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2083 kfree(existing_device->raid_map);
2084 existing_device->raid_map = new_device->raid_map;
2085 /* To prevent this from being freed later. */
2086 new_device->raid_map = NULL;
2088 existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2089 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2092 existing_device->aio_enabled = new_device->aio_enabled;
2093 existing_device->aio_handle = new_device->aio_handle;
2094 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2095 existing_device->active_path_index = new_device->active_path_index;
2096 existing_device->phy_id = new_device->phy_id;
2097 existing_device->path_map = new_device->path_map;
2098 existing_device->bay = new_device->bay;
2099 existing_device->box_index = new_device->box_index;
2100 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2101 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2102 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2103 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2107 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2110 kfree(device->raid_map);
2116 * Called when exposing a new device to the OS fails in order to re-adjust
2117 * our internal SCSI device list to match the SCSI ML's view.
2120 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2121 struct pqi_scsi_dev *device)
2123 unsigned long flags;
2125 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2126 list_del(&device->scsi_device_list_entry);
2127 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2129 /* Allow the device structure to be freed later. */
2130 device->keep_device = false;
2133 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2135 if (device->is_expander_smp_device)
2136 return device->sas_port != NULL;
2138 return device->sdev != NULL;
2141 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2142 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2146 unsigned long flags;
2147 enum pqi_find_result find_result;
2148 struct pqi_scsi_dev *device;
2149 struct pqi_scsi_dev *next;
2150 struct pqi_scsi_dev *matching_device;
2151 LIST_HEAD(add_list);
2152 LIST_HEAD(delete_list);
2155 * The idea here is to do as little work as possible while holding the
2156 * spinlock. That's why we go to great pains to defer anything other
2157 * than updating the internal device list until after we release the
2161 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2163 /* Assume that all devices in the existing list have gone away. */
2164 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2165 device->device_gone = true;
2167 for (i = 0; i < num_new_devices; i++) {
2168 device = new_device_list[i];
2170 find_result = pqi_scsi_find_entry(ctrl_info, device,
2173 switch (find_result) {
2176 * The newly found device is already in the existing
2179 device->new_device = false;
2180 matching_device->device_gone = false;
2181 pqi_scsi_update_device(ctrl_info, matching_device, device);
2183 case DEVICE_NOT_FOUND:
2185 * The newly found device is NOT in the existing device
2188 device->new_device = true;
2190 case DEVICE_CHANGED:
2192 * The original device has gone away and we need to add
2195 device->new_device = true;
2200 /* Process all devices that have gone away. */
2201 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2202 scsi_device_list_entry) {
2203 if (device->device_gone) {
2204 list_del(&device->scsi_device_list_entry);
2205 list_add_tail(&device->delete_list_entry, &delete_list);
2209 /* Process all new devices. */
2210 for (i = 0; i < num_new_devices; i++) {
2211 device = new_device_list[i];
2212 if (!device->new_device)
2214 if (device->volume_offline)
2216 list_add_tail(&device->scsi_device_list_entry,
2217 &ctrl_info->scsi_device_list);
2218 list_add_tail(&device->add_list_entry, &add_list);
2219 /* To prevent this device structure from being freed later. */
2220 device->keep_device = true;
2223 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2226 * If OFA is in progress and there are devices that need to be deleted,
2227 * allow any pending reset operations to continue and unblock any SCSI
2228 * requests before removal.
2230 if (pqi_ofa_in_progress(ctrl_info)) {
2231 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2232 if (pqi_is_device_added(device))
2233 pqi_device_remove_start(device);
2234 pqi_ctrl_unblock_device_reset(ctrl_info);
2235 pqi_scsi_unblock_requests(ctrl_info);
2238 /* Remove all devices that have gone away. */
2239 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2240 if (device->volume_offline) {
2241 pqi_dev_info(ctrl_info, "offline", device);
2242 pqi_show_volume_status(ctrl_info, device);
2244 pqi_dev_info(ctrl_info, "removed", device);
2246 if (pqi_is_device_added(device))
2247 pqi_remove_device(ctrl_info, device);
2248 list_del(&device->delete_list_entry);
2249 pqi_free_device(device);
2253 * Notify the SML of any existing device changes such as;
2254 * queue depth, device size.
2256 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2257 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2258 device->advertised_queue_depth = device->queue_depth;
2259 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2260 if (device->rescan) {
2261 scsi_rescan_device(&device->sdev->sdev_gendev);
2262 device->rescan = false;
2267 /* Expose any new devices. */
2268 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2269 if (!pqi_is_device_added(device)) {
2270 rc = pqi_add_device(ctrl_info, device);
2272 pqi_dev_info(ctrl_info, "added", device);
2274 dev_warn(&ctrl_info->pci_dev->dev,
2275 "scsi %d:%d:%d:%d addition failed, device not added\n",
2276 ctrl_info->scsi_host->host_no,
2277 device->bus, device->target,
2279 pqi_fixup_botched_add(ctrl_info, device);
2284 ctrl_info->logical_volume_rescan_needed = false;
2288 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2291 * Only support the HBA controller itself as a RAID
2292 * controller. If it's a RAID controller other than
2293 * the HBA itself (an external RAID controller, for
2294 * example), we don't support it.
2296 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2297 !pqi_is_hba_lunid(device->scsi3addr))
2303 static inline bool pqi_skip_device(u8 *scsi3addr)
2305 /* Ignore all masked devices. */
2306 if (MASKED_DEVICE(scsi3addr))
2312 static inline void pqi_mask_device(u8 *scsi3addr)
2314 scsi3addr[3] |= 0xc0;
2317 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2319 if (pqi_is_logical_device(device))
2322 return (device->path_map & (device->path_map - 1)) != 0;
2325 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2327 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2330 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2334 LIST_HEAD(new_device_list_head);
2335 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2336 struct report_log_lun_list *logdev_list = NULL;
2337 struct report_phys_lun_16byte_wwid *phys_lun;
2338 struct report_log_lun *log_lun;
2339 struct bmic_identify_physical_device *id_phys = NULL;
2342 struct pqi_scsi_dev **new_device_list = NULL;
2343 struct pqi_scsi_dev *device;
2344 struct pqi_scsi_dev *next;
2345 unsigned int num_new_devices;
2346 unsigned int num_valid_devices;
2347 bool is_physical_device;
2349 unsigned int physical_index;
2350 unsigned int logical_index;
2351 static char *out_of_memory_msg =
2352 "failed to allocate memory, device discovery stopped";
2354 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2360 get_unaligned_be32(&physdev_list->header.list_length)
2361 / sizeof(physdev_list->lun_entries[0]);
2367 get_unaligned_be32(&logdev_list->header.list_length)
2368 / sizeof(logdev_list->lun_entries[0]);
2372 if (num_physicals) {
2374 * We need this buffer for calls to pqi_get_physical_disk_info()
2375 * below. We allocate it here instead of inside
2376 * pqi_get_physical_disk_info() because it's a fairly large
2379 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2381 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2387 if (pqi_hide_vsep) {
2388 for (i = num_physicals - 1; i >= 0; i--) {
2389 phys_lun = &physdev_list->lun_entries[i];
2390 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2391 pqi_mask_device(phys_lun->lunid);
2399 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2400 ctrl_info->lv_drive_type_mix_valid = true;
2402 num_new_devices = num_physicals + num_logicals;
2404 new_device_list = kmalloc_array(num_new_devices,
2405 sizeof(*new_device_list),
2407 if (!new_device_list) {
2408 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2413 for (i = 0; i < num_new_devices; i++) {
2414 device = kzalloc(sizeof(*device), GFP_KERNEL);
2416 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2421 list_add_tail(&device->new_device_list_entry,
2422 &new_device_list_head);
2426 num_valid_devices = 0;
2430 for (i = 0; i < num_new_devices; i++) {
2432 if ((!pqi_expose_ld_first && i < num_physicals) ||
2433 (pqi_expose_ld_first && i >= num_logicals)) {
2434 is_physical_device = true;
2435 phys_lun = &physdev_list->lun_entries[physical_index++];
2437 scsi3addr = phys_lun->lunid;
2439 is_physical_device = false;
2441 log_lun = &logdev_list->lun_entries[logical_index++];
2442 scsi3addr = log_lun->lunid;
2445 if (is_physical_device && pqi_skip_device(scsi3addr))
2449 device = list_next_entry(device, new_device_list_entry);
2451 device = list_first_entry(&new_device_list_head,
2452 struct pqi_scsi_dev, new_device_list_entry);
2454 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2455 device->is_physical_device = is_physical_device;
2456 if (is_physical_device) {
2457 device->device_type = phys_lun->device_type;
2458 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2459 device->is_expander_smp_device = true;
2461 device->is_external_raid_device =
2462 pqi_is_external_raid_addr(scsi3addr);
2465 if (!pqi_is_supported_device(device))
2468 /* Gather information about the device. */
2469 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2470 if (rc == -ENOMEM) {
2471 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2476 if (device->is_physical_device)
2477 dev_warn(&ctrl_info->pci_dev->dev,
2478 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2479 get_unaligned_be64(&phys_lun->wwid[0]),
2480 get_unaligned_be64(&phys_lun->wwid[8]));
2482 dev_warn(&ctrl_info->pci_dev->dev,
2483 "obtaining device info failed, skipping logical device %08x%08x\n",
2484 *((u32 *)&device->scsi3addr),
2485 *((u32 *)&device->scsi3addr[4]));
2490 /* Do not present disks that the OS cannot fully probe. */
2491 if (pqi_keep_device_offline(device))
2494 pqi_assign_bus_target_lun(device);
2496 if (device->is_physical_device) {
2497 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2498 if ((phys_lun->device_flags &
2499 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2500 phys_lun->aio_handle) {
2501 device->aio_enabled = true;
2502 device->aio_handle =
2503 phys_lun->aio_handle;
2506 memcpy(device->volume_id, log_lun->volume_id,
2507 sizeof(device->volume_id));
2510 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2512 new_device_list[num_valid_devices++] = device;
2515 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2518 list_for_each_entry_safe(device, next, &new_device_list_head,
2519 new_device_list_entry) {
2520 if (device->keep_device)
2522 list_del(&device->new_device_list_entry);
2523 pqi_free_device(device);
2526 kfree(new_device_list);
2527 kfree(physdev_list);
2534 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2539 if (pqi_ctrl_offline(ctrl_info))
2542 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2544 if (!mutex_acquired) {
2545 if (pqi_ctrl_scan_blocked(ctrl_info))
2547 pqi_schedule_rescan_worker_delayed(ctrl_info);
2548 return -EINPROGRESS;
2551 rc = pqi_update_scsi_devices(ctrl_info);
2552 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2553 pqi_schedule_rescan_worker_delayed(ctrl_info);
2555 mutex_unlock(&ctrl_info->scan_mutex);
2560 static void pqi_scan_start(struct Scsi_Host *shost)
2562 struct pqi_ctrl_info *ctrl_info;
2564 ctrl_info = shost_to_hba(shost);
2566 pqi_scan_scsi_devices(ctrl_info);
2569 /* Returns TRUE if scan is finished. */
2571 static int pqi_scan_finished(struct Scsi_Host *shost,
2572 unsigned long elapsed_time)
2574 struct pqi_ctrl_info *ctrl_info;
2576 ctrl_info = shost_priv(shost);
2578 return !mutex_is_locked(&ctrl_info->scan_mutex);
2581 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2582 struct raid_map *raid_map, u64 first_block)
2584 u32 volume_blk_size;
2587 * Set the encryption tweak values based on logical block address.
2588 * If the block size is 512, the tweak value is equal to the LBA.
2589 * For other block sizes, tweak value is (LBA * block size) / 512.
2591 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2592 if (volume_blk_size != 512)
2593 first_block = (first_block * volume_blk_size) / 512;
2595 encryption_info->data_encryption_key_index =
2596 get_unaligned_le16(&raid_map->data_encryption_key_index);
2597 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2598 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2602 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2605 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2606 struct pqi_scsi_dev_raid_map_data *rmd)
2608 bool is_supported = true;
2610 switch (rmd->raid_level) {
2614 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2615 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2616 is_supported = false;
2618 case SA_RAID_TRIPLE:
2619 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2620 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2621 is_supported = false;
2624 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2625 rmd->data_length > ctrl_info->max_write_raid_5_6))
2626 is_supported = false;
2629 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2630 rmd->data_length > ctrl_info->max_write_raid_5_6))
2631 is_supported = false;
2634 is_supported = false;
2638 return is_supported;
2641 #define PQI_RAID_BYPASS_INELIGIBLE 1
2643 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2644 struct pqi_scsi_dev_raid_map_data *rmd)
2646 /* Check for valid opcode, get LBA and block count. */
2647 switch (scmd->cmnd[0]) {
2649 rmd->is_write = true;
2652 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2653 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2654 rmd->block_cnt = (u32)scmd->cmnd[4];
2655 if (rmd->block_cnt == 0)
2656 rmd->block_cnt = 256;
2659 rmd->is_write = true;
2662 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2663 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2666 rmd->is_write = true;
2669 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2670 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2673 rmd->is_write = true;
2676 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2677 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2680 /* Process via normal I/O path. */
2681 return PQI_RAID_BYPASS_INELIGIBLE;
2684 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2689 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2690 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2692 #if BITS_PER_LONG == 32
2696 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2698 /* Check for invalid block or wraparound. */
2699 if (rmd->last_block >=
2700 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2701 rmd->last_block < rmd->first_block)
2702 return PQI_RAID_BYPASS_INELIGIBLE;
2704 rmd->data_disks_per_row =
2705 get_unaligned_le16(&raid_map->data_disks_per_row);
2706 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2707 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2709 /* Calculate stripe information for the request. */
2710 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2711 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2712 return PQI_RAID_BYPASS_INELIGIBLE;
2713 #if BITS_PER_LONG == 32
2714 tmpdiv = rmd->first_block;
2715 do_div(tmpdiv, rmd->blocks_per_row);
2716 rmd->first_row = tmpdiv;
2717 tmpdiv = rmd->last_block;
2718 do_div(tmpdiv, rmd->blocks_per_row);
2719 rmd->last_row = tmpdiv;
2720 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2721 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2722 tmpdiv = rmd->first_row_offset;
2723 do_div(tmpdiv, rmd->strip_size);
2724 rmd->first_column = tmpdiv;
2725 tmpdiv = rmd->last_row_offset;
2726 do_div(tmpdiv, rmd->strip_size);
2727 rmd->last_column = tmpdiv;
2729 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2730 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2731 rmd->first_row_offset = (u32)(rmd->first_block -
2732 (rmd->first_row * rmd->blocks_per_row));
2733 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2734 rmd->blocks_per_row));
2735 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2736 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2739 /* If this isn't a single row/column then give to the controller. */
2740 if (rmd->first_row != rmd->last_row ||
2741 rmd->first_column != rmd->last_column)
2742 return PQI_RAID_BYPASS_INELIGIBLE;
2744 /* Proceeding with driver mapping. */
2745 rmd->total_disks_per_row = rmd->data_disks_per_row +
2746 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2747 rmd->map_row = ((u32)(rmd->first_row >>
2748 raid_map->parity_rotation_shift)) %
2749 get_unaligned_le16(&raid_map->row_cnt);
2750 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2756 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2757 struct raid_map *raid_map)
2759 #if BITS_PER_LONG == 32
2763 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2764 return PQI_RAID_BYPASS_INELIGIBLE;
2767 /* Verify first and last block are in same RAID group. */
2768 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2769 #if BITS_PER_LONG == 32
2770 tmpdiv = rmd->first_block;
2771 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2772 tmpdiv = rmd->first_group;
2773 do_div(tmpdiv, rmd->blocks_per_row);
2774 rmd->first_group = tmpdiv;
2775 tmpdiv = rmd->last_block;
2776 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2777 tmpdiv = rmd->last_group;
2778 do_div(tmpdiv, rmd->blocks_per_row);
2779 rmd->last_group = tmpdiv;
2781 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2782 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2784 if (rmd->first_group != rmd->last_group)
2785 return PQI_RAID_BYPASS_INELIGIBLE;
2787 /* Verify request is in a single row of RAID 5/6. */
2788 #if BITS_PER_LONG == 32
2789 tmpdiv = rmd->first_block;
2790 do_div(tmpdiv, rmd->stripesize);
2791 rmd->first_row = tmpdiv;
2792 rmd->r5or6_first_row = tmpdiv;
2793 tmpdiv = rmd->last_block;
2794 do_div(tmpdiv, rmd->stripesize);
2795 rmd->r5or6_last_row = tmpdiv;
2797 rmd->first_row = rmd->r5or6_first_row =
2798 rmd->first_block / rmd->stripesize;
2799 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2801 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2802 return PQI_RAID_BYPASS_INELIGIBLE;
2804 /* Verify request is in a single column. */
2805 #if BITS_PER_LONG == 32
2806 tmpdiv = rmd->first_block;
2807 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2808 tmpdiv = rmd->first_row_offset;
2809 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2810 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2811 tmpdiv = rmd->last_block;
2812 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2813 tmpdiv = rmd->r5or6_last_row_offset;
2814 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2815 tmpdiv = rmd->r5or6_first_row_offset;
2816 do_div(tmpdiv, rmd->strip_size);
2817 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2818 tmpdiv = rmd->r5or6_last_row_offset;
2819 do_div(tmpdiv, rmd->strip_size);
2820 rmd->r5or6_last_column = tmpdiv;
2822 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2823 (u32)((rmd->first_block % rmd->stripesize) %
2824 rmd->blocks_per_row);
2826 rmd->r5or6_last_row_offset =
2827 (u32)((rmd->last_block % rmd->stripesize) %
2828 rmd->blocks_per_row);
2831 rmd->r5or6_first_row_offset / rmd->strip_size;
2832 rmd->r5or6_first_column = rmd->first_column;
2833 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2835 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2836 return PQI_RAID_BYPASS_INELIGIBLE;
2838 /* Request is eligible. */
2840 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2841 get_unaligned_le16(&raid_map->row_cnt);
2843 rmd->map_index = (rmd->first_group *
2844 (get_unaligned_le16(&raid_map->row_cnt) *
2845 rmd->total_disks_per_row)) +
2846 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2848 if (rmd->is_write) {
2852 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2853 * parity entries inside the device's raid_map.
2855 * A device's RAID map is bounded by: number of RAID disks squared.
2857 * The devices RAID map size is checked during device
2860 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2861 index *= rmd->total_disks_per_row;
2862 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2864 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2865 if (rmd->raid_level == SA_RAID_6) {
2866 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2867 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2869 #if BITS_PER_LONG == 32
2870 tmpdiv = rmd->first_block;
2871 do_div(tmpdiv, rmd->blocks_per_row);
2874 rmd->row = rmd->first_block / rmd->blocks_per_row;
2881 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2883 /* Build the new CDB for the physical disk I/O. */
2884 if (rmd->disk_block > 0xffffffff) {
2885 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2887 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2888 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2891 rmd->cdb_length = 16;
2893 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2895 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2897 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2899 rmd->cdb_length = 10;
2903 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2904 struct pqi_scsi_dev_raid_map_data *rmd)
2909 group = rmd->map_index / rmd->data_disks_per_row;
2911 index = rmd->map_index - (group * rmd->data_disks_per_row);
2912 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2913 index += rmd->data_disks_per_row;
2914 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2915 if (rmd->layout_map_count > 2) {
2916 index += rmd->data_disks_per_row;
2917 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2920 rmd->num_it_nexus_entries = rmd->layout_map_count;
2923 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2924 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2925 struct pqi_queue_group *queue_group)
2928 struct raid_map *raid_map;
2930 u32 next_bypass_group;
2931 struct pqi_encryption_info *encryption_info_ptr;
2932 struct pqi_encryption_info encryption_info;
2933 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2935 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2937 return PQI_RAID_BYPASS_INELIGIBLE;
2939 rmd.raid_level = device->raid_level;
2941 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2942 return PQI_RAID_BYPASS_INELIGIBLE;
2944 if (unlikely(rmd.block_cnt == 0))
2945 return PQI_RAID_BYPASS_INELIGIBLE;
2947 raid_map = device->raid_map;
2949 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2951 return PQI_RAID_BYPASS_INELIGIBLE;
2953 if (device->raid_level == SA_RAID_1 ||
2954 device->raid_level == SA_RAID_TRIPLE) {
2956 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2958 group = device->next_bypass_group[rmd.map_index];
2959 next_bypass_group = group + 1;
2960 if (next_bypass_group >= rmd.layout_map_count)
2961 next_bypass_group = 0;
2962 device->next_bypass_group[rmd.map_index] = next_bypass_group;
2963 rmd.map_index += group * rmd.data_disks_per_row;
2965 } else if ((device->raid_level == SA_RAID_5 ||
2966 device->raid_level == SA_RAID_6) &&
2967 (rmd.layout_map_count > 1 || rmd.is_write)) {
2968 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2970 return PQI_RAID_BYPASS_INELIGIBLE;
2973 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2974 return PQI_RAID_BYPASS_INELIGIBLE;
2976 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2977 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2978 rmd.first_row * rmd.strip_size +
2979 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2980 rmd.disk_block_cnt = rmd.block_cnt;
2982 /* Handle differing logical/physical block sizes. */
2983 if (raid_map->phys_blk_shift) {
2984 rmd.disk_block <<= raid_map->phys_blk_shift;
2985 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2988 if (unlikely(rmd.disk_block_cnt > 0xffff))
2989 return PQI_RAID_BYPASS_INELIGIBLE;
2991 pqi_set_aio_cdb(&rmd);
2993 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2994 if (rmd.data_length > device->max_transfer_encrypted)
2995 return PQI_RAID_BYPASS_INELIGIBLE;
2996 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2997 encryption_info_ptr = &encryption_info;
2999 encryption_info_ptr = NULL;
3003 switch (device->raid_level) {
3005 case SA_RAID_TRIPLE:
3006 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3007 encryption_info_ptr, device, &rmd);
3010 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3011 encryption_info_ptr, device, &rmd);
3015 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3016 rmd.cdb, rmd.cdb_length, queue_group,
3017 encryption_info_ptr, true, false);
3020 #define PQI_STATUS_IDLE 0x0
3022 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3023 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3025 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3026 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3027 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3028 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3029 #define PQI_DEVICE_STATE_ERROR 0x4
3031 #define PQI_MODE_READY_TIMEOUT_SECS 30
3032 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3034 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3036 struct pqi_device_registers __iomem *pqi_registers;
3037 unsigned long timeout;
3041 pqi_registers = ctrl_info->pqi_registers;
3042 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3045 signature = readq(&pqi_registers->signature);
3046 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3047 sizeof(signature)) == 0)
3049 if (time_after(jiffies, timeout)) {
3050 dev_err(&ctrl_info->pci_dev->dev,
3051 "timed out waiting for PQI signature\n");
3054 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3058 status = readb(&pqi_registers->function_and_status_code);
3059 if (status == PQI_STATUS_IDLE)
3061 if (time_after(jiffies, timeout)) {
3062 dev_err(&ctrl_info->pci_dev->dev,
3063 "timed out waiting for PQI IDLE\n");
3066 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3070 if (readl(&pqi_registers->device_status) ==
3071 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3073 if (time_after(jiffies, timeout)) {
3074 dev_err(&ctrl_info->pci_dev->dev,
3075 "timed out waiting for PQI all registers ready\n");
3078 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3084 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3086 struct pqi_scsi_dev *device;
3088 device = io_request->scmd->device->hostdata;
3089 device->raid_bypass_enabled = false;
3090 device->aio_enabled = false;
3093 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3095 struct pqi_ctrl_info *ctrl_info;
3096 struct pqi_scsi_dev *device;
3098 device = sdev->hostdata;
3099 if (device->device_offline)
3102 device->device_offline = true;
3103 ctrl_info = shost_to_hba(sdev->host);
3104 pqi_schedule_rescan_worker(ctrl_info);
3105 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3106 path, ctrl_info->scsi_host->host_no, device->bus,
3107 device->target, device->lun);
3110 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3114 struct scsi_cmnd *scmd;
3115 struct pqi_raid_error_info *error_info;
3116 size_t sense_data_length;
3119 struct scsi_sense_hdr sshdr;
3121 scmd = io_request->scmd;
3125 error_info = io_request->error_info;
3126 scsi_status = error_info->status;
3129 switch (error_info->data_out_result) {
3130 case PQI_DATA_IN_OUT_GOOD:
3132 case PQI_DATA_IN_OUT_UNDERFLOW:
3134 get_unaligned_le32(&error_info->data_out_transferred);
3135 residual_count = scsi_bufflen(scmd) - xfer_count;
3136 scsi_set_resid(scmd, residual_count);
3137 if (xfer_count < scmd->underflow)
3138 host_byte = DID_SOFT_ERROR;
3140 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3141 case PQI_DATA_IN_OUT_ABORTED:
3142 host_byte = DID_ABORT;
3144 case PQI_DATA_IN_OUT_TIMEOUT:
3145 host_byte = DID_TIME_OUT;
3147 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3148 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3149 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3150 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3151 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3152 case PQI_DATA_IN_OUT_ERROR:
3153 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3154 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3155 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3156 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3157 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3158 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3159 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3160 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3161 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3162 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3164 host_byte = DID_ERROR;
3168 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3169 if (sense_data_length == 0)
3171 get_unaligned_le16(&error_info->response_data_length);
3172 if (sense_data_length) {
3173 if (sense_data_length > sizeof(error_info->data))
3174 sense_data_length = sizeof(error_info->data);
3176 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3177 scsi_normalize_sense(error_info->data,
3178 sense_data_length, &sshdr) &&
3179 sshdr.sense_key == HARDWARE_ERROR &&
3180 sshdr.asc == 0x3e) {
3181 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3182 struct pqi_scsi_dev *device = scmd->device->hostdata;
3184 switch (sshdr.ascq) {
3185 case 0x1: /* LOGICAL UNIT FAILURE */
3186 if (printk_ratelimit())
3187 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3188 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3189 pqi_take_device_offline(scmd->device, "RAID");
3190 host_byte = DID_NO_CONNECT;
3193 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3194 if (printk_ratelimit())
3195 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3196 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3201 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3202 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3203 memcpy(scmd->sense_buffer, error_info->data,
3207 scmd->result = scsi_status;
3208 set_host_byte(scmd, host_byte);
3211 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3215 struct scsi_cmnd *scmd;
3216 struct pqi_aio_error_info *error_info;
3217 size_t sense_data_length;
3220 bool device_offline;
3221 struct pqi_scsi_dev *device;
3223 scmd = io_request->scmd;
3224 error_info = io_request->error_info;
3226 sense_data_length = 0;
3227 device_offline = false;
3228 device = scmd->device->hostdata;
3230 switch (error_info->service_response) {
3231 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3232 scsi_status = error_info->status;
3234 case PQI_AIO_SERV_RESPONSE_FAILURE:
3235 switch (error_info->status) {
3236 case PQI_AIO_STATUS_IO_ABORTED:
3237 scsi_status = SAM_STAT_TASK_ABORTED;
3239 case PQI_AIO_STATUS_UNDERRUN:
3240 scsi_status = SAM_STAT_GOOD;
3241 residual_count = get_unaligned_le32(
3242 &error_info->residual_count);
3243 scsi_set_resid(scmd, residual_count);
3244 xfer_count = scsi_bufflen(scmd) - residual_count;
3245 if (xfer_count < scmd->underflow)
3246 host_byte = DID_SOFT_ERROR;
3248 case PQI_AIO_STATUS_OVERRUN:
3249 scsi_status = SAM_STAT_GOOD;
3251 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3252 pqi_aio_path_disabled(io_request);
3253 if (pqi_is_multipath_device(device)) {
3254 pqi_device_remove_start(device);
3255 host_byte = DID_NO_CONNECT;
3256 scsi_status = SAM_STAT_CHECK_CONDITION;
3258 scsi_status = SAM_STAT_GOOD;
3259 io_request->status = -EAGAIN;
3262 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3263 case PQI_AIO_STATUS_INVALID_DEVICE:
3264 if (!io_request->raid_bypass) {
3265 device_offline = true;
3266 pqi_take_device_offline(scmd->device, "AIO");
3267 host_byte = DID_NO_CONNECT;
3269 scsi_status = SAM_STAT_CHECK_CONDITION;
3271 case PQI_AIO_STATUS_IO_ERROR:
3273 scsi_status = SAM_STAT_CHECK_CONDITION;
3277 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3278 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3279 scsi_status = SAM_STAT_GOOD;
3281 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3282 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3284 scsi_status = SAM_STAT_CHECK_CONDITION;
3288 if (error_info->data_present) {
3290 get_unaligned_le16(&error_info->data_length);
3291 if (sense_data_length) {
3292 if (sense_data_length > sizeof(error_info->data))
3293 sense_data_length = sizeof(error_info->data);
3294 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3295 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3296 memcpy(scmd->sense_buffer, error_info->data,
3301 if (device_offline && sense_data_length == 0)
3302 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3304 scmd->result = scsi_status;
3305 set_host_byte(scmd, host_byte);
3308 static void pqi_process_io_error(unsigned int iu_type,
3309 struct pqi_io_request *io_request)
3312 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3313 pqi_process_raid_io_error(io_request);
3315 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3316 pqi_process_aio_io_error(io_request);
3321 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3322 struct pqi_task_management_response *response)
3326 switch (response->response_code) {
3327 case SOP_TMF_COMPLETE:
3328 case SOP_TMF_FUNCTION_SUCCEEDED:
3331 case SOP_TMF_REJECTED:
3334 case SOP_RC_INCORRECT_LOGICAL_UNIT:
3343 dev_err(&ctrl_info->pci_dev->dev,
3344 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3349 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3350 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3352 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3355 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3360 struct pqi_io_request *io_request;
3361 struct pqi_io_response *response;
3365 oq_ci = queue_group->oq_ci_copy;
3368 oq_pi = readl(queue_group->oq_pi);
3369 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3370 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3371 dev_err(&ctrl_info->pci_dev->dev,
3372 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3373 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3380 response = queue_group->oq_element_array +
3381 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3383 request_id = get_unaligned_le16(&response->request_id);
3384 if (request_id >= ctrl_info->max_io_slots) {
3385 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3386 dev_err(&ctrl_info->pci_dev->dev,
3387 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3388 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3392 io_request = &ctrl_info->io_request_pool[request_id];
3393 if (atomic_read(&io_request->refcount) == 0) {
3394 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3395 dev_err(&ctrl_info->pci_dev->dev,
3396 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3397 request_id, oq_pi, oq_ci);
3401 switch (response->header.iu_type) {
3402 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3403 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3404 if (io_request->scmd)
3405 io_request->scmd->result = 0;
3407 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3409 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3410 io_request->status =
3412 &((struct pqi_vendor_general_response *)response)->status);
3414 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3415 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3418 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3419 pqi_aio_path_disabled(io_request);
3420 io_request->status = -EAGAIN;
3422 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3423 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3424 io_request->error_info = ctrl_info->error_buffer +
3425 (get_unaligned_le16(&response->error_index) *
3426 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3427 pqi_process_io_error(response->header.iu_type, io_request);
3430 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3431 dev_err(&ctrl_info->pci_dev->dev,
3432 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3433 response->header.iu_type, oq_pi, oq_ci);
3437 io_request->io_complete_callback(io_request, io_request->context);
3440 * Note that the I/O request structure CANNOT BE TOUCHED after
3441 * returning from the I/O completion callback!
3443 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3446 if (num_responses) {
3447 queue_group->oq_ci_copy = oq_ci;
3448 writel(oq_ci, queue_group->oq_ci);
3451 return num_responses;
3454 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3455 unsigned int ci, unsigned int elements_in_queue)
3457 unsigned int num_elements_used;
3460 num_elements_used = pi - ci;
3462 num_elements_used = elements_in_queue - ci + pi;
3464 return elements_in_queue - num_elements_used - 1;
3467 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3468 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3472 unsigned long flags;
3474 struct pqi_queue_group *queue_group;
3476 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3477 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3480 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3482 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3483 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3485 if (pqi_num_elements_free(iq_pi, iq_ci,
3486 ctrl_info->num_elements_per_iq))
3489 spin_unlock_irqrestore(
3490 &queue_group->submit_lock[RAID_PATH], flags);
3492 if (pqi_ctrl_offline(ctrl_info))
3496 next_element = queue_group->iq_element_array[RAID_PATH] +
3497 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3499 memcpy(next_element, iu, iu_length);
3501 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3502 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3505 * This write notifies the controller that an IU is available to be
3508 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3510 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3513 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3514 struct pqi_event *event)
3516 struct pqi_event_acknowledge_request request;
3518 memset(&request, 0, sizeof(request));
3520 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3521 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3522 &request.header.iu_length);
3523 request.event_type = event->event_type;
3524 put_unaligned_le16(event->event_id, &request.event_id);
3525 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3527 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3530 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3531 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3533 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3534 struct pqi_ctrl_info *ctrl_info)
3537 unsigned long timeout;
3539 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3542 status = pqi_read_soft_reset_status(ctrl_info);
3543 if (status & PQI_SOFT_RESET_INITIATE)
3544 return RESET_INITIATE_DRIVER;
3546 if (status & PQI_SOFT_RESET_ABORT)
3549 if (!sis_is_firmware_running(ctrl_info))
3550 return RESET_NORESPONSE;
3552 if (time_after(jiffies, timeout)) {
3553 dev_warn(&ctrl_info->pci_dev->dev,
3554 "timed out waiting for soft reset status\n");
3555 return RESET_TIMEDOUT;
3558 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3562 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3565 unsigned int delay_secs;
3566 enum pqi_soft_reset_status reset_status;
3568 if (ctrl_info->soft_reset_handshake_supported)
3569 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3571 reset_status = RESET_INITIATE_FIRMWARE;
3573 delay_secs = PQI_POST_RESET_DELAY_SECS;
3575 switch (reset_status) {
3576 case RESET_TIMEDOUT:
3577 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3579 case RESET_INITIATE_DRIVER:
3580 dev_info(&ctrl_info->pci_dev->dev,
3581 "Online Firmware Activation: resetting controller\n");
3582 sis_soft_reset(ctrl_info);
3584 case RESET_INITIATE_FIRMWARE:
3585 ctrl_info->pqi_mode_enabled = false;
3586 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3587 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3588 pqi_ofa_free_host_buffer(ctrl_info);
3589 pqi_ctrl_ofa_done(ctrl_info);
3590 dev_info(&ctrl_info->pci_dev->dev,
3591 "Online Firmware Activation: %s\n",
3592 rc == 0 ? "SUCCESS" : "FAILED");
3595 dev_info(&ctrl_info->pci_dev->dev,
3596 "Online Firmware Activation ABORTED\n");
3597 if (ctrl_info->soft_reset_handshake_supported)
3598 pqi_clear_soft_reset_status(ctrl_info);
3599 pqi_ofa_free_host_buffer(ctrl_info);
3600 pqi_ctrl_ofa_done(ctrl_info);
3601 pqi_ofa_ctrl_unquiesce(ctrl_info);
3603 case RESET_NORESPONSE:
3606 dev_err(&ctrl_info->pci_dev->dev,
3607 "unexpected Online Firmware Activation reset status: 0x%x\n",
3609 pqi_ofa_free_host_buffer(ctrl_info);
3610 pqi_ctrl_ofa_done(ctrl_info);
3611 pqi_ofa_ctrl_unquiesce(ctrl_info);
3612 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3617 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3619 struct pqi_ctrl_info *ctrl_info;
3621 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3623 pqi_ctrl_ofa_start(ctrl_info);
3624 pqi_ofa_setup_host_buffer(ctrl_info);
3625 pqi_ofa_host_memory_update(ctrl_info);
3628 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3630 struct pqi_ctrl_info *ctrl_info;
3631 struct pqi_event *event;
3633 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3635 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3637 pqi_ofa_ctrl_quiesce(ctrl_info);
3638 pqi_acknowledge_event(ctrl_info, event);
3639 pqi_process_soft_reset(ctrl_info);
3642 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3643 struct pqi_event *event)
3649 switch (event->event_id) {
3650 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3651 dev_info(&ctrl_info->pci_dev->dev,
3652 "received Online Firmware Activation memory allocation request\n");
3653 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3655 case PQI_EVENT_OFA_QUIESCE:
3656 dev_info(&ctrl_info->pci_dev->dev,
3657 "received Online Firmware Activation quiesce request\n");
3658 schedule_work(&ctrl_info->ofa_quiesce_work);
3661 case PQI_EVENT_OFA_CANCELED:
3662 dev_info(&ctrl_info->pci_dev->dev,
3663 "received Online Firmware Activation cancel request: reason: %u\n",
3664 ctrl_info->ofa_cancel_reason);
3665 pqi_ofa_free_host_buffer(ctrl_info);
3666 pqi_ctrl_ofa_done(ctrl_info);
3669 dev_err(&ctrl_info->pci_dev->dev,
3670 "received unknown Online Firmware Activation request: event ID: %u\n",
3678 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3680 unsigned long flags;
3681 struct pqi_scsi_dev *device;
3683 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3685 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3686 if (device->raid_bypass_enabled)
3687 device->raid_bypass_enabled = false;
3689 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3692 static void pqi_event_worker(struct work_struct *work)
3696 struct pqi_ctrl_info *ctrl_info;
3697 struct pqi_event *event;
3700 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3702 pqi_ctrl_busy(ctrl_info);
3703 pqi_wait_if_ctrl_blocked(ctrl_info);
3704 if (pqi_ctrl_offline(ctrl_info))
3707 rescan_needed = false;
3708 event = ctrl_info->events;
3709 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3710 if (event->pending) {
3711 event->pending = false;
3712 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3713 ack_event = pqi_ofa_process_event(ctrl_info, event);
3716 rescan_needed = true;
3717 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3718 ctrl_info->logical_volume_rescan_needed = true;
3719 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3720 pqi_disable_raid_bypass(ctrl_info);
3723 pqi_acknowledge_event(ctrl_info, event);
3728 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3731 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3732 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3735 pqi_ctrl_unbusy(ctrl_info);
3738 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
3740 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3743 u32 heartbeat_count;
3744 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3746 pqi_check_ctrl_health(ctrl_info);
3747 if (pqi_ctrl_offline(ctrl_info))
3750 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3751 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3753 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3754 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3755 dev_err(&ctrl_info->pci_dev->dev,
3756 "no heartbeat detected - last heartbeat count: %u\n",
3758 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3762 ctrl_info->previous_num_interrupts = num_interrupts;
3765 ctrl_info->previous_heartbeat_count = heartbeat_count;
3766 mod_timer(&ctrl_info->heartbeat_timer,
3767 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3770 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3772 if (!ctrl_info->heartbeat_counter)
3775 ctrl_info->previous_num_interrupts =
3776 atomic_read(&ctrl_info->num_interrupts);
3777 ctrl_info->previous_heartbeat_count =
3778 pqi_read_heartbeat_counter(ctrl_info);
3780 ctrl_info->heartbeat_timer.expires =
3781 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3782 add_timer(&ctrl_info->heartbeat_timer);
3785 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3787 del_timer_sync(&ctrl_info->heartbeat_timer);
3790 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3791 struct pqi_event *event, struct pqi_event_response *response)
3793 switch (event->event_id) {
3794 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3795 ctrl_info->ofa_bytes_requested =
3796 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3798 case PQI_EVENT_OFA_CANCELED:
3799 ctrl_info->ofa_cancel_reason =
3800 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3805 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3810 struct pqi_event_queue *event_queue;
3811 struct pqi_event_response *response;
3812 struct pqi_event *event;
3815 event_queue = &ctrl_info->event_queue;
3817 oq_ci = event_queue->oq_ci_copy;
3820 oq_pi = readl(event_queue->oq_pi);
3821 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3822 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3823 dev_err(&ctrl_info->pci_dev->dev,
3824 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3825 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3833 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3835 event_index = pqi_event_type_to_event_index(response->event_type);
3837 if (event_index >= 0 && response->request_acknowledge) {
3838 event = &ctrl_info->events[event_index];
3839 event->pending = true;
3840 event->event_type = response->event_type;
3841 event->event_id = get_unaligned_le16(&response->event_id);
3842 event->additional_event_id =
3843 get_unaligned_le32(&response->additional_event_id);
3844 if (event->event_type == PQI_EVENT_TYPE_OFA)
3845 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3848 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3852 event_queue->oq_ci_copy = oq_ci;
3853 writel(oq_ci, event_queue->oq_ci);
3854 schedule_work(&ctrl_info->event_work);
3860 #define PQI_LEGACY_INTX_MASK 0x1
3862 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3865 struct pqi_device_registers __iomem *pqi_registers;
3866 volatile void __iomem *register_addr;
3868 pqi_registers = ctrl_info->pqi_registers;
3871 register_addr = &pqi_registers->legacy_intx_mask_clear;
3873 register_addr = &pqi_registers->legacy_intx_mask_set;
3875 intx_mask = readl(register_addr);
3876 intx_mask |= PQI_LEGACY_INTX_MASK;
3877 writel(intx_mask, register_addr);
3880 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3881 enum pqi_irq_mode new_mode)
3883 switch (ctrl_info->irq_mode) {
3889 pqi_configure_legacy_intx(ctrl_info, true);
3890 sis_enable_intx(ctrl_info);
3899 pqi_configure_legacy_intx(ctrl_info, false);
3900 sis_enable_msix(ctrl_info);
3905 pqi_configure_legacy_intx(ctrl_info, false);
3912 sis_enable_msix(ctrl_info);
3915 pqi_configure_legacy_intx(ctrl_info, true);
3916 sis_enable_intx(ctrl_info);
3924 ctrl_info->irq_mode = new_mode;
3927 #define PQI_LEGACY_INTX_PENDING 0x1
3929 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3934 switch (ctrl_info->irq_mode) {
3939 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3940 if (intx_status & PQI_LEGACY_INTX_PENDING)
3954 static irqreturn_t pqi_irq_handler(int irq, void *data)
3956 struct pqi_ctrl_info *ctrl_info;
3957 struct pqi_queue_group *queue_group;
3958 int num_io_responses_handled;
3959 int num_events_handled;
3962 ctrl_info = queue_group->ctrl_info;
3964 if (!pqi_is_valid_irq(ctrl_info))
3967 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3968 if (num_io_responses_handled < 0)
3971 if (irq == ctrl_info->event_irq) {
3972 num_events_handled = pqi_process_event_intr(ctrl_info);
3973 if (num_events_handled < 0)
3976 num_events_handled = 0;
3979 if (num_io_responses_handled + num_events_handled > 0)
3980 atomic_inc(&ctrl_info->num_interrupts);
3982 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3983 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3989 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3991 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3995 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3997 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3998 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3999 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4001 dev_err(&pci_dev->dev,
4002 "irq %u init failed with error %d\n",
4003 pci_irq_vector(pci_dev, i), rc);
4006 ctrl_info->num_msix_vectors_initialized++;
4012 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4016 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4017 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4018 &ctrl_info->queue_groups[i]);
4020 ctrl_info->num_msix_vectors_initialized = 0;
4023 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4025 int num_vectors_enabled;
4026 unsigned int flags = PCI_IRQ_MSIX;
4028 if (!pqi_disable_managed_interrupts)
4029 flags |= PCI_IRQ_AFFINITY;
4031 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4032 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4034 if (num_vectors_enabled < 0) {
4035 dev_err(&ctrl_info->pci_dev->dev,
4036 "MSI-X init failed with error %d\n",
4037 num_vectors_enabled);
4038 return num_vectors_enabled;
4041 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4042 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4046 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4048 if (ctrl_info->num_msix_vectors_enabled) {
4049 pci_free_irq_vectors(ctrl_info->pci_dev);
4050 ctrl_info->num_msix_vectors_enabled = 0;
4054 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4057 size_t alloc_length;
4058 size_t element_array_length_per_iq;
4059 size_t element_array_length_per_oq;
4060 void *element_array;
4061 void __iomem *next_queue_index;
4062 void *aligned_pointer;
4063 unsigned int num_inbound_queues;
4064 unsigned int num_outbound_queues;
4065 unsigned int num_queue_indexes;
4066 struct pqi_queue_group *queue_group;
4068 element_array_length_per_iq =
4069 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4070 ctrl_info->num_elements_per_iq;
4071 element_array_length_per_oq =
4072 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4073 ctrl_info->num_elements_per_oq;
4074 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4075 num_outbound_queues = ctrl_info->num_queue_groups;
4076 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4078 aligned_pointer = NULL;
4080 for (i = 0; i < num_inbound_queues; i++) {
4081 aligned_pointer = PTR_ALIGN(aligned_pointer,
4082 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4083 aligned_pointer += element_array_length_per_iq;
4086 for (i = 0; i < num_outbound_queues; i++) {
4087 aligned_pointer = PTR_ALIGN(aligned_pointer,
4088 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4089 aligned_pointer += element_array_length_per_oq;
4092 aligned_pointer = PTR_ALIGN(aligned_pointer,
4093 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4094 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4095 PQI_EVENT_OQ_ELEMENT_LENGTH;
4097 for (i = 0; i < num_queue_indexes; i++) {
4098 aligned_pointer = PTR_ALIGN(aligned_pointer,
4099 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4100 aligned_pointer += sizeof(pqi_index_t);
4103 alloc_length = (size_t)aligned_pointer +
4104 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4106 alloc_length += PQI_EXTRA_SGL_MEMORY;
4108 ctrl_info->queue_memory_base =
4109 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4110 &ctrl_info->queue_memory_base_dma_handle,
4113 if (!ctrl_info->queue_memory_base)
4116 ctrl_info->queue_memory_length = alloc_length;
4118 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4119 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4121 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4122 queue_group = &ctrl_info->queue_groups[i];
4123 queue_group->iq_element_array[RAID_PATH] = element_array;
4124 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4125 ctrl_info->queue_memory_base_dma_handle +
4126 (element_array - ctrl_info->queue_memory_base);
4127 element_array += element_array_length_per_iq;
4128 element_array = PTR_ALIGN(element_array,
4129 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4130 queue_group->iq_element_array[AIO_PATH] = element_array;
4131 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4132 ctrl_info->queue_memory_base_dma_handle +
4133 (element_array - ctrl_info->queue_memory_base);
4134 element_array += element_array_length_per_iq;
4135 element_array = PTR_ALIGN(element_array,
4136 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4139 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4140 queue_group = &ctrl_info->queue_groups[i];
4141 queue_group->oq_element_array = element_array;
4142 queue_group->oq_element_array_bus_addr =
4143 ctrl_info->queue_memory_base_dma_handle +
4144 (element_array - ctrl_info->queue_memory_base);
4145 element_array += element_array_length_per_oq;
4146 element_array = PTR_ALIGN(element_array,
4147 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4150 ctrl_info->event_queue.oq_element_array = element_array;
4151 ctrl_info->event_queue.oq_element_array_bus_addr =
4152 ctrl_info->queue_memory_base_dma_handle +
4153 (element_array - ctrl_info->queue_memory_base);
4154 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4155 PQI_EVENT_OQ_ELEMENT_LENGTH;
4157 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4158 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4160 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4161 queue_group = &ctrl_info->queue_groups[i];
4162 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4163 queue_group->iq_ci_bus_addr[RAID_PATH] =
4164 ctrl_info->queue_memory_base_dma_handle +
4166 (void __iomem *)ctrl_info->queue_memory_base);
4167 next_queue_index += sizeof(pqi_index_t);
4168 next_queue_index = PTR_ALIGN(next_queue_index,
4169 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4170 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4171 queue_group->iq_ci_bus_addr[AIO_PATH] =
4172 ctrl_info->queue_memory_base_dma_handle +
4174 (void __iomem *)ctrl_info->queue_memory_base);
4175 next_queue_index += sizeof(pqi_index_t);
4176 next_queue_index = PTR_ALIGN(next_queue_index,
4177 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4178 queue_group->oq_pi = next_queue_index;
4179 queue_group->oq_pi_bus_addr =
4180 ctrl_info->queue_memory_base_dma_handle +
4182 (void __iomem *)ctrl_info->queue_memory_base);
4183 next_queue_index += sizeof(pqi_index_t);
4184 next_queue_index = PTR_ALIGN(next_queue_index,
4185 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4188 ctrl_info->event_queue.oq_pi = next_queue_index;
4189 ctrl_info->event_queue.oq_pi_bus_addr =
4190 ctrl_info->queue_memory_base_dma_handle +
4192 (void __iomem *)ctrl_info->queue_memory_base);
4197 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4200 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4201 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4204 * Initialize the backpointers to the controller structure in
4205 * each operational queue group structure.
4207 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4208 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4211 * Assign IDs to all operational queues. Note that the IDs
4212 * assigned to operational IQs are independent of the IDs
4213 * assigned to operational OQs.
4215 ctrl_info->event_queue.oq_id = next_oq_id++;
4216 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4217 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4218 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4219 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4223 * Assign MSI-X table entry indexes to all queues. Note that the
4224 * interrupt for the event queue is shared with the first queue group.
4226 ctrl_info->event_queue.int_msg_num = 0;
4227 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4228 ctrl_info->queue_groups[i].int_msg_num = i;
4230 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4231 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4232 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4233 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4234 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4238 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4240 size_t alloc_length;
4241 struct pqi_admin_queues_aligned *admin_queues_aligned;
4242 struct pqi_admin_queues *admin_queues;
4244 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4245 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4247 ctrl_info->admin_queue_memory_base =
4248 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4249 &ctrl_info->admin_queue_memory_base_dma_handle,
4252 if (!ctrl_info->admin_queue_memory_base)
4255 ctrl_info->admin_queue_memory_length = alloc_length;
4257 admin_queues = &ctrl_info->admin_queues;
4258 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4259 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4260 admin_queues->iq_element_array =
4261 &admin_queues_aligned->iq_element_array;
4262 admin_queues->oq_element_array =
4263 &admin_queues_aligned->oq_element_array;
4264 admin_queues->iq_ci =
4265 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4266 admin_queues->oq_pi =
4267 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4269 admin_queues->iq_element_array_bus_addr =
4270 ctrl_info->admin_queue_memory_base_dma_handle +
4271 (admin_queues->iq_element_array -
4272 ctrl_info->admin_queue_memory_base);
4273 admin_queues->oq_element_array_bus_addr =
4274 ctrl_info->admin_queue_memory_base_dma_handle +
4275 (admin_queues->oq_element_array -
4276 ctrl_info->admin_queue_memory_base);
4277 admin_queues->iq_ci_bus_addr =
4278 ctrl_info->admin_queue_memory_base_dma_handle +
4279 ((void __iomem *)admin_queues->iq_ci -
4280 (void __iomem *)ctrl_info->admin_queue_memory_base);
4281 admin_queues->oq_pi_bus_addr =
4282 ctrl_info->admin_queue_memory_base_dma_handle +
4283 ((void __iomem *)admin_queues->oq_pi -
4284 (void __iomem *)ctrl_info->admin_queue_memory_base);
4289 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4290 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4292 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4294 struct pqi_device_registers __iomem *pqi_registers;
4295 struct pqi_admin_queues *admin_queues;
4296 unsigned long timeout;
4300 pqi_registers = ctrl_info->pqi_registers;
4301 admin_queues = &ctrl_info->admin_queues;
4303 writeq((u64)admin_queues->iq_element_array_bus_addr,
4304 &pqi_registers->admin_iq_element_array_addr);
4305 writeq((u64)admin_queues->oq_element_array_bus_addr,
4306 &pqi_registers->admin_oq_element_array_addr);
4307 writeq((u64)admin_queues->iq_ci_bus_addr,
4308 &pqi_registers->admin_iq_ci_addr);
4309 writeq((u64)admin_queues->oq_pi_bus_addr,
4310 &pqi_registers->admin_oq_pi_addr);
4312 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4313 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4314 (admin_queues->int_msg_num << 16);
4315 writel(reg, &pqi_registers->admin_iq_num_elements);
4317 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4318 &pqi_registers->function_and_status_code);
4320 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4322 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4323 status = readb(&pqi_registers->function_and_status_code);
4324 if (status == PQI_STATUS_IDLE)
4326 if (time_after(jiffies, timeout))
4331 * The offset registers are not initialized to the correct
4332 * offsets until *after* the create admin queue pair command
4333 * completes successfully.
4335 admin_queues->iq_pi = ctrl_info->iomem_base +
4336 PQI_DEVICE_REGISTERS_OFFSET +
4337 readq(&pqi_registers->admin_iq_pi_offset);
4338 admin_queues->oq_ci = ctrl_info->iomem_base +
4339 PQI_DEVICE_REGISTERS_OFFSET +
4340 readq(&pqi_registers->admin_oq_ci_offset);
4345 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4346 struct pqi_general_admin_request *request)
4348 struct pqi_admin_queues *admin_queues;
4352 admin_queues = &ctrl_info->admin_queues;
4353 iq_pi = admin_queues->iq_pi_copy;
4355 next_element = admin_queues->iq_element_array +
4356 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4358 memcpy(next_element, request, sizeof(*request));
4360 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4361 admin_queues->iq_pi_copy = iq_pi;
4364 * This write notifies the controller that an IU is available to be
4367 writel(iq_pi, admin_queues->iq_pi);
4370 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4372 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4373 struct pqi_general_admin_response *response)
4375 struct pqi_admin_queues *admin_queues;
4378 unsigned long timeout;
4380 admin_queues = &ctrl_info->admin_queues;
4381 oq_ci = admin_queues->oq_ci_copy;
4383 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4386 oq_pi = readl(admin_queues->oq_pi);
4389 if (time_after(jiffies, timeout)) {
4390 dev_err(&ctrl_info->pci_dev->dev,
4391 "timed out waiting for admin response\n");
4394 if (!sis_is_firmware_running(ctrl_info))
4396 usleep_range(1000, 2000);
4399 memcpy(response, admin_queues->oq_element_array +
4400 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4402 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4403 admin_queues->oq_ci_copy = oq_ci;
4404 writel(oq_ci, admin_queues->oq_ci);
4409 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4410 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4411 struct pqi_io_request *io_request)
4413 struct pqi_io_request *next;
4418 unsigned long flags;
4419 unsigned int num_elements_needed;
4420 unsigned int num_elements_to_end_of_queue;
4422 struct pqi_iu_header *request;
4424 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4427 io_request->queue_group = queue_group;
4428 list_add_tail(&io_request->request_list_entry,
4429 &queue_group->request_list[path]);
4432 iq_pi = queue_group->iq_pi_copy[path];
4434 list_for_each_entry_safe(io_request, next,
4435 &queue_group->request_list[path], request_list_entry) {
4437 request = io_request->iu;
4439 iu_length = get_unaligned_le16(&request->iu_length) +
4440 PQI_REQUEST_HEADER_LENGTH;
4441 num_elements_needed =
4442 DIV_ROUND_UP(iu_length,
4443 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4445 iq_ci = readl(queue_group->iq_ci[path]);
4447 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4448 ctrl_info->num_elements_per_iq))
4451 put_unaligned_le16(queue_group->oq_id,
4452 &request->response_queue_id);
4454 next_element = queue_group->iq_element_array[path] +
4455 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4457 num_elements_to_end_of_queue =
4458 ctrl_info->num_elements_per_iq - iq_pi;
4460 if (num_elements_needed <= num_elements_to_end_of_queue) {
4461 memcpy(next_element, request, iu_length);
4463 copy_count = num_elements_to_end_of_queue *
4464 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4465 memcpy(next_element, request, copy_count);
4466 memcpy(queue_group->iq_element_array[path],
4467 (u8 *)request + copy_count,
4468 iu_length - copy_count);
4471 iq_pi = (iq_pi + num_elements_needed) %
4472 ctrl_info->num_elements_per_iq;
4474 list_del(&io_request->request_list_entry);
4477 if (iq_pi != queue_group->iq_pi_copy[path]) {
4478 queue_group->iq_pi_copy[path] = iq_pi;
4480 * This write notifies the controller that one or more IUs are
4481 * available to be processed.
4483 writel(iq_pi, queue_group->iq_pi[path]);
4486 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4489 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4491 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4492 struct completion *wait)
4497 if (wait_for_completion_io_timeout(wait,
4498 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4503 pqi_check_ctrl_health(ctrl_info);
4504 if (pqi_ctrl_offline(ctrl_info)) {
4513 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4516 struct completion *waiting = context;
4521 static int pqi_process_raid_io_error_synchronous(
4522 struct pqi_raid_error_info *error_info)
4526 switch (error_info->data_out_result) {
4527 case PQI_DATA_IN_OUT_GOOD:
4528 if (error_info->status == SAM_STAT_GOOD)
4531 case PQI_DATA_IN_OUT_UNDERFLOW:
4532 if (error_info->status == SAM_STAT_GOOD ||
4533 error_info->status == SAM_STAT_CHECK_CONDITION)
4536 case PQI_DATA_IN_OUT_ABORTED:
4537 rc = PQI_CMD_STATUS_ABORTED;
4544 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4546 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4549 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4550 struct pqi_iu_header *request, unsigned int flags,
4551 struct pqi_raid_error_info *error_info)
4554 struct pqi_io_request *io_request;
4556 DECLARE_COMPLETION_ONSTACK(wait);
4558 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4559 if (down_interruptible(&ctrl_info->sync_request_sem))
4560 return -ERESTARTSYS;
4562 down(&ctrl_info->sync_request_sem);
4565 pqi_ctrl_busy(ctrl_info);
4567 * Wait for other admin queue updates such as;
4568 * config table changes, OFA memory updates, ...
4570 if (pqi_is_blockable_request(request))
4571 pqi_wait_if_ctrl_blocked(ctrl_info);
4573 if (pqi_ctrl_offline(ctrl_info)) {
4578 io_request = pqi_alloc_io_request(ctrl_info, NULL);
4580 put_unaligned_le16(io_request->index,
4581 &(((struct pqi_raid_path_request *)request)->request_id));
4583 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4584 ((struct pqi_raid_path_request *)request)->error_index =
4585 ((struct pqi_raid_path_request *)request)->request_id;
4587 iu_length = get_unaligned_le16(&request->iu_length) +
4588 PQI_REQUEST_HEADER_LENGTH;
4589 memcpy(io_request->iu, request, iu_length);
4591 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4592 io_request->context = &wait;
4594 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4597 pqi_wait_for_completion_io(ctrl_info, &wait);
4600 if (io_request->error_info)
4601 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4603 memset(error_info, 0, sizeof(*error_info));
4604 } else if (rc == 0 && io_request->error_info) {
4605 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4608 pqi_free_io_request(io_request);
4611 pqi_ctrl_unbusy(ctrl_info);
4612 up(&ctrl_info->sync_request_sem);
4617 static int pqi_validate_admin_response(
4618 struct pqi_general_admin_response *response, u8 expected_function_code)
4620 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4623 if (get_unaligned_le16(&response->header.iu_length) !=
4624 PQI_GENERAL_ADMIN_IU_LENGTH)
4627 if (response->function_code != expected_function_code)
4630 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4636 static int pqi_submit_admin_request_synchronous(
4637 struct pqi_ctrl_info *ctrl_info,
4638 struct pqi_general_admin_request *request,
4639 struct pqi_general_admin_response *response)
4643 pqi_submit_admin_request(ctrl_info, request);
4645 rc = pqi_poll_for_admin_response(ctrl_info, response);
4648 rc = pqi_validate_admin_response(response, request->function_code);
4653 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4656 struct pqi_general_admin_request request;
4657 struct pqi_general_admin_response response;
4658 struct pqi_device_capability *capability;
4659 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4661 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4665 memset(&request, 0, sizeof(request));
4667 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4668 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4669 &request.header.iu_length);
4670 request.function_code =
4671 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4672 put_unaligned_le32(sizeof(*capability),
4673 &request.data.report_device_capability.buffer_length);
4675 rc = pqi_map_single(ctrl_info->pci_dev,
4676 &request.data.report_device_capability.sg_descriptor,
4677 capability, sizeof(*capability),
4682 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4684 pqi_pci_unmap(ctrl_info->pci_dev,
4685 &request.data.report_device_capability.sg_descriptor, 1,
4691 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4696 ctrl_info->max_inbound_queues =
4697 get_unaligned_le16(&capability->max_inbound_queues);
4698 ctrl_info->max_elements_per_iq =
4699 get_unaligned_le16(&capability->max_elements_per_iq);
4700 ctrl_info->max_iq_element_length =
4701 get_unaligned_le16(&capability->max_iq_element_length)
4703 ctrl_info->max_outbound_queues =
4704 get_unaligned_le16(&capability->max_outbound_queues);
4705 ctrl_info->max_elements_per_oq =
4706 get_unaligned_le16(&capability->max_elements_per_oq);
4707 ctrl_info->max_oq_element_length =
4708 get_unaligned_le16(&capability->max_oq_element_length)
4711 sop_iu_layer_descriptor =
4712 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4714 ctrl_info->max_inbound_iu_length_per_firmware =
4716 &sop_iu_layer_descriptor->max_inbound_iu_length);
4717 ctrl_info->inbound_spanning_supported =
4718 sop_iu_layer_descriptor->inbound_spanning_supported;
4719 ctrl_info->outbound_spanning_supported =
4720 sop_iu_layer_descriptor->outbound_spanning_supported;
4728 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4730 if (ctrl_info->max_iq_element_length <
4731 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4732 dev_err(&ctrl_info->pci_dev->dev,
4733 "max. inbound queue element length of %d is less than the required length of %d\n",
4734 ctrl_info->max_iq_element_length,
4735 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4739 if (ctrl_info->max_oq_element_length <
4740 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4741 dev_err(&ctrl_info->pci_dev->dev,
4742 "max. outbound queue element length of %d is less than the required length of %d\n",
4743 ctrl_info->max_oq_element_length,
4744 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4748 if (ctrl_info->max_inbound_iu_length_per_firmware <
4749 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4750 dev_err(&ctrl_info->pci_dev->dev,
4751 "max. inbound IU length of %u is less than the min. required length of %d\n",
4752 ctrl_info->max_inbound_iu_length_per_firmware,
4753 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4757 if (!ctrl_info->inbound_spanning_supported) {
4758 dev_err(&ctrl_info->pci_dev->dev,
4759 "the controller does not support inbound spanning\n");
4763 if (ctrl_info->outbound_spanning_supported) {
4764 dev_err(&ctrl_info->pci_dev->dev,
4765 "the controller supports outbound spanning but this driver does not\n");
4772 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4775 struct pqi_event_queue *event_queue;
4776 struct pqi_general_admin_request request;
4777 struct pqi_general_admin_response response;
4779 event_queue = &ctrl_info->event_queue;
4782 * Create OQ (Outbound Queue - device to host queue) to dedicate
4785 memset(&request, 0, sizeof(request));
4786 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4787 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4788 &request.header.iu_length);
4789 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4790 put_unaligned_le16(event_queue->oq_id,
4791 &request.data.create_operational_oq.queue_id);
4792 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4793 &request.data.create_operational_oq.element_array_addr);
4794 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4795 &request.data.create_operational_oq.pi_addr);
4796 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4797 &request.data.create_operational_oq.num_elements);
4798 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4799 &request.data.create_operational_oq.element_length);
4800 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4801 put_unaligned_le16(event_queue->int_msg_num,
4802 &request.data.create_operational_oq.int_msg_num);
4804 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4809 event_queue->oq_ci = ctrl_info->iomem_base +
4810 PQI_DEVICE_REGISTERS_OFFSET +
4812 &response.data.create_operational_oq.oq_ci_offset);
4817 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4818 unsigned int group_number)
4821 struct pqi_queue_group *queue_group;
4822 struct pqi_general_admin_request request;
4823 struct pqi_general_admin_response response;
4825 queue_group = &ctrl_info->queue_groups[group_number];
4828 * Create IQ (Inbound Queue - host to device queue) for
4831 memset(&request, 0, sizeof(request));
4832 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4833 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4834 &request.header.iu_length);
4835 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4836 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4837 &request.data.create_operational_iq.queue_id);
4839 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4840 &request.data.create_operational_iq.element_array_addr);
4841 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4842 &request.data.create_operational_iq.ci_addr);
4843 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4844 &request.data.create_operational_iq.num_elements);
4845 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4846 &request.data.create_operational_iq.element_length);
4847 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4849 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4852 dev_err(&ctrl_info->pci_dev->dev,
4853 "error creating inbound RAID queue\n");
4857 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4858 PQI_DEVICE_REGISTERS_OFFSET +
4860 &response.data.create_operational_iq.iq_pi_offset);
4863 * Create IQ (Inbound Queue - host to device queue) for
4864 * Advanced I/O (AIO) path.
4866 memset(&request, 0, sizeof(request));
4867 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4868 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4869 &request.header.iu_length);
4870 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4871 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4872 &request.data.create_operational_iq.queue_id);
4873 put_unaligned_le64((u64)queue_group->
4874 iq_element_array_bus_addr[AIO_PATH],
4875 &request.data.create_operational_iq.element_array_addr);
4876 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4877 &request.data.create_operational_iq.ci_addr);
4878 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4879 &request.data.create_operational_iq.num_elements);
4880 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4881 &request.data.create_operational_iq.element_length);
4882 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4884 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4887 dev_err(&ctrl_info->pci_dev->dev,
4888 "error creating inbound AIO queue\n");
4892 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4893 PQI_DEVICE_REGISTERS_OFFSET +
4895 &response.data.create_operational_iq.iq_pi_offset);
4898 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4899 * assumed to be for RAID path I/O unless we change the queue's
4902 memset(&request, 0, sizeof(request));
4903 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4904 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4905 &request.header.iu_length);
4906 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4907 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4908 &request.data.change_operational_iq_properties.queue_id);
4909 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4910 &request.data.change_operational_iq_properties.vendor_specific);
4912 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4915 dev_err(&ctrl_info->pci_dev->dev,
4916 "error changing queue property\n");
4921 * Create OQ (Outbound Queue - device to host queue).
4923 memset(&request, 0, sizeof(request));
4924 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4925 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4926 &request.header.iu_length);
4927 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4928 put_unaligned_le16(queue_group->oq_id,
4929 &request.data.create_operational_oq.queue_id);
4930 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4931 &request.data.create_operational_oq.element_array_addr);
4932 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4933 &request.data.create_operational_oq.pi_addr);
4934 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4935 &request.data.create_operational_oq.num_elements);
4936 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4937 &request.data.create_operational_oq.element_length);
4938 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4939 put_unaligned_le16(queue_group->int_msg_num,
4940 &request.data.create_operational_oq.int_msg_num);
4942 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4945 dev_err(&ctrl_info->pci_dev->dev,
4946 "error creating outbound queue\n");
4950 queue_group->oq_ci = ctrl_info->iomem_base +
4951 PQI_DEVICE_REGISTERS_OFFSET +
4953 &response.data.create_operational_oq.oq_ci_offset);
4958 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4963 rc = pqi_create_event_queue(ctrl_info);
4965 dev_err(&ctrl_info->pci_dev->dev,
4966 "error creating event queue\n");
4970 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4971 rc = pqi_create_queue_group(ctrl_info, i);
4973 dev_err(&ctrl_info->pci_dev->dev,
4974 "error creating queue group number %u/%u\n",
4975 i, ctrl_info->num_queue_groups);
4983 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4984 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
4986 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4991 struct pqi_event_config *event_config;
4992 struct pqi_event_descriptor *event_descriptor;
4993 struct pqi_general_management_request request;
4995 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5000 memset(&request, 0, sizeof(request));
5002 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5003 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5004 data.report_event_configuration.sg_descriptors[1]) -
5005 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5006 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5007 &request.data.report_event_configuration.buffer_length);
5009 rc = pqi_map_single(ctrl_info->pci_dev,
5010 request.data.report_event_configuration.sg_descriptors,
5011 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5016 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5018 pqi_pci_unmap(ctrl_info->pci_dev,
5019 request.data.report_event_configuration.sg_descriptors, 1,
5025 for (i = 0; i < event_config->num_event_descriptors; i++) {
5026 event_descriptor = &event_config->descriptors[i];
5027 if (enable_events &&
5028 pqi_is_supported_event(event_descriptor->event_type))
5029 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5030 &event_descriptor->oq_id);
5032 put_unaligned_le16(0, &event_descriptor->oq_id);
5035 memset(&request, 0, sizeof(request));
5037 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5038 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5039 data.report_event_configuration.sg_descriptors[1]) -
5040 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5041 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5042 &request.data.report_event_configuration.buffer_length);
5044 rc = pqi_map_single(ctrl_info->pci_dev,
5045 request.data.report_event_configuration.sg_descriptors,
5046 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5051 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5053 pqi_pci_unmap(ctrl_info->pci_dev,
5054 request.data.report_event_configuration.sg_descriptors, 1,
5058 kfree(event_config);
5063 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5065 return pqi_configure_events(ctrl_info, true);
5068 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5072 size_t sg_chain_buffer_length;
5073 struct pqi_io_request *io_request;
5075 if (!ctrl_info->io_request_pool)
5078 dev = &ctrl_info->pci_dev->dev;
5079 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5080 io_request = ctrl_info->io_request_pool;
5082 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5083 kfree(io_request->iu);
5084 if (!io_request->sg_chain_buffer)
5086 dma_free_coherent(dev, sg_chain_buffer_length,
5087 io_request->sg_chain_buffer,
5088 io_request->sg_chain_buffer_dma_handle);
5092 kfree(ctrl_info->io_request_pool);
5093 ctrl_info->io_request_pool = NULL;
5096 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5098 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5099 ctrl_info->error_buffer_length,
5100 &ctrl_info->error_buffer_dma_handle,
5102 if (!ctrl_info->error_buffer)
5108 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5111 void *sg_chain_buffer;
5112 size_t sg_chain_buffer_length;
5113 dma_addr_t sg_chain_buffer_dma_handle;
5115 struct pqi_io_request *io_request;
5117 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5118 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5120 if (!ctrl_info->io_request_pool) {
5121 dev_err(&ctrl_info->pci_dev->dev,
5122 "failed to allocate I/O request pool\n");
5126 dev = &ctrl_info->pci_dev->dev;
5127 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5128 io_request = ctrl_info->io_request_pool;
5130 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5131 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5133 if (!io_request->iu) {
5134 dev_err(&ctrl_info->pci_dev->dev,
5135 "failed to allocate IU buffers\n");
5139 sg_chain_buffer = dma_alloc_coherent(dev,
5140 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5143 if (!sg_chain_buffer) {
5144 dev_err(&ctrl_info->pci_dev->dev,
5145 "failed to allocate PQI scatter-gather chain buffers\n");
5149 io_request->index = i;
5150 io_request->sg_chain_buffer = sg_chain_buffer;
5151 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5158 pqi_free_all_io_requests(ctrl_info);
5164 * Calculate required resources that are sized based on max. outstanding
5165 * requests and max. transfer size.
5168 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5170 u32 max_transfer_size;
5173 ctrl_info->scsi_ml_can_queue =
5174 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5175 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5177 ctrl_info->error_buffer_length =
5178 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5181 max_transfer_size = min(ctrl_info->max_transfer_size,
5182 PQI_MAX_TRANSFER_SIZE_KDUMP);
5184 max_transfer_size = min(ctrl_info->max_transfer_size,
5185 PQI_MAX_TRANSFER_SIZE);
5187 max_sg_entries = max_transfer_size / PAGE_SIZE;
5189 /* +1 to cover when the buffer is not page-aligned. */
5192 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5194 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5196 ctrl_info->sg_chain_buffer_length =
5197 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5198 PQI_EXTRA_SGL_MEMORY;
5199 ctrl_info->sg_tablesize = max_sg_entries;
5200 ctrl_info->max_sectors = max_transfer_size / 512;
5203 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5205 int num_queue_groups;
5206 u16 num_elements_per_iq;
5207 u16 num_elements_per_oq;
5209 if (reset_devices) {
5210 num_queue_groups = 1;
5213 int max_queue_groups;
5215 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5216 ctrl_info->max_outbound_queues - 1);
5217 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5219 num_cpus = num_online_cpus();
5220 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5221 num_queue_groups = min(num_queue_groups, max_queue_groups);
5224 ctrl_info->num_queue_groups = num_queue_groups;
5227 * Make sure that the max. inbound IU length is an even multiple
5228 * of our inbound element length.
5230 ctrl_info->max_inbound_iu_length =
5231 (ctrl_info->max_inbound_iu_length_per_firmware /
5232 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5233 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5235 num_elements_per_iq =
5236 (ctrl_info->max_inbound_iu_length /
5237 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5239 /* Add one because one element in each queue is unusable. */
5240 num_elements_per_iq++;
5242 num_elements_per_iq = min(num_elements_per_iq,
5243 ctrl_info->max_elements_per_iq);
5245 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5246 num_elements_per_oq = min(num_elements_per_oq,
5247 ctrl_info->max_elements_per_oq);
5249 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5250 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5252 ctrl_info->max_sg_per_iu =
5253 ((ctrl_info->max_inbound_iu_length -
5254 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5255 sizeof(struct pqi_sg_descriptor)) +
5256 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5258 ctrl_info->max_sg_per_r56_iu =
5259 ((ctrl_info->max_inbound_iu_length -
5260 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5261 sizeof(struct pqi_sg_descriptor)) +
5262 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5265 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5266 struct scatterlist *sg)
5268 u64 address = (u64)sg_dma_address(sg);
5269 unsigned int length = sg_dma_len(sg);
5271 put_unaligned_le64(address, &sg_descriptor->address);
5272 put_unaligned_le32(length, &sg_descriptor->length);
5273 put_unaligned_le32(0, &sg_descriptor->flags);
5276 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5277 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5278 int max_sg_per_iu, bool *chained)
5281 unsigned int num_sg_in_iu;
5286 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5289 pqi_set_sg_descriptor(sg_descriptor, sg);
5296 if (i == max_sg_per_iu) {
5297 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5298 &sg_descriptor->address);
5299 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5300 &sg_descriptor->length);
5301 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5304 sg_descriptor = io_request->sg_chain_buffer;
5309 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5311 return num_sg_in_iu;
5314 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5315 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5316 struct pqi_io_request *io_request)
5321 unsigned int num_sg_in_iu;
5322 struct scatterlist *sg;
5323 struct pqi_sg_descriptor *sg_descriptor;
5325 sg_count = scsi_dma_map(scmd);
5329 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5330 PQI_REQUEST_HEADER_LENGTH;
5335 sg = scsi_sglist(scmd);
5336 sg_descriptor = request->sg_descriptors;
5338 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5339 ctrl_info->max_sg_per_iu, &chained);
5341 request->partial = chained;
5342 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5345 put_unaligned_le16(iu_length, &request->header.iu_length);
5350 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5351 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5352 struct pqi_io_request *io_request)
5357 unsigned int num_sg_in_iu;
5358 struct scatterlist *sg;
5359 struct pqi_sg_descriptor *sg_descriptor;
5361 sg_count = scsi_dma_map(scmd);
5365 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5366 PQI_REQUEST_HEADER_LENGTH;
5372 sg = scsi_sglist(scmd);
5373 sg_descriptor = request->sg_descriptors;
5375 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5376 ctrl_info->max_sg_per_iu, &chained);
5378 request->partial = chained;
5379 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5382 put_unaligned_le16(iu_length, &request->header.iu_length);
5383 request->num_sg_descriptors = num_sg_in_iu;
5388 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5389 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5390 struct pqi_io_request *io_request)
5395 unsigned int num_sg_in_iu;
5396 struct scatterlist *sg;
5397 struct pqi_sg_descriptor *sg_descriptor;
5399 sg_count = scsi_dma_map(scmd);
5403 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5404 PQI_REQUEST_HEADER_LENGTH;
5407 if (sg_count != 0) {
5408 sg = scsi_sglist(scmd);
5409 sg_descriptor = request->sg_descriptors;
5411 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5412 ctrl_info->max_sg_per_r56_iu, &chained);
5414 request->partial = chained;
5415 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5418 put_unaligned_le16(iu_length, &request->header.iu_length);
5419 request->num_sg_descriptors = num_sg_in_iu;
5424 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5425 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5426 struct pqi_io_request *io_request)
5431 unsigned int num_sg_in_iu;
5432 struct scatterlist *sg;
5433 struct pqi_sg_descriptor *sg_descriptor;
5435 sg_count = scsi_dma_map(scmd);
5439 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5440 PQI_REQUEST_HEADER_LENGTH;
5446 sg = scsi_sglist(scmd);
5447 sg_descriptor = request->sg_descriptors;
5449 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5450 ctrl_info->max_sg_per_iu, &chained);
5452 request->partial = chained;
5453 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5456 put_unaligned_le16(iu_length, &request->header.iu_length);
5457 request->num_sg_descriptors = num_sg_in_iu;
5462 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5465 struct scsi_cmnd *scmd;
5467 scmd = io_request->scmd;
5468 pqi_free_io_request(io_request);
5469 scsi_dma_unmap(scmd);
5470 pqi_scsi_done(scmd);
5473 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5474 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5475 struct pqi_queue_group *queue_group, bool io_high_prio)
5479 struct pqi_io_request *io_request;
5480 struct pqi_raid_path_request *request;
5482 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5484 return SCSI_MLQUEUE_HOST_BUSY;
5486 io_request->io_complete_callback = pqi_raid_io_complete;
5487 io_request->scmd = scmd;
5489 request = io_request->iu;
5490 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5492 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5493 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5494 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5495 request->command_priority = io_high_prio;
5496 put_unaligned_le16(io_request->index, &request->request_id);
5497 request->error_index = request->request_id;
5498 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5499 request->ml_device_lun_number = (u8)scmd->device->lun;
5501 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5502 memcpy(request->cdb, scmd->cmnd, cdb_length);
5504 switch (cdb_length) {
5509 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5512 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5515 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5518 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5522 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5526 switch (scmd->sc_data_direction) {
5527 case DMA_FROM_DEVICE:
5528 request->data_direction = SOP_READ_FLAG;
5531 request->data_direction = SOP_WRITE_FLAG;
5534 request->data_direction = SOP_NO_DIRECTION_FLAG;
5536 case DMA_BIDIRECTIONAL:
5537 request->data_direction = SOP_BIDIRECTIONAL;
5540 dev_err(&ctrl_info->pci_dev->dev,
5541 "unknown data direction: %d\n",
5542 scmd->sc_data_direction);
5546 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5548 pqi_free_io_request(io_request);
5549 return SCSI_MLQUEUE_HOST_BUSY;
5552 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5557 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5558 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5559 struct pqi_queue_group *queue_group)
5563 io_high_prio = pqi_is_io_high_priority(device, scmd);
5565 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5568 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5570 struct scsi_cmnd *scmd;
5571 struct pqi_scsi_dev *device;
5572 struct pqi_ctrl_info *ctrl_info;
5574 if (!io_request->raid_bypass)
5577 scmd = io_request->scmd;
5578 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5580 if (host_byte(scmd->result) == DID_NO_CONNECT)
5583 device = scmd->device->hostdata;
5584 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5587 ctrl_info = shost_to_hba(scmd->device->host);
5588 if (pqi_ctrl_offline(ctrl_info))
5594 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5597 struct scsi_cmnd *scmd;
5599 scmd = io_request->scmd;
5600 scsi_dma_unmap(scmd);
5601 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5602 set_host_byte(scmd, DID_IMM_RETRY);
5603 pqi_cmd_priv(scmd)->this_residual++;
5606 pqi_free_io_request(io_request);
5607 pqi_scsi_done(scmd);
5610 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5611 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5612 struct pqi_queue_group *queue_group)
5616 io_high_prio = pqi_is_io_high_priority(device, scmd);
5618 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5619 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5620 false, io_high_prio);
5623 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5624 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5625 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5626 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5630 struct pqi_io_request *io_request;
5631 struct pqi_aio_path_request *request;
5632 struct pqi_scsi_dev *device;
5634 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5636 return SCSI_MLQUEUE_HOST_BUSY;
5638 io_request->io_complete_callback = pqi_aio_io_complete;
5639 io_request->scmd = scmd;
5640 io_request->raid_bypass = raid_bypass;
5642 request = io_request->iu;
5643 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5645 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5646 put_unaligned_le32(aio_handle, &request->nexus_id);
5647 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5648 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5649 request->command_priority = io_high_prio;
5650 put_unaligned_le16(io_request->index, &request->request_id);
5651 request->error_index = request->request_id;
5652 device = scmd->device->hostdata;
5653 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5654 put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
5655 if (cdb_length > sizeof(request->cdb))
5656 cdb_length = sizeof(request->cdb);
5657 request->cdb_length = cdb_length;
5658 memcpy(request->cdb, cdb, cdb_length);
5660 switch (scmd->sc_data_direction) {
5662 request->data_direction = SOP_READ_FLAG;
5664 case DMA_FROM_DEVICE:
5665 request->data_direction = SOP_WRITE_FLAG;
5668 request->data_direction = SOP_NO_DIRECTION_FLAG;
5670 case DMA_BIDIRECTIONAL:
5671 request->data_direction = SOP_BIDIRECTIONAL;
5674 dev_err(&ctrl_info->pci_dev->dev,
5675 "unknown data direction: %d\n",
5676 scmd->sc_data_direction);
5680 if (encryption_info) {
5681 request->encryption_enable = true;
5682 put_unaligned_le16(encryption_info->data_encryption_key_index,
5683 &request->data_encryption_key_index);
5684 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5685 &request->encrypt_tweak_lower);
5686 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5687 &request->encrypt_tweak_upper);
5690 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5692 pqi_free_io_request(io_request);
5693 return SCSI_MLQUEUE_HOST_BUSY;
5696 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5701 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5702 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5703 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5704 struct pqi_scsi_dev_raid_map_data *rmd)
5707 struct pqi_io_request *io_request;
5708 struct pqi_aio_r1_path_request *r1_request;
5710 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5712 return SCSI_MLQUEUE_HOST_BUSY;
5714 io_request->io_complete_callback = pqi_aio_io_complete;
5715 io_request->scmd = scmd;
5716 io_request->raid_bypass = true;
5718 r1_request = io_request->iu;
5719 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5721 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5722 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5723 r1_request->num_drives = rmd->num_it_nexus_entries;
5724 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5725 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5726 if (rmd->num_it_nexus_entries == 3)
5727 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5729 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5730 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5731 put_unaligned_le16(io_request->index, &r1_request->request_id);
5732 r1_request->error_index = r1_request->request_id;
5733 if (rmd->cdb_length > sizeof(r1_request->cdb))
5734 rmd->cdb_length = sizeof(r1_request->cdb);
5735 r1_request->cdb_length = rmd->cdb_length;
5736 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5738 /* The direction is always write. */
5739 r1_request->data_direction = SOP_READ_FLAG;
5741 if (encryption_info) {
5742 r1_request->encryption_enable = true;
5743 put_unaligned_le16(encryption_info->data_encryption_key_index,
5744 &r1_request->data_encryption_key_index);
5745 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5746 &r1_request->encrypt_tweak_lower);
5747 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5748 &r1_request->encrypt_tweak_upper);
5751 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5753 pqi_free_io_request(io_request);
5754 return SCSI_MLQUEUE_HOST_BUSY;
5757 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5762 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5763 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5764 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5765 struct pqi_scsi_dev_raid_map_data *rmd)
5768 struct pqi_io_request *io_request;
5769 struct pqi_aio_r56_path_request *r56_request;
5771 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5773 return SCSI_MLQUEUE_HOST_BUSY;
5774 io_request->io_complete_callback = pqi_aio_io_complete;
5775 io_request->scmd = scmd;
5776 io_request->raid_bypass = true;
5778 r56_request = io_request->iu;
5779 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5781 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5782 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5784 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5786 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5787 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5788 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5789 if (rmd->raid_level == SA_RAID_6) {
5790 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5791 r56_request->xor_multiplier = rmd->xor_mult;
5793 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5794 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5795 put_unaligned_le64(rmd->row, &r56_request->row);
5797 put_unaligned_le16(io_request->index, &r56_request->request_id);
5798 r56_request->error_index = r56_request->request_id;
5800 if (rmd->cdb_length > sizeof(r56_request->cdb))
5801 rmd->cdb_length = sizeof(r56_request->cdb);
5802 r56_request->cdb_length = rmd->cdb_length;
5803 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5805 /* The direction is always write. */
5806 r56_request->data_direction = SOP_READ_FLAG;
5808 if (encryption_info) {
5809 r56_request->encryption_enable = true;
5810 put_unaligned_le16(encryption_info->data_encryption_key_index,
5811 &r56_request->data_encryption_key_index);
5812 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5813 &r56_request->encrypt_tweak_lower);
5814 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5815 &r56_request->encrypt_tweak_upper);
5818 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5820 pqi_free_io_request(io_request);
5821 return SCSI_MLQUEUE_HOST_BUSY;
5824 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5829 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5830 struct scsi_cmnd *scmd)
5833 * We are setting host_tagset = 1 during init.
5835 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5838 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5840 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5843 return pqi_cmd_priv(scmd)->this_residual == 0;
5847 * This function gets called just before we hand the completed SCSI request
5851 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5853 struct pqi_scsi_dev *device;
5855 if (!scmd->device) {
5856 set_host_byte(scmd, DID_NO_CONNECT);
5860 device = scmd->device->hostdata;
5862 set_host_byte(scmd, DID_NO_CONNECT);
5866 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5869 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5870 struct scsi_cmnd *scmd)
5876 struct pqi_scsi_dev *device;
5877 struct pqi_stream_data *pqi_stream_data;
5878 struct pqi_scsi_dev_raid_map_data rmd;
5880 if (!ctrl_info->enable_stream_detection)
5883 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5887 /* Check writes only. */
5891 device = scmd->device->hostdata;
5893 /* Check for RAID 5/6 streams. */
5894 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5898 * If controller does not support AIO RAID{5,6} writes, need to send
5899 * requests down non-AIO path.
5901 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5902 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5906 oldest_jiffies = INT_MAX;
5907 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5908 pqi_stream_data = &device->stream_data[i];
5910 * Check for adjacent request or request is within
5911 * the previous request.
5913 if ((pqi_stream_data->next_lba &&
5914 rmd.first_block >= pqi_stream_data->next_lba) &&
5915 rmd.first_block <= pqi_stream_data->next_lba +
5917 pqi_stream_data->next_lba = rmd.first_block +
5919 pqi_stream_data->last_accessed = jiffies;
5924 if (pqi_stream_data->last_accessed == 0) {
5929 /* Find entry with oldest last accessed time. */
5930 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5931 oldest_jiffies = pqi_stream_data->last_accessed;
5936 /* Set LRU entry. */
5937 pqi_stream_data = &device->stream_data[lru_index];
5938 pqi_stream_data->last_accessed = jiffies;
5939 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5944 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5947 struct pqi_ctrl_info *ctrl_info;
5948 struct pqi_scsi_dev *device;
5950 struct pqi_queue_group *queue_group;
5953 device = scmd->device->hostdata;
5956 set_host_byte(scmd, DID_NO_CONNECT);
5957 pqi_scsi_done(scmd);
5961 atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
5963 ctrl_info = shost_to_hba(shost);
5965 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5966 set_host_byte(scmd, DID_NO_CONNECT);
5967 pqi_scsi_done(scmd);
5971 if (pqi_ctrl_blocked(ctrl_info)) {
5972 rc = SCSI_MLQUEUE_HOST_BUSY;
5977 * This is necessary because the SML doesn't zero out this field during
5982 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5983 queue_group = &ctrl_info->queue_groups[hw_queue];
5985 if (pqi_is_logical_device(device)) {
5986 raid_bypassed = false;
5987 if (device->raid_bypass_enabled &&
5988 pqi_is_bypass_eligible_request(scmd) &&
5989 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5990 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5991 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5992 raid_bypassed = true;
5993 device->raid_bypass_cnt++;
5997 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5999 if (device->aio_enabled)
6000 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6002 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6007 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
6012 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6016 unsigned long flags;
6017 unsigned int queued_io_count;
6018 struct pqi_queue_group *queue_group;
6019 struct pqi_io_request *io_request;
6021 queued_io_count = 0;
6023 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6024 queue_group = &ctrl_info->queue_groups[i];
6025 for (path = 0; path < 2; path++) {
6026 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6027 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6029 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6033 return queued_io_count;
6036 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6040 unsigned int nonempty_inbound_queue_count;
6041 struct pqi_queue_group *queue_group;
6045 nonempty_inbound_queue_count = 0;
6047 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6048 queue_group = &ctrl_info->queue_groups[i];
6049 for (path = 0; path < 2; path++) {
6050 iq_pi = queue_group->iq_pi_copy[path];
6051 iq_ci = readl(queue_group->iq_ci[path]);
6053 nonempty_inbound_queue_count++;
6057 return nonempty_inbound_queue_count;
6060 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6062 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6064 unsigned long start_jiffies;
6065 unsigned long warning_timeout;
6066 unsigned int queued_io_count;
6067 unsigned int nonempty_inbound_queue_count;
6068 bool displayed_warning;
6070 displayed_warning = false;
6071 start_jiffies = jiffies;
6072 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6075 queued_io_count = pqi_queued_io_count(ctrl_info);
6076 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6077 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6079 pqi_check_ctrl_health(ctrl_info);
6080 if (pqi_ctrl_offline(ctrl_info))
6082 if (time_after(jiffies, warning_timeout)) {
6083 dev_warn(&ctrl_info->pci_dev->dev,
6084 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6085 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6086 displayed_warning = true;
6087 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6089 usleep_range(1000, 2000);
6092 if (displayed_warning)
6093 dev_warn(&ctrl_info->pci_dev->dev,
6094 "queued I/O drained after waiting for %u seconds\n",
6095 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6100 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6101 struct pqi_scsi_dev *device)
6105 struct pqi_queue_group *queue_group;
6106 unsigned long flags;
6107 struct pqi_io_request *io_request;
6108 struct pqi_io_request *next;
6109 struct scsi_cmnd *scmd;
6110 struct pqi_scsi_dev *scsi_device;
6112 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6113 queue_group = &ctrl_info->queue_groups[i];
6115 for (path = 0; path < 2; path++) {
6117 &queue_group->submit_lock[path], flags);
6119 list_for_each_entry_safe(io_request, next,
6120 &queue_group->request_list[path],
6121 request_list_entry) {
6123 scmd = io_request->scmd;
6127 scsi_device = scmd->device->hostdata;
6128 if (scsi_device != device)
6131 list_del(&io_request->request_list_entry);
6132 set_host_byte(scmd, DID_RESET);
6133 pqi_free_io_request(io_request);
6134 scsi_dma_unmap(scmd);
6135 pqi_scsi_done(scmd);
6138 spin_unlock_irqrestore(
6139 &queue_group->submit_lock[path], flags);
6144 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6146 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6147 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6149 int cmds_outstanding;
6150 unsigned long start_jiffies;
6151 unsigned long warning_timeout;
6152 unsigned long msecs_waiting;
6154 start_jiffies = jiffies;
6155 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6157 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6158 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6159 pqi_check_ctrl_health(ctrl_info);
6160 if (pqi_ctrl_offline(ctrl_info))
6163 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6164 if (msecs_waiting >= timeout_msecs) {
6165 dev_err(&ctrl_info->pci_dev->dev,
6166 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6167 ctrl_info->scsi_host->host_no, device->bus, device->target,
6168 lun, msecs_waiting / 1000, cmds_outstanding);
6171 if (time_after(jiffies, warning_timeout)) {
6172 dev_warn(&ctrl_info->pci_dev->dev,
6173 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6174 ctrl_info->scsi_host->host_no, device->bus, device->target,
6175 lun, msecs_waiting / 1000, cmds_outstanding);
6176 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6178 usleep_range(1000, 2000);
6184 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6187 struct completion *waiting = context;
6192 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6194 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6195 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6198 unsigned int wait_secs;
6199 int cmds_outstanding;
6204 if (wait_for_completion_io_timeout(wait,
6205 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6210 pqi_check_ctrl_health(ctrl_info);
6211 if (pqi_ctrl_offline(ctrl_info)) {
6216 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6217 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6218 dev_warn(&ctrl_info->pci_dev->dev,
6219 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6220 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6226 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6228 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6231 struct pqi_io_request *io_request;
6232 DECLARE_COMPLETION_ONSTACK(wait);
6233 struct pqi_task_management_request *request;
6234 struct pqi_scsi_dev *device;
6236 device = scmd->device->hostdata;
6237 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6238 io_request->io_complete_callback = pqi_lun_reset_complete;
6239 io_request->context = &wait;
6241 request = io_request->iu;
6242 memset(request, 0, sizeof(*request));
6244 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6245 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6246 &request->header.iu_length);
6247 put_unaligned_le16(io_request->index, &request->request_id);
6248 memcpy(request->lun_number, device->scsi3addr,
6249 sizeof(request->lun_number));
6250 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6251 request->ml_device_lun_number = (u8)scmd->device->lun;
6252 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6253 if (ctrl_info->tmf_iu_timeout_supported)
6254 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6256 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6259 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
6261 rc = io_request->status;
6263 pqi_free_io_request(io_request);
6268 #define PQI_LUN_RESET_RETRIES 3
6269 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6270 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6271 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6273 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6277 unsigned int retries;
6278 unsigned long timeout_msecs;
6279 struct pqi_scsi_dev *device;
6281 device = scmd->device->hostdata;
6282 for (retries = 0;;) {
6283 reset_rc = pqi_lun_reset(ctrl_info, scmd);
6284 if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
6286 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6289 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6290 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6292 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
6293 if (wait_rc && reset_rc == 0)
6296 return reset_rc == 0 ? SUCCESS : FAILED;
6299 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6302 struct pqi_scsi_dev *device;
6304 device = scmd->device->hostdata;
6305 pqi_ctrl_block_requests(ctrl_info);
6306 pqi_ctrl_wait_until_quiesced(ctrl_info);
6307 pqi_fail_io_queued_for_device(ctrl_info, device);
6308 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6312 rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
6313 pqi_ctrl_unblock_requests(ctrl_info);
6318 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6321 struct Scsi_Host *shost;
6322 struct pqi_ctrl_info *ctrl_info;
6323 struct pqi_scsi_dev *device;
6325 shost = scmd->device->host;
6326 ctrl_info = shost_to_hba(shost);
6327 device = scmd->device->hostdata;
6329 mutex_lock(&ctrl_info->lun_reset_mutex);
6331 dev_err(&ctrl_info->pci_dev->dev,
6332 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6334 device->bus, device->target, (u32)scmd->device->lun,
6335 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6337 pqi_check_ctrl_health(ctrl_info);
6338 if (pqi_ctrl_offline(ctrl_info))
6341 rc = pqi_device_reset(ctrl_info, scmd);
6343 dev_err(&ctrl_info->pci_dev->dev,
6344 "reset of scsi %d:%d:%d:%d: %s\n",
6345 shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6346 rc == SUCCESS ? "SUCCESS" : "FAILED");
6348 mutex_unlock(&ctrl_info->lun_reset_mutex);
6353 static int pqi_slave_alloc(struct scsi_device *sdev)
6355 struct pqi_scsi_dev *device;
6356 unsigned long flags;
6357 struct pqi_ctrl_info *ctrl_info;
6358 struct scsi_target *starget;
6359 struct sas_rphy *rphy;
6361 ctrl_info = shost_to_hba(sdev->host);
6363 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6365 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6366 starget = scsi_target(sdev);
6367 rphy = target_to_rphy(starget);
6368 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6370 if (device->target_lun_valid) {
6371 device->ignore_device = true;
6373 device->target = sdev_id(sdev);
6374 device->lun = sdev->lun;
6375 device->target_lun_valid = true;
6379 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6380 sdev_id(sdev), sdev->lun);
6384 sdev->hostdata = device;
6385 device->sdev = sdev;
6386 if (device->queue_depth) {
6387 device->advertised_queue_depth = device->queue_depth;
6388 scsi_change_queue_depth(sdev,
6389 device->advertised_queue_depth);
6391 if (pqi_is_logical_device(device)) {
6392 pqi_disable_write_same(sdev);
6394 sdev->allow_restart = 1;
6395 if (device->device_type == SA_DEVICE_TYPE_NVME)
6396 pqi_disable_write_same(sdev);
6400 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6405 static void pqi_map_queues(struct Scsi_Host *shost)
6407 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6409 blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6410 ctrl_info->pci_dev, 0);
6413 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6415 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6418 static int pqi_slave_configure(struct scsi_device *sdev)
6421 struct pqi_scsi_dev *device;
6423 device = sdev->hostdata;
6424 device->devtype = sdev->type;
6426 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6428 device->ignore_device = false;
6434 static void pqi_slave_destroy(struct scsi_device *sdev)
6436 struct pqi_ctrl_info *ctrl_info;
6437 struct pqi_scsi_dev *device;
6439 unsigned long flags;
6441 ctrl_info = shost_to_hba(sdev->host);
6443 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6444 if (!mutex_acquired)
6447 device = sdev->hostdata;
6449 mutex_unlock(&ctrl_info->scan_mutex);
6453 device->lun_count--;
6454 if (device->lun_count > 0) {
6455 mutex_unlock(&ctrl_info->scan_mutex);
6459 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6460 list_del(&device->scsi_device_list_entry);
6461 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6463 mutex_unlock(&ctrl_info->scan_mutex);
6465 pqi_dev_info(ctrl_info, "removed", device);
6466 pqi_free_device(device);
6469 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6471 struct pci_dev *pci_dev;
6472 u32 subsystem_vendor;
6473 u32 subsystem_device;
6474 cciss_pci_info_struct pciinfo;
6479 pci_dev = ctrl_info->pci_dev;
6481 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6482 pciinfo.bus = pci_dev->bus->number;
6483 pciinfo.dev_fn = pci_dev->devfn;
6484 subsystem_vendor = pci_dev->subsystem_vendor;
6485 subsystem_device = pci_dev->subsystem_device;
6486 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6488 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6494 static int pqi_getdrivver_ioctl(void __user *arg)
6501 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6502 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6504 if (copy_to_user(arg, &version, sizeof(version)))
6510 struct ciss_error_info {
6513 size_t sense_data_length;
6516 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6517 struct ciss_error_info *ciss_error_info)
6519 int ciss_cmd_status;
6520 size_t sense_data_length;
6522 switch (pqi_error_info->data_out_result) {
6523 case PQI_DATA_IN_OUT_GOOD:
6524 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6526 case PQI_DATA_IN_OUT_UNDERFLOW:
6527 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6529 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6530 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6532 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6533 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6534 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6535 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6536 case PQI_DATA_IN_OUT_ERROR:
6537 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6539 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6540 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6541 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6542 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6543 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6544 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6545 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6546 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6547 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6548 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6549 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6551 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6552 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6554 case PQI_DATA_IN_OUT_ABORTED:
6555 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6557 case PQI_DATA_IN_OUT_TIMEOUT:
6558 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6561 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6566 get_unaligned_le16(&pqi_error_info->sense_data_length);
6567 if (sense_data_length == 0)
6569 get_unaligned_le16(&pqi_error_info->response_data_length);
6570 if (sense_data_length)
6571 if (sense_data_length > sizeof(pqi_error_info->data))
6572 sense_data_length = sizeof(pqi_error_info->data);
6574 ciss_error_info->scsi_status = pqi_error_info->status;
6575 ciss_error_info->command_status = ciss_cmd_status;
6576 ciss_error_info->sense_data_length = sense_data_length;
6579 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6582 char *kernel_buffer = NULL;
6584 size_t sense_data_length;
6585 IOCTL_Command_struct iocommand;
6586 struct pqi_raid_path_request request;
6587 struct pqi_raid_error_info pqi_error_info;
6588 struct ciss_error_info ciss_error_info;
6590 if (pqi_ctrl_offline(ctrl_info))
6592 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6596 if (!capable(CAP_SYS_RAWIO))
6598 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6600 if (iocommand.buf_size < 1 &&
6601 iocommand.Request.Type.Direction != XFER_NONE)
6603 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6605 if (iocommand.Request.Type.Type != TYPE_CMD)
6608 switch (iocommand.Request.Type.Direction) {
6612 case XFER_READ | XFER_WRITE:
6618 if (iocommand.buf_size > 0) {
6619 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6622 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6623 if (copy_from_user(kernel_buffer, iocommand.buf,
6624 iocommand.buf_size)) {
6629 memset(kernel_buffer, 0, iocommand.buf_size);
6633 memset(&request, 0, sizeof(request));
6635 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6636 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6637 PQI_REQUEST_HEADER_LENGTH;
6638 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6639 sizeof(request.lun_number));
6640 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6641 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6643 switch (iocommand.Request.Type.Direction) {
6645 request.data_direction = SOP_NO_DIRECTION_FLAG;
6648 request.data_direction = SOP_WRITE_FLAG;
6651 request.data_direction = SOP_READ_FLAG;
6653 case XFER_READ | XFER_WRITE:
6654 request.data_direction = SOP_BIDIRECTIONAL;
6658 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6660 if (iocommand.buf_size > 0) {
6661 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6663 rc = pqi_map_single(ctrl_info->pci_dev,
6664 &request.sg_descriptors[0], kernel_buffer,
6665 iocommand.buf_size, DMA_BIDIRECTIONAL);
6669 iu_length += sizeof(request.sg_descriptors[0]);
6672 put_unaligned_le16(iu_length, &request.header.iu_length);
6674 if (ctrl_info->raid_iu_timeout_supported)
6675 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6677 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6678 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6680 if (iocommand.buf_size > 0)
6681 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6684 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6687 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6688 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6689 iocommand.error_info.CommandStatus =
6690 ciss_error_info.command_status;
6691 sense_data_length = ciss_error_info.sense_data_length;
6692 if (sense_data_length) {
6693 if (sense_data_length >
6694 sizeof(iocommand.error_info.SenseInfo))
6696 sizeof(iocommand.error_info.SenseInfo);
6697 memcpy(iocommand.error_info.SenseInfo,
6698 pqi_error_info.data, sense_data_length);
6699 iocommand.error_info.SenseLen = sense_data_length;
6703 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6708 if (rc == 0 && iocommand.buf_size > 0 &&
6709 (iocommand.Request.Type.Direction & XFER_READ)) {
6710 if (copy_to_user(iocommand.buf, kernel_buffer,
6711 iocommand.buf_size)) {
6717 kfree(kernel_buffer);
6722 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6726 struct pqi_ctrl_info *ctrl_info;
6728 ctrl_info = shost_to_hba(sdev->host);
6731 case CCISS_DEREGDISK:
6732 case CCISS_REGNEWDISK:
6734 rc = pqi_scan_scsi_devices(ctrl_info);
6736 case CCISS_GETPCIINFO:
6737 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6739 case CCISS_GETDRIVVER:
6740 rc = pqi_getdrivver_ioctl(arg);
6742 case CCISS_PASSTHRU:
6743 rc = pqi_passthru_ioctl(ctrl_info, arg);
6753 static ssize_t pqi_firmware_version_show(struct device *dev,
6754 struct device_attribute *attr, char *buffer)
6756 struct Scsi_Host *shost;
6757 struct pqi_ctrl_info *ctrl_info;
6759 shost = class_to_shost(dev);
6760 ctrl_info = shost_to_hba(shost);
6762 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6765 static ssize_t pqi_driver_version_show(struct device *dev,
6766 struct device_attribute *attr, char *buffer)
6768 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6771 static ssize_t pqi_serial_number_show(struct device *dev,
6772 struct device_attribute *attr, char *buffer)
6774 struct Scsi_Host *shost;
6775 struct pqi_ctrl_info *ctrl_info;
6777 shost = class_to_shost(dev);
6778 ctrl_info = shost_to_hba(shost);
6780 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6783 static ssize_t pqi_model_show(struct device *dev,
6784 struct device_attribute *attr, char *buffer)
6786 struct Scsi_Host *shost;
6787 struct pqi_ctrl_info *ctrl_info;
6789 shost = class_to_shost(dev);
6790 ctrl_info = shost_to_hba(shost);
6792 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6795 static ssize_t pqi_vendor_show(struct device *dev,
6796 struct device_attribute *attr, char *buffer)
6798 struct Scsi_Host *shost;
6799 struct pqi_ctrl_info *ctrl_info;
6801 shost = class_to_shost(dev);
6802 ctrl_info = shost_to_hba(shost);
6804 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6807 static ssize_t pqi_host_rescan_store(struct device *dev,
6808 struct device_attribute *attr, const char *buffer, size_t count)
6810 struct Scsi_Host *shost = class_to_shost(dev);
6812 pqi_scan_start(shost);
6817 static ssize_t pqi_lockup_action_show(struct device *dev,
6818 struct device_attribute *attr, char *buffer)
6823 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6824 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6825 count += scnprintf(buffer + count, PAGE_SIZE - count,
6826 "[%s] ", pqi_lockup_actions[i].name);
6828 count += scnprintf(buffer + count, PAGE_SIZE - count,
6829 "%s ", pqi_lockup_actions[i].name);
6832 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6837 static ssize_t pqi_lockup_action_store(struct device *dev,
6838 struct device_attribute *attr, const char *buffer, size_t count)
6842 char action_name_buffer[32];
6844 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6845 action_name = strstrip(action_name_buffer);
6847 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6848 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6849 pqi_lockup_action = pqi_lockup_actions[i].action;
6857 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6858 struct device_attribute *attr, char *buffer)
6860 struct Scsi_Host *shost = class_to_shost(dev);
6861 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6863 return scnprintf(buffer, 10, "%x\n",
6864 ctrl_info->enable_stream_detection);
6867 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6868 struct device_attribute *attr, const char *buffer, size_t count)
6870 struct Scsi_Host *shost = class_to_shost(dev);
6871 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6872 u8 set_stream_detection = 0;
6874 if (kstrtou8(buffer, 0, &set_stream_detection))
6877 if (set_stream_detection > 0)
6878 set_stream_detection = 1;
6880 ctrl_info->enable_stream_detection = set_stream_detection;
6885 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6886 struct device_attribute *attr, char *buffer)
6888 struct Scsi_Host *shost = class_to_shost(dev);
6889 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6891 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6894 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6895 struct device_attribute *attr, const char *buffer, size_t count)
6897 struct Scsi_Host *shost = class_to_shost(dev);
6898 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6899 u8 set_r5_writes = 0;
6901 if (kstrtou8(buffer, 0, &set_r5_writes))
6904 if (set_r5_writes > 0)
6907 ctrl_info->enable_r5_writes = set_r5_writes;
6912 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6913 struct device_attribute *attr, char *buffer)
6915 struct Scsi_Host *shost = class_to_shost(dev);
6916 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6918 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6921 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6922 struct device_attribute *attr, const char *buffer, size_t count)
6924 struct Scsi_Host *shost = class_to_shost(dev);
6925 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6926 u8 set_r6_writes = 0;
6928 if (kstrtou8(buffer, 0, &set_r6_writes))
6931 if (set_r6_writes > 0)
6934 ctrl_info->enable_r6_writes = set_r6_writes;
6939 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6940 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6941 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6942 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6943 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6944 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6945 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6946 pqi_lockup_action_store);
6947 static DEVICE_ATTR(enable_stream_detection, 0644,
6948 pqi_host_enable_stream_detection_show,
6949 pqi_host_enable_stream_detection_store);
6950 static DEVICE_ATTR(enable_r5_writes, 0644,
6951 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6952 static DEVICE_ATTR(enable_r6_writes, 0644,
6953 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6955 static struct attribute *pqi_shost_attrs[] = {
6956 &dev_attr_driver_version.attr,
6957 &dev_attr_firmware_version.attr,
6958 &dev_attr_model.attr,
6959 &dev_attr_serial_number.attr,
6960 &dev_attr_vendor.attr,
6961 &dev_attr_rescan.attr,
6962 &dev_attr_lockup_action.attr,
6963 &dev_attr_enable_stream_detection.attr,
6964 &dev_attr_enable_r5_writes.attr,
6965 &dev_attr_enable_r6_writes.attr,
6969 ATTRIBUTE_GROUPS(pqi_shost);
6971 static ssize_t pqi_unique_id_show(struct device *dev,
6972 struct device_attribute *attr, char *buffer)
6974 struct pqi_ctrl_info *ctrl_info;
6975 struct scsi_device *sdev;
6976 struct pqi_scsi_dev *device;
6977 unsigned long flags;
6980 sdev = to_scsi_device(dev);
6981 ctrl_info = shost_to_hba(sdev->host);
6983 if (pqi_ctrl_offline(ctrl_info))
6986 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6988 device = sdev->hostdata;
6990 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6994 if (device->is_physical_device)
6995 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6997 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6999 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7001 return scnprintf(buffer, PAGE_SIZE,
7002 "%02X%02X%02X%02X%02X%02X%02X%02X"
7003 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7004 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7005 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7006 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7007 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7010 static ssize_t pqi_lunid_show(struct device *dev,
7011 struct device_attribute *attr, char *buffer)
7013 struct pqi_ctrl_info *ctrl_info;
7014 struct scsi_device *sdev;
7015 struct pqi_scsi_dev *device;
7016 unsigned long flags;
7019 sdev = to_scsi_device(dev);
7020 ctrl_info = shost_to_hba(sdev->host);
7022 if (pqi_ctrl_offline(ctrl_info))
7025 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7027 device = sdev->hostdata;
7029 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7033 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7035 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7037 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7042 static ssize_t pqi_path_info_show(struct device *dev,
7043 struct device_attribute *attr, char *buf)
7045 struct pqi_ctrl_info *ctrl_info;
7046 struct scsi_device *sdev;
7047 struct pqi_scsi_dev *device;
7048 unsigned long flags;
7055 u8 phys_connector[2];
7057 sdev = to_scsi_device(dev);
7058 ctrl_info = shost_to_hba(sdev->host);
7060 if (pqi_ctrl_offline(ctrl_info))
7063 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7065 device = sdev->hostdata;
7067 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7072 for (i = 0; i < MAX_PATHS; i++) {
7073 path_map_index = 1 << i;
7074 if (i == device->active_path_index)
7076 else if (device->path_map & path_map_index)
7077 active = "Inactive";
7081 output_len += scnprintf(buf + output_len,
7082 PAGE_SIZE - output_len,
7083 "[%d:%d:%d:%d] %20.20s ",
7084 ctrl_info->scsi_host->host_no,
7085 device->bus, device->target,
7087 scsi_device_type(device->devtype));
7089 if (device->devtype == TYPE_RAID ||
7090 pqi_is_logical_device(device))
7093 memcpy(&phys_connector, &device->phys_connector[i],
7094 sizeof(phys_connector));
7095 if (phys_connector[0] < '0')
7096 phys_connector[0] = '0';
7097 if (phys_connector[1] < '0')
7098 phys_connector[1] = '0';
7100 output_len += scnprintf(buf + output_len,
7101 PAGE_SIZE - output_len,
7102 "PORT: %.2s ", phys_connector);
7104 box = device->box[i];
7105 if (box != 0 && box != 0xFF)
7106 output_len += scnprintf(buf + output_len,
7107 PAGE_SIZE - output_len,
7110 if ((device->devtype == TYPE_DISK ||
7111 device->devtype == TYPE_ZBC) &&
7112 pqi_expose_device(device))
7113 output_len += scnprintf(buf + output_len,
7114 PAGE_SIZE - output_len,
7118 output_len += scnprintf(buf + output_len,
7119 PAGE_SIZE - output_len,
7123 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7128 static ssize_t pqi_sas_address_show(struct device *dev,
7129 struct device_attribute *attr, char *buffer)
7131 struct pqi_ctrl_info *ctrl_info;
7132 struct scsi_device *sdev;
7133 struct pqi_scsi_dev *device;
7134 unsigned long flags;
7137 sdev = to_scsi_device(dev);
7138 ctrl_info = shost_to_hba(sdev->host);
7140 if (pqi_ctrl_offline(ctrl_info))
7143 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7145 device = sdev->hostdata;
7147 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7151 sas_address = device->sas_address;
7153 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7155 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7158 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7159 struct device_attribute *attr, char *buffer)
7161 struct pqi_ctrl_info *ctrl_info;
7162 struct scsi_device *sdev;
7163 struct pqi_scsi_dev *device;
7164 unsigned long flags;
7166 sdev = to_scsi_device(dev);
7167 ctrl_info = shost_to_hba(sdev->host);
7169 if (pqi_ctrl_offline(ctrl_info))
7172 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7174 device = sdev->hostdata;
7176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7180 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7184 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7189 static ssize_t pqi_raid_level_show(struct device *dev,
7190 struct device_attribute *attr, char *buffer)
7192 struct pqi_ctrl_info *ctrl_info;
7193 struct scsi_device *sdev;
7194 struct pqi_scsi_dev *device;
7195 unsigned long flags;
7198 sdev = to_scsi_device(dev);
7199 ctrl_info = shost_to_hba(sdev->host);
7201 if (pqi_ctrl_offline(ctrl_info))
7204 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7206 device = sdev->hostdata;
7208 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7212 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7213 raid_level = pqi_raid_level_to_string(device->raid_level);
7217 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7219 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7222 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7223 struct device_attribute *attr, char *buffer)
7225 struct pqi_ctrl_info *ctrl_info;
7226 struct scsi_device *sdev;
7227 struct pqi_scsi_dev *device;
7228 unsigned long flags;
7229 unsigned int raid_bypass_cnt;
7231 sdev = to_scsi_device(dev);
7232 ctrl_info = shost_to_hba(sdev->host);
7234 if (pqi_ctrl_offline(ctrl_info))
7237 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7239 device = sdev->hostdata;
7241 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7245 raid_bypass_cnt = device->raid_bypass_cnt;
7247 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7249 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7252 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7253 struct device_attribute *attr, char *buf)
7255 struct pqi_ctrl_info *ctrl_info;
7256 struct scsi_device *sdev;
7257 struct pqi_scsi_dev *device;
7258 unsigned long flags;
7261 sdev = to_scsi_device(dev);
7262 ctrl_info = shost_to_hba(sdev->host);
7264 if (pqi_ctrl_offline(ctrl_info))
7267 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7269 device = sdev->hostdata;
7271 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7275 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7276 device->ncq_prio_enable);
7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7282 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7283 struct device_attribute *attr,
7284 const char *buf, size_t count)
7286 struct pqi_ctrl_info *ctrl_info;
7287 struct scsi_device *sdev;
7288 struct pqi_scsi_dev *device;
7289 unsigned long flags;
7290 u8 ncq_prio_enable = 0;
7292 if (kstrtou8(buf, 0, &ncq_prio_enable))
7295 sdev = to_scsi_device(dev);
7296 ctrl_info = shost_to_hba(sdev->host);
7298 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7300 device = sdev->hostdata;
7303 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7307 if (!device->ncq_prio_support) {
7308 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7312 device->ncq_prio_enable = ncq_prio_enable;
7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7319 static ssize_t pqi_numa_node_show(struct device *dev,
7320 struct device_attribute *attr, char *buffer)
7322 struct scsi_device *sdev;
7323 struct pqi_ctrl_info *ctrl_info;
7325 sdev = to_scsi_device(dev);
7326 ctrl_info = shost_to_hba(sdev->host);
7328 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7331 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7332 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7333 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7334 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7335 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7336 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7337 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7338 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7339 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7340 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7342 static struct attribute *pqi_sdev_attrs[] = {
7343 &dev_attr_lunid.attr,
7344 &dev_attr_unique_id.attr,
7345 &dev_attr_path_info.attr,
7346 &dev_attr_sas_address.attr,
7347 &dev_attr_ssd_smart_path_enabled.attr,
7348 &dev_attr_raid_level.attr,
7349 &dev_attr_raid_bypass_cnt.attr,
7350 &dev_attr_sas_ncq_prio_enable.attr,
7351 &dev_attr_numa_node.attr,
7355 ATTRIBUTE_GROUPS(pqi_sdev);
7357 static const struct scsi_host_template pqi_driver_template = {
7358 .module = THIS_MODULE,
7359 .name = DRIVER_NAME_SHORT,
7360 .proc_name = DRIVER_NAME_SHORT,
7361 .queuecommand = pqi_scsi_queue_command,
7362 .scan_start = pqi_scan_start,
7363 .scan_finished = pqi_scan_finished,
7365 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7367 .slave_alloc = pqi_slave_alloc,
7368 .slave_configure = pqi_slave_configure,
7369 .slave_destroy = pqi_slave_destroy,
7370 .map_queues = pqi_map_queues,
7371 .sdev_groups = pqi_sdev_groups,
7372 .shost_groups = pqi_shost_groups,
7373 .cmd_size = sizeof(struct pqi_cmd_priv),
7376 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7379 struct Scsi_Host *shost;
7381 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7383 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7388 shost->n_io_port = 0;
7389 shost->this_id = -1;
7390 shost->max_channel = PQI_MAX_BUS;
7391 shost->max_cmd_len = MAX_COMMAND_SIZE;
7392 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7394 shost->max_sectors = ctrl_info->max_sectors;
7395 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7396 shost->cmd_per_lun = shost->can_queue;
7397 shost->sg_tablesize = ctrl_info->sg_tablesize;
7398 shost->transportt = pqi_sas_transport_template;
7399 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7400 shost->unique_id = shost->irq;
7401 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7402 shost->host_tagset = 1;
7403 shost->hostdata[0] = (unsigned long)ctrl_info;
7405 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7407 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7411 rc = pqi_add_sas_host(shost, ctrl_info);
7413 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7417 ctrl_info->scsi_host = shost;
7422 scsi_remove_host(shost);
7424 scsi_host_put(shost);
7429 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7431 struct Scsi_Host *shost;
7433 pqi_delete_sas_host(ctrl_info);
7435 shost = ctrl_info->scsi_host;
7439 scsi_remove_host(shost);
7440 scsi_host_put(shost);
7443 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7446 struct pqi_device_registers __iomem *pqi_registers;
7447 unsigned long timeout;
7448 unsigned int timeout_msecs;
7449 union pqi_reset_register reset_reg;
7451 pqi_registers = ctrl_info->pqi_registers;
7452 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7453 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7456 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7457 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7458 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7460 if (!sis_is_firmware_running(ctrl_info)) {
7464 if (time_after(jiffies, timeout)) {
7473 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7476 union pqi_reset_register reset_reg;
7478 if (ctrl_info->pqi_reset_quiesce_supported) {
7479 rc = sis_pqi_reset_quiesce(ctrl_info);
7481 dev_err(&ctrl_info->pci_dev->dev,
7482 "PQI reset failed during quiesce with error %d\n", rc);
7487 reset_reg.all_bits = 0;
7488 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7489 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7491 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7493 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7495 dev_err(&ctrl_info->pci_dev->dev,
7496 "PQI reset failed with error %d\n", rc);
7501 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7504 struct bmic_sense_subsystem_info *sense_info;
7506 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7510 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7514 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7515 sizeof(sense_info->ctrl_serial_number));
7516 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7524 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7527 struct bmic_identify_controller *identify;
7529 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7533 rc = pqi_identify_controller(ctrl_info, identify);
7537 if (get_unaligned_le32(&identify->extra_controller_flags) &
7538 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7539 memcpy(ctrl_info->firmware_version,
7540 identify->firmware_version_long,
7541 sizeof(identify->firmware_version_long));
7543 memcpy(ctrl_info->firmware_version,
7544 identify->firmware_version_short,
7545 sizeof(identify->firmware_version_short));
7546 ctrl_info->firmware_version
7547 [sizeof(identify->firmware_version_short)] = '\0';
7548 snprintf(ctrl_info->firmware_version +
7549 strlen(ctrl_info->firmware_version),
7550 sizeof(ctrl_info->firmware_version) -
7551 sizeof(identify->firmware_version_short),
7553 get_unaligned_le16(&identify->firmware_build_number));
7556 memcpy(ctrl_info->model, identify->product_id,
7557 sizeof(identify->product_id));
7558 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7560 memcpy(ctrl_info->vendor, identify->vendor_id,
7561 sizeof(identify->vendor_id));
7562 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7564 dev_info(&ctrl_info->pci_dev->dev,
7565 "Firmware version: %s\n", ctrl_info->firmware_version);
7573 struct pqi_config_table_section_info {
7574 struct pqi_ctrl_info *ctrl_info;
7577 void __iomem *section_iomem_addr;
7580 static inline bool pqi_is_firmware_feature_supported(
7581 struct pqi_config_table_firmware_features *firmware_features,
7582 unsigned int bit_position)
7584 unsigned int byte_index;
7586 byte_index = bit_position / BITS_PER_BYTE;
7588 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7591 return firmware_features->features_supported[byte_index] &
7592 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7595 static inline bool pqi_is_firmware_feature_enabled(
7596 struct pqi_config_table_firmware_features *firmware_features,
7597 void __iomem *firmware_features_iomem_addr,
7598 unsigned int bit_position)
7600 unsigned int byte_index;
7601 u8 __iomem *features_enabled_iomem_addr;
7603 byte_index = (bit_position / BITS_PER_BYTE) +
7604 (le16_to_cpu(firmware_features->num_elements) * 2);
7606 features_enabled_iomem_addr = firmware_features_iomem_addr +
7607 offsetof(struct pqi_config_table_firmware_features,
7608 features_supported) + byte_index;
7610 return *((__force u8 *)features_enabled_iomem_addr) &
7611 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7614 static inline void pqi_request_firmware_feature(
7615 struct pqi_config_table_firmware_features *firmware_features,
7616 unsigned int bit_position)
7618 unsigned int byte_index;
7620 byte_index = (bit_position / BITS_PER_BYTE) +
7621 le16_to_cpu(firmware_features->num_elements);
7623 firmware_features->features_supported[byte_index] |=
7624 (1 << (bit_position % BITS_PER_BYTE));
7627 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7628 u16 first_section, u16 last_section)
7630 struct pqi_vendor_general_request request;
7632 memset(&request, 0, sizeof(request));
7634 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7635 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7636 &request.header.iu_length);
7637 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7638 &request.function_code);
7639 put_unaligned_le16(first_section,
7640 &request.data.config_table_update.first_section);
7641 put_unaligned_le16(last_section,
7642 &request.data.config_table_update.last_section);
7644 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7647 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7648 struct pqi_config_table_firmware_features *firmware_features,
7649 void __iomem *firmware_features_iomem_addr)
7651 void *features_requested;
7652 void __iomem *features_requested_iomem_addr;
7653 void __iomem *host_max_known_feature_iomem_addr;
7655 features_requested = firmware_features->features_supported +
7656 le16_to_cpu(firmware_features->num_elements);
7658 features_requested_iomem_addr = firmware_features_iomem_addr +
7659 (features_requested - (void *)firmware_features);
7661 memcpy_toio(features_requested_iomem_addr, features_requested,
7662 le16_to_cpu(firmware_features->num_elements));
7664 if (pqi_is_firmware_feature_supported(firmware_features,
7665 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7666 host_max_known_feature_iomem_addr =
7667 features_requested_iomem_addr +
7668 (le16_to_cpu(firmware_features->num_elements) * 2) +
7670 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7671 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7674 return pqi_config_table_update(ctrl_info,
7675 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7676 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7679 struct pqi_firmware_feature {
7681 unsigned int feature_bit;
7684 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7685 struct pqi_firmware_feature *firmware_feature);
7688 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7689 struct pqi_firmware_feature *firmware_feature)
7691 if (!firmware_feature->supported) {
7692 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7693 firmware_feature->feature_name);
7697 if (firmware_feature->enabled) {
7698 dev_info(&ctrl_info->pci_dev->dev,
7699 "%s enabled\n", firmware_feature->feature_name);
7703 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7704 firmware_feature->feature_name);
7707 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7708 struct pqi_firmware_feature *firmware_feature)
7710 switch (firmware_feature->feature_bit) {
7711 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7712 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7714 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7715 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7717 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7718 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7720 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7721 ctrl_info->soft_reset_handshake_supported =
7722 firmware_feature->enabled &&
7723 pqi_read_soft_reset_status(ctrl_info);
7725 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7726 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7728 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7729 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7731 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7732 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7733 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7735 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7736 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7738 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7739 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7743 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7746 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7747 struct pqi_firmware_feature *firmware_feature)
7749 if (firmware_feature->feature_status)
7750 firmware_feature->feature_status(ctrl_info, firmware_feature);
7753 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7755 static struct pqi_firmware_feature pqi_firmware_features[] = {
7757 .feature_name = "Online Firmware Activation",
7758 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7759 .feature_status = pqi_firmware_feature_status,
7762 .feature_name = "Serial Management Protocol",
7763 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7764 .feature_status = pqi_firmware_feature_status,
7767 .feature_name = "Maximum Known Feature",
7768 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7769 .feature_status = pqi_firmware_feature_status,
7772 .feature_name = "RAID 0 Read Bypass",
7773 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7774 .feature_status = pqi_firmware_feature_status,
7777 .feature_name = "RAID 1 Read Bypass",
7778 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7779 .feature_status = pqi_firmware_feature_status,
7782 .feature_name = "RAID 5 Read Bypass",
7783 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7784 .feature_status = pqi_firmware_feature_status,
7787 .feature_name = "RAID 6 Read Bypass",
7788 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7789 .feature_status = pqi_firmware_feature_status,
7792 .feature_name = "RAID 0 Write Bypass",
7793 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7794 .feature_status = pqi_firmware_feature_status,
7797 .feature_name = "RAID 1 Write Bypass",
7798 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7799 .feature_status = pqi_ctrl_update_feature_flags,
7802 .feature_name = "RAID 5 Write Bypass",
7803 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7804 .feature_status = pqi_ctrl_update_feature_flags,
7807 .feature_name = "RAID 6 Write Bypass",
7808 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7809 .feature_status = pqi_ctrl_update_feature_flags,
7812 .feature_name = "New Soft Reset Handshake",
7813 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7814 .feature_status = pqi_ctrl_update_feature_flags,
7817 .feature_name = "RAID IU Timeout",
7818 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7819 .feature_status = pqi_ctrl_update_feature_flags,
7822 .feature_name = "TMF IU Timeout",
7823 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7824 .feature_status = pqi_ctrl_update_feature_flags,
7827 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7828 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7829 .feature_status = pqi_firmware_feature_status,
7832 .feature_name = "Firmware Triage",
7833 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7834 .feature_status = pqi_ctrl_update_feature_flags,
7837 .feature_name = "RPL Extended Formats 4 and 5",
7838 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7839 .feature_status = pqi_ctrl_update_feature_flags,
7842 .feature_name = "Multi-LUN Target",
7843 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7844 .feature_status = pqi_ctrl_update_feature_flags,
7848 static void pqi_process_firmware_features(
7849 struct pqi_config_table_section_info *section_info)
7852 struct pqi_ctrl_info *ctrl_info;
7853 struct pqi_config_table_firmware_features *firmware_features;
7854 void __iomem *firmware_features_iomem_addr;
7856 unsigned int num_features_supported;
7858 ctrl_info = section_info->ctrl_info;
7859 firmware_features = section_info->section;
7860 firmware_features_iomem_addr = section_info->section_iomem_addr;
7862 for (i = 0, num_features_supported = 0;
7863 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7864 if (pqi_is_firmware_feature_supported(firmware_features,
7865 pqi_firmware_features[i].feature_bit)) {
7866 pqi_firmware_features[i].supported = true;
7867 num_features_supported++;
7869 pqi_firmware_feature_update(ctrl_info,
7870 &pqi_firmware_features[i]);
7874 if (num_features_supported == 0)
7877 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7878 if (!pqi_firmware_features[i].supported)
7880 pqi_request_firmware_feature(firmware_features,
7881 pqi_firmware_features[i].feature_bit);
7884 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7885 firmware_features_iomem_addr);
7887 dev_err(&ctrl_info->pci_dev->dev,
7888 "failed to enable firmware features in PQI configuration table\n");
7889 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7890 if (!pqi_firmware_features[i].supported)
7892 pqi_firmware_feature_update(ctrl_info,
7893 &pqi_firmware_features[i]);
7898 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7899 if (!pqi_firmware_features[i].supported)
7901 if (pqi_is_firmware_feature_enabled(firmware_features,
7902 firmware_features_iomem_addr,
7903 pqi_firmware_features[i].feature_bit)) {
7904 pqi_firmware_features[i].enabled = true;
7906 pqi_firmware_feature_update(ctrl_info,
7907 &pqi_firmware_features[i]);
7911 static void pqi_init_firmware_features(void)
7915 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7916 pqi_firmware_features[i].supported = false;
7917 pqi_firmware_features[i].enabled = false;
7921 static void pqi_process_firmware_features_section(
7922 struct pqi_config_table_section_info *section_info)
7924 mutex_lock(&pqi_firmware_features_mutex);
7925 pqi_init_firmware_features();
7926 pqi_process_firmware_features(section_info);
7927 mutex_unlock(&pqi_firmware_features_mutex);
7931 * Reset all controller settings that can be initialized during the processing
7932 * of the PQI Configuration Table.
7935 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7937 ctrl_info->heartbeat_counter = NULL;
7938 ctrl_info->soft_reset_status = NULL;
7939 ctrl_info->soft_reset_handshake_supported = false;
7940 ctrl_info->enable_r1_writes = false;
7941 ctrl_info->enable_r5_writes = false;
7942 ctrl_info->enable_r6_writes = false;
7943 ctrl_info->raid_iu_timeout_supported = false;
7944 ctrl_info->tmf_iu_timeout_supported = false;
7945 ctrl_info->firmware_triage_supported = false;
7946 ctrl_info->rpl_extended_format_4_5_supported = false;
7947 ctrl_info->multi_lun_device_supported = false;
7950 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7954 bool firmware_feature_section_present;
7955 void __iomem *table_iomem_addr;
7956 struct pqi_config_table *config_table;
7957 struct pqi_config_table_section_header *section;
7958 struct pqi_config_table_section_info section_info;
7959 struct pqi_config_table_section_info feature_section_info = {0};
7961 table_length = ctrl_info->config_table_length;
7962 if (table_length == 0)
7965 config_table = kmalloc(table_length, GFP_KERNEL);
7966 if (!config_table) {
7967 dev_err(&ctrl_info->pci_dev->dev,
7968 "failed to allocate memory for PQI configuration table\n");
7973 * Copy the config table contents from I/O memory space into the
7976 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7977 memcpy_fromio(config_table, table_iomem_addr, table_length);
7979 firmware_feature_section_present = false;
7980 section_info.ctrl_info = ctrl_info;
7981 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7983 while (section_offset) {
7984 section = (void *)config_table + section_offset;
7986 section_info.section = section;
7987 section_info.section_offset = section_offset;
7988 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7990 switch (get_unaligned_le16(§ion->section_id)) {
7991 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7992 firmware_feature_section_present = true;
7993 feature_section_info = section_info;
7995 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7996 if (pqi_disable_heartbeat)
7997 dev_warn(&ctrl_info->pci_dev->dev,
7998 "heartbeat disabled by module parameter\n");
8000 ctrl_info->heartbeat_counter =
8003 offsetof(struct pqi_config_table_heartbeat,
8006 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8007 ctrl_info->soft_reset_status =
8010 offsetof(struct pqi_config_table_soft_reset,
8015 section_offset = get_unaligned_le16(§ion->next_section_offset);
8019 * We process the firmware feature section after all other sections
8020 * have been processed so that the feature bit callbacks can take
8021 * into account the settings configured by other sections.
8023 if (firmware_feature_section_present)
8024 pqi_process_firmware_features_section(&feature_section_info);
8026 kfree(config_table);
8031 /* Switches the controller from PQI mode back into SIS mode. */
8033 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8037 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8038 rc = pqi_reset(ctrl_info);
8041 rc = sis_reenable_sis_mode(ctrl_info);
8043 dev_err(&ctrl_info->pci_dev->dev,
8044 "re-enabling SIS mode failed with error %d\n", rc);
8047 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8053 * If the controller isn't already in SIS mode, this function forces it into
8057 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8059 if (!sis_is_firmware_running(ctrl_info))
8062 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8065 if (sis_is_kernel_up(ctrl_info)) {
8066 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8070 return pqi_revert_to_sis_mode(ctrl_info);
8073 static void pqi_perform_lockup_action(void)
8075 switch (pqi_lockup_action) {
8077 panic("FATAL: Smart Family Controller lockup detected");
8080 emergency_restart();
8088 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8093 if (reset_devices) {
8094 if (pqi_is_fw_triage_supported(ctrl_info)) {
8095 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8099 sis_soft_reset(ctrl_info);
8100 ssleep(PQI_POST_RESET_DELAY_SECS);
8102 rc = pqi_force_sis_mode(ctrl_info);
8108 * Wait until the controller is ready to start accepting SIS
8111 rc = sis_wait_for_ctrl_ready(ctrl_info);
8113 if (reset_devices) {
8114 dev_err(&ctrl_info->pci_dev->dev,
8115 "kdump init failed with error %d\n", rc);
8116 pqi_lockup_action = REBOOT;
8117 pqi_perform_lockup_action();
8123 * Get the controller properties. This allows us to determine
8124 * whether or not it supports PQI mode.
8126 rc = sis_get_ctrl_properties(ctrl_info);
8128 dev_err(&ctrl_info->pci_dev->dev,
8129 "error obtaining controller properties\n");
8133 rc = sis_get_pqi_capabilities(ctrl_info);
8135 dev_err(&ctrl_info->pci_dev->dev,
8136 "error obtaining controller capabilities\n");
8140 product_id = sis_get_product_id(ctrl_info);
8141 ctrl_info->product_id = (u8)product_id;
8142 ctrl_info->product_revision = (u8)(product_id >> 8);
8144 if (reset_devices) {
8145 if (ctrl_info->max_outstanding_requests >
8146 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8147 ctrl_info->max_outstanding_requests =
8148 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8150 if (ctrl_info->max_outstanding_requests >
8151 PQI_MAX_OUTSTANDING_REQUESTS)
8152 ctrl_info->max_outstanding_requests =
8153 PQI_MAX_OUTSTANDING_REQUESTS;
8156 pqi_calculate_io_resources(ctrl_info);
8158 rc = pqi_alloc_error_buffer(ctrl_info);
8160 dev_err(&ctrl_info->pci_dev->dev,
8161 "failed to allocate PQI error buffer\n");
8166 * If the function we are about to call succeeds, the
8167 * controller will transition from legacy SIS mode
8170 rc = sis_init_base_struct_addr(ctrl_info);
8172 dev_err(&ctrl_info->pci_dev->dev,
8173 "error initializing PQI mode\n");
8177 /* Wait for the controller to complete the SIS -> PQI transition. */
8178 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8180 dev_err(&ctrl_info->pci_dev->dev,
8181 "transition to PQI mode failed\n");
8185 /* From here on, we are running in PQI mode. */
8186 ctrl_info->pqi_mode_enabled = true;
8187 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8189 rc = pqi_alloc_admin_queues(ctrl_info);
8191 dev_err(&ctrl_info->pci_dev->dev,
8192 "failed to allocate admin queues\n");
8196 rc = pqi_create_admin_queues(ctrl_info);
8198 dev_err(&ctrl_info->pci_dev->dev,
8199 "error creating admin queues\n");
8203 rc = pqi_report_device_capability(ctrl_info);
8205 dev_err(&ctrl_info->pci_dev->dev,
8206 "obtaining device capability failed\n");
8210 rc = pqi_validate_device_capability(ctrl_info);
8214 pqi_calculate_queue_resources(ctrl_info);
8216 rc = pqi_enable_msix_interrupts(ctrl_info);
8220 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8221 ctrl_info->max_msix_vectors =
8222 ctrl_info->num_msix_vectors_enabled;
8223 pqi_calculate_queue_resources(ctrl_info);
8226 rc = pqi_alloc_io_resources(ctrl_info);
8230 rc = pqi_alloc_operational_queues(ctrl_info);
8232 dev_err(&ctrl_info->pci_dev->dev,
8233 "failed to allocate operational queues\n");
8237 pqi_init_operational_queues(ctrl_info);
8239 rc = pqi_create_queues(ctrl_info);
8243 rc = pqi_request_irqs(ctrl_info);
8247 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8249 ctrl_info->controller_online = true;
8251 rc = pqi_process_config_table(ctrl_info);
8255 pqi_start_heartbeat_timer(ctrl_info);
8257 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8258 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8259 if (rc) { /* Supported features not returned correctly. */
8260 dev_err(&ctrl_info->pci_dev->dev,
8261 "error obtaining advanced RAID bypass configuration\n");
8264 ctrl_info->ciss_report_log_flags |=
8265 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8268 rc = pqi_enable_events(ctrl_info);
8270 dev_err(&ctrl_info->pci_dev->dev,
8271 "error enabling events\n");
8275 /* Register with the SCSI subsystem. */
8276 rc = pqi_register_scsi(ctrl_info);
8280 rc = pqi_get_ctrl_product_details(ctrl_info);
8282 dev_err(&ctrl_info->pci_dev->dev,
8283 "error obtaining product details\n");
8287 rc = pqi_get_ctrl_serial_number(ctrl_info);
8289 dev_err(&ctrl_info->pci_dev->dev,
8290 "error obtaining ctrl serial number\n");
8294 rc = pqi_set_diag_rescan(ctrl_info);
8296 dev_err(&ctrl_info->pci_dev->dev,
8297 "error enabling multi-lun rescan\n");
8301 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8303 dev_err(&ctrl_info->pci_dev->dev,
8304 "error updating host wellness\n");
8308 pqi_schedule_update_time_worker(ctrl_info);
8310 pqi_scan_scsi_devices(ctrl_info);
8315 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8318 struct pqi_admin_queues *admin_queues;
8319 struct pqi_event_queue *event_queue;
8321 admin_queues = &ctrl_info->admin_queues;
8322 admin_queues->iq_pi_copy = 0;
8323 admin_queues->oq_ci_copy = 0;
8324 writel(0, admin_queues->oq_pi);
8326 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8327 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8328 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8329 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8331 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8332 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8333 writel(0, ctrl_info->queue_groups[i].oq_pi);
8336 event_queue = &ctrl_info->event_queue;
8337 writel(0, event_queue->oq_pi);
8338 event_queue->oq_ci_copy = 0;
8341 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8345 rc = pqi_force_sis_mode(ctrl_info);
8350 * Wait until the controller is ready to start accepting SIS
8353 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8358 * Get the controller properties. This allows us to determine
8359 * whether or not it supports PQI mode.
8361 rc = sis_get_ctrl_properties(ctrl_info);
8363 dev_err(&ctrl_info->pci_dev->dev,
8364 "error obtaining controller properties\n");
8368 rc = sis_get_pqi_capabilities(ctrl_info);
8370 dev_err(&ctrl_info->pci_dev->dev,
8371 "error obtaining controller capabilities\n");
8376 * If the function we are about to call succeeds, the
8377 * controller will transition from legacy SIS mode
8380 rc = sis_init_base_struct_addr(ctrl_info);
8382 dev_err(&ctrl_info->pci_dev->dev,
8383 "error initializing PQI mode\n");
8387 /* Wait for the controller to complete the SIS -> PQI transition. */
8388 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8390 dev_err(&ctrl_info->pci_dev->dev,
8391 "transition to PQI mode failed\n");
8395 /* From here on, we are running in PQI mode. */
8396 ctrl_info->pqi_mode_enabled = true;
8397 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8399 pqi_reinit_queues(ctrl_info);
8401 rc = pqi_create_admin_queues(ctrl_info);
8403 dev_err(&ctrl_info->pci_dev->dev,
8404 "error creating admin queues\n");
8408 rc = pqi_create_queues(ctrl_info);
8412 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8414 ctrl_info->controller_online = true;
8415 pqi_ctrl_unblock_requests(ctrl_info);
8417 pqi_ctrl_reset_config(ctrl_info);
8419 rc = pqi_process_config_table(ctrl_info);
8423 pqi_start_heartbeat_timer(ctrl_info);
8425 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8426 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8428 dev_err(&ctrl_info->pci_dev->dev,
8429 "error obtaining advanced RAID bypass configuration\n");
8432 ctrl_info->ciss_report_log_flags |=
8433 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8436 rc = pqi_enable_events(ctrl_info);
8438 dev_err(&ctrl_info->pci_dev->dev,
8439 "error enabling events\n");
8443 rc = pqi_get_ctrl_product_details(ctrl_info);
8445 dev_err(&ctrl_info->pci_dev->dev,
8446 "error obtaining product details\n");
8450 rc = pqi_set_diag_rescan(ctrl_info);
8452 dev_err(&ctrl_info->pci_dev->dev,
8453 "error enabling multi-lun rescan\n");
8457 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8459 dev_err(&ctrl_info->pci_dev->dev,
8460 "error updating host wellness\n");
8464 if (pqi_ofa_in_progress(ctrl_info))
8465 pqi_ctrl_unblock_scan(ctrl_info);
8467 pqi_scan_scsi_devices(ctrl_info);
8472 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8476 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8477 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8479 return pcibios_err_to_errno(rc);
8482 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8487 rc = pci_enable_device(ctrl_info->pci_dev);
8489 dev_err(&ctrl_info->pci_dev->dev,
8490 "failed to enable PCI device\n");
8494 if (sizeof(dma_addr_t) > 4)
8495 mask = DMA_BIT_MASK(64);
8497 mask = DMA_BIT_MASK(32);
8499 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8501 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8502 goto disable_device;
8505 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8507 dev_err(&ctrl_info->pci_dev->dev,
8508 "failed to obtain PCI resources\n");
8509 goto disable_device;
8512 ctrl_info->iomem_base = ioremap(pci_resource_start(
8513 ctrl_info->pci_dev, 0),
8514 pci_resource_len(ctrl_info->pci_dev, 0));
8515 if (!ctrl_info->iomem_base) {
8516 dev_err(&ctrl_info->pci_dev->dev,
8517 "failed to map memory for controller registers\n");
8519 goto release_regions;
8522 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8524 /* Increase the PCIe completion timeout. */
8525 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8526 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8528 dev_err(&ctrl_info->pci_dev->dev,
8529 "failed to set PCIe completion timeout\n");
8530 goto release_regions;
8533 /* Enable bus mastering. */
8534 pci_set_master(ctrl_info->pci_dev);
8536 ctrl_info->registers = ctrl_info->iomem_base;
8537 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8539 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8544 pci_release_regions(ctrl_info->pci_dev);
8546 pci_disable_device(ctrl_info->pci_dev);
8551 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8553 iounmap(ctrl_info->iomem_base);
8554 pci_release_regions(ctrl_info->pci_dev);
8555 if (pci_is_enabled(ctrl_info->pci_dev))
8556 pci_disable_device(ctrl_info->pci_dev);
8557 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8560 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8562 struct pqi_ctrl_info *ctrl_info;
8564 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8565 GFP_KERNEL, numa_node);
8569 mutex_init(&ctrl_info->scan_mutex);
8570 mutex_init(&ctrl_info->lun_reset_mutex);
8571 mutex_init(&ctrl_info->ofa_mutex);
8573 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8574 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8576 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8577 atomic_set(&ctrl_info->num_interrupts, 0);
8579 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8580 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8582 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8583 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8585 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8586 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8588 sema_init(&ctrl_info->sync_request_sem,
8589 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8590 init_waitqueue_head(&ctrl_info->block_requests_wait);
8592 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8593 ctrl_info->irq_mode = IRQ_MODE_NONE;
8594 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8596 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8597 ctrl_info->max_transfer_encrypted_sas_sata =
8598 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8599 ctrl_info->max_transfer_encrypted_nvme =
8600 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8601 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8602 ctrl_info->max_write_raid_1_10_2drive = ~0;
8603 ctrl_info->max_write_raid_1_10_3drive = ~0;
8604 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8609 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8614 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8616 pqi_free_irqs(ctrl_info);
8617 pqi_disable_msix_interrupts(ctrl_info);
8620 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8622 pqi_free_interrupts(ctrl_info);
8623 if (ctrl_info->queue_memory_base)
8624 dma_free_coherent(&ctrl_info->pci_dev->dev,
8625 ctrl_info->queue_memory_length,
8626 ctrl_info->queue_memory_base,
8627 ctrl_info->queue_memory_base_dma_handle);
8628 if (ctrl_info->admin_queue_memory_base)
8629 dma_free_coherent(&ctrl_info->pci_dev->dev,
8630 ctrl_info->admin_queue_memory_length,
8631 ctrl_info->admin_queue_memory_base,
8632 ctrl_info->admin_queue_memory_base_dma_handle);
8633 pqi_free_all_io_requests(ctrl_info);
8634 if (ctrl_info->error_buffer)
8635 dma_free_coherent(&ctrl_info->pci_dev->dev,
8636 ctrl_info->error_buffer_length,
8637 ctrl_info->error_buffer,
8638 ctrl_info->error_buffer_dma_handle);
8639 if (ctrl_info->iomem_base)
8640 pqi_cleanup_pci_init(ctrl_info);
8641 pqi_free_ctrl_info(ctrl_info);
8644 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8646 ctrl_info->controller_online = false;
8647 pqi_stop_heartbeat_timer(ctrl_info);
8648 pqi_ctrl_block_requests(ctrl_info);
8649 pqi_cancel_rescan_worker(ctrl_info);
8650 pqi_cancel_update_time_worker(ctrl_info);
8651 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8652 pqi_fail_all_outstanding_requests(ctrl_info);
8653 ctrl_info->pqi_mode_enabled = false;
8655 pqi_unregister_scsi(ctrl_info);
8656 if (ctrl_info->pqi_mode_enabled)
8657 pqi_revert_to_sis_mode(ctrl_info);
8658 pqi_free_ctrl_resources(ctrl_info);
8661 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8663 pqi_ctrl_block_scan(ctrl_info);
8664 pqi_scsi_block_requests(ctrl_info);
8665 pqi_ctrl_block_device_reset(ctrl_info);
8666 pqi_ctrl_block_requests(ctrl_info);
8667 pqi_ctrl_wait_until_quiesced(ctrl_info);
8668 pqi_stop_heartbeat_timer(ctrl_info);
8671 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8673 pqi_start_heartbeat_timer(ctrl_info);
8674 pqi_ctrl_unblock_requests(ctrl_info);
8675 pqi_ctrl_unblock_device_reset(ctrl_info);
8676 pqi_scsi_unblock_requests(ctrl_info);
8677 pqi_ctrl_unblock_scan(ctrl_info);
8680 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8685 struct pqi_ofa_memory *ofap;
8686 struct pqi_sg_descriptor *mem_descriptor;
8687 dma_addr_t dma_handle;
8689 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8691 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8692 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8695 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8696 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8699 dev = &ctrl_info->pci_dev->dev;
8701 for (i = 0; i < sg_count; i++) {
8702 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8703 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8704 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8705 goto out_free_chunks;
8706 mem_descriptor = &ofap->sg_descriptor[i];
8707 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8708 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8711 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8712 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8713 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8719 mem_descriptor = &ofap->sg_descriptor[i];
8720 dma_free_coherent(dev, chunk_size,
8721 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8722 get_unaligned_le64(&mem_descriptor->address));
8724 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8730 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8736 if (ctrl_info->ofa_bytes_requested == 0)
8739 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8740 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8741 min_chunk_size = PAGE_ALIGN(min_chunk_size);
8743 for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8744 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8747 chunk_size = PAGE_ALIGN(chunk_size);
8753 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8756 struct pqi_ofa_memory *ofap;
8758 dev = &ctrl_info->pci_dev->dev;
8760 ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8761 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8765 ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8767 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8769 "failed to allocate host buffer for Online Firmware Activation\n");
8770 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8771 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8775 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8776 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8779 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8783 struct pqi_ofa_memory *ofap;
8784 struct pqi_sg_descriptor *mem_descriptor;
8785 unsigned int num_memory_descriptors;
8787 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8791 dev = &ctrl_info->pci_dev->dev;
8793 if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8796 mem_descriptor = ofap->sg_descriptor;
8797 num_memory_descriptors =
8798 get_unaligned_le16(&ofap->num_memory_descriptors);
8800 for (i = 0; i < num_memory_descriptors; i++) {
8801 dma_free_coherent(dev,
8802 get_unaligned_le32(&mem_descriptor[i].length),
8803 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8804 get_unaligned_le64(&mem_descriptor[i].address));
8806 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8809 dma_free_coherent(dev, sizeof(*ofap), ofap,
8810 ctrl_info->pqi_ofa_mem_dma_handle);
8811 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8814 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8817 struct pqi_vendor_general_request request;
8818 struct pqi_ofa_memory *ofap;
8820 memset(&request, 0, sizeof(request));
8822 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8823 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8824 &request.header.iu_length);
8825 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8826 &request.function_code);
8828 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8831 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8832 get_unaligned_le16(&ofap->num_memory_descriptors) *
8833 sizeof(struct pqi_sg_descriptor);
8835 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8836 &request.data.ofa_memory_allocation.buffer_address);
8837 put_unaligned_le32(buffer_length,
8838 &request.data.ofa_memory_allocation.buffer_length);
8841 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8844 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8848 return pqi_ctrl_init_resume(ctrl_info);
8851 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8852 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8853 .status = SAM_STAT_CHECK_CONDITION,
8856 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8859 struct pqi_io_request *io_request;
8860 struct scsi_cmnd *scmd;
8861 struct scsi_device *sdev;
8863 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8864 io_request = &ctrl_info->io_request_pool[i];
8865 if (atomic_read(&io_request->refcount) == 0)
8868 scmd = io_request->scmd;
8870 sdev = scmd->device;
8871 if (!sdev || !scsi_device_online(sdev)) {
8872 pqi_free_io_request(io_request);
8875 set_host_byte(scmd, DID_NO_CONNECT);
8878 io_request->status = -ENXIO;
8879 io_request->error_info =
8880 &pqi_ctrl_offline_raid_error_info;
8883 io_request->io_complete_callback(io_request,
8884 io_request->context);
8888 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8890 pqi_perform_lockup_action();
8891 pqi_stop_heartbeat_timer(ctrl_info);
8892 pqi_free_interrupts(ctrl_info);
8893 pqi_cancel_rescan_worker(ctrl_info);
8894 pqi_cancel_update_time_worker(ctrl_info);
8895 pqi_ctrl_wait_until_quiesced(ctrl_info);
8896 pqi_fail_all_outstanding_requests(ctrl_info);
8897 pqi_ctrl_unblock_requests(ctrl_info);
8900 static void pqi_ctrl_offline_worker(struct work_struct *work)
8902 struct pqi_ctrl_info *ctrl_info;
8904 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8905 pqi_take_ctrl_offline_deferred(ctrl_info);
8908 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8909 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
8911 if (!ctrl_info->controller_online)
8914 ctrl_info->controller_online = false;
8915 ctrl_info->pqi_mode_enabled = false;
8916 pqi_ctrl_block_requests(ctrl_info);
8917 if (!pqi_disable_ctrl_shutdown)
8918 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8919 pci_disable_device(ctrl_info->pci_dev);
8920 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8921 schedule_work(&ctrl_info->ctrl_offline_work);
8924 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8925 const struct pci_device_id *id)
8927 char *ctrl_description;
8929 if (id->driver_data)
8930 ctrl_description = (char *)id->driver_data;
8932 ctrl_description = "Microchip Smart Family Controller";
8934 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8937 static int pqi_pci_probe(struct pci_dev *pci_dev,
8938 const struct pci_device_id *id)
8942 struct pqi_ctrl_info *ctrl_info;
8944 pqi_print_ctrl_info(pci_dev, id);
8946 if (pqi_disable_device_id_wildcards &&
8947 id->subvendor == PCI_ANY_ID &&
8948 id->subdevice == PCI_ANY_ID) {
8949 dev_warn(&pci_dev->dev,
8950 "controller not probed because device ID wildcards are disabled\n");
8954 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8955 dev_warn(&pci_dev->dev,
8956 "controller device ID matched using wildcards\n");
8958 node = dev_to_node(&pci_dev->dev);
8959 if (node == NUMA_NO_NODE) {
8960 node = cpu_to_node(0);
8961 if (node == NUMA_NO_NODE)
8963 set_dev_node(&pci_dev->dev, node);
8966 ctrl_info = pqi_alloc_ctrl_info(node);
8968 dev_err(&pci_dev->dev,
8969 "failed to allocate controller info block\n");
8972 ctrl_info->numa_node = node;
8974 ctrl_info->pci_dev = pci_dev;
8976 rc = pqi_pci_init(ctrl_info);
8980 rc = pqi_ctrl_init(ctrl_info);
8987 pqi_remove_ctrl(ctrl_info);
8992 static void pqi_pci_remove(struct pci_dev *pci_dev)
8994 struct pqi_ctrl_info *ctrl_info;
8998 ctrl_info = pci_get_drvdata(pci_dev);
9002 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9003 if (vendor_id == 0xffff)
9004 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9006 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9008 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9009 rc = pqi_flush_cache(ctrl_info, RESTART);
9011 dev_err(&pci_dev->dev,
9012 "unable to flush controller cache during remove\n");
9015 pqi_remove_ctrl(ctrl_info);
9018 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9021 struct pqi_io_request *io_request;
9022 struct scsi_cmnd *scmd;
9024 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9025 io_request = &ctrl_info->io_request_pool[i];
9026 if (atomic_read(&io_request->refcount) == 0)
9028 scmd = io_request->scmd;
9029 WARN_ON(scmd != NULL); /* IO command from SML */
9030 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/
9034 static void pqi_shutdown(struct pci_dev *pci_dev)
9037 struct pqi_ctrl_info *ctrl_info;
9038 enum bmic_flush_cache_shutdown_event shutdown_event;
9040 ctrl_info = pci_get_drvdata(pci_dev);
9042 dev_err(&pci_dev->dev,
9043 "cache could not be flushed\n");
9047 pqi_wait_until_ofa_finished(ctrl_info);
9049 pqi_scsi_block_requests(ctrl_info);
9050 pqi_ctrl_block_device_reset(ctrl_info);
9051 pqi_ctrl_block_requests(ctrl_info);
9052 pqi_ctrl_wait_until_quiesced(ctrl_info);
9054 if (system_state == SYSTEM_RESTART)
9055 shutdown_event = RESTART;
9057 shutdown_event = SHUTDOWN;
9060 * Write all data in the controller's battery-backed cache to
9063 rc = pqi_flush_cache(ctrl_info, shutdown_event);
9065 dev_err(&pci_dev->dev,
9066 "unable to flush controller cache\n");
9068 pqi_crash_if_pending_command(ctrl_info);
9069 pqi_reset(ctrl_info);
9072 static void pqi_process_lockup_action_param(void)
9076 if (!pqi_lockup_action_param)
9079 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9080 if (strcmp(pqi_lockup_action_param,
9081 pqi_lockup_actions[i].name) == 0) {
9082 pqi_lockup_action = pqi_lockup_actions[i].action;
9087 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9088 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9091 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
9092 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
9094 static void pqi_process_ctrl_ready_timeout_param(void)
9096 if (pqi_ctrl_ready_timeout_secs == 0)
9099 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9100 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9101 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9102 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9103 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9104 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9105 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9106 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9109 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9112 static void pqi_process_module_params(void)
9114 pqi_process_lockup_action_param();
9115 pqi_process_ctrl_ready_timeout_param();
9118 #if defined(CONFIG_PM)
9120 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9122 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9128 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9130 struct pci_dev *pci_dev;
9131 struct pqi_ctrl_info *ctrl_info;
9133 pci_dev = to_pci_dev(dev);
9134 ctrl_info = pci_get_drvdata(pci_dev);
9136 pqi_wait_until_ofa_finished(ctrl_info);
9138 pqi_ctrl_block_scan(ctrl_info);
9139 pqi_scsi_block_requests(ctrl_info);
9140 pqi_ctrl_block_device_reset(ctrl_info);
9141 pqi_ctrl_block_requests(ctrl_info);
9142 pqi_ctrl_wait_until_quiesced(ctrl_info);
9145 enum bmic_flush_cache_shutdown_event shutdown_event;
9147 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9148 pqi_flush_cache(ctrl_info, shutdown_event);
9151 pqi_stop_heartbeat_timer(ctrl_info);
9152 pqi_crash_if_pending_command(ctrl_info);
9153 pqi_free_irqs(ctrl_info);
9155 ctrl_info->controller_online = false;
9156 ctrl_info->pqi_mode_enabled = false;
9161 static __maybe_unused int pqi_suspend(struct device *dev)
9163 return pqi_suspend_or_freeze(dev, true);
9166 static int pqi_resume_or_restore(struct device *dev)
9169 struct pci_dev *pci_dev;
9170 struct pqi_ctrl_info *ctrl_info;
9172 pci_dev = to_pci_dev(dev);
9173 ctrl_info = pci_get_drvdata(pci_dev);
9175 rc = pqi_request_irqs(ctrl_info);
9179 pqi_ctrl_unblock_device_reset(ctrl_info);
9180 pqi_ctrl_unblock_requests(ctrl_info);
9181 pqi_scsi_unblock_requests(ctrl_info);
9182 pqi_ctrl_unblock_scan(ctrl_info);
9184 ssleep(PQI_POST_RESET_DELAY_SECS);
9186 return pqi_ctrl_init_resume(ctrl_info);
9189 static int pqi_freeze(struct device *dev)
9191 return pqi_suspend_or_freeze(dev, false);
9194 static int pqi_thaw(struct device *dev)
9197 struct pci_dev *pci_dev;
9198 struct pqi_ctrl_info *ctrl_info;
9200 pci_dev = to_pci_dev(dev);
9201 ctrl_info = pci_get_drvdata(pci_dev);
9203 rc = pqi_request_irqs(ctrl_info);
9207 ctrl_info->controller_online = true;
9208 ctrl_info->pqi_mode_enabled = true;
9210 pqi_ctrl_unblock_device_reset(ctrl_info);
9211 pqi_ctrl_unblock_requests(ctrl_info);
9212 pqi_scsi_unblock_requests(ctrl_info);
9213 pqi_ctrl_unblock_scan(ctrl_info);
9218 static int pqi_poweroff(struct device *dev)
9220 struct pci_dev *pci_dev;
9221 struct pqi_ctrl_info *ctrl_info;
9222 enum bmic_flush_cache_shutdown_event shutdown_event;
9224 pci_dev = to_pci_dev(dev);
9225 ctrl_info = pci_get_drvdata(pci_dev);
9227 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9228 pqi_flush_cache(ctrl_info, shutdown_event);
9233 static const struct dev_pm_ops pqi_pm_ops = {
9234 .suspend = pqi_suspend,
9235 .resume = pqi_resume_or_restore,
9236 .freeze = pqi_freeze,
9238 .poweroff = pqi_poweroff,
9239 .restore = pqi_resume_or_restore,
9242 #endif /* CONFIG_PM */
9244 /* Define the PCI IDs for the controllers that we support. */
9245 static const struct pci_device_id pqi_pci_id_table[] = {
9247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9444 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9448 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9452 PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9456 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9460 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9464 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9468 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9472 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9476 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9480 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9484 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9488 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9492 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9496 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9500 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9504 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9507 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9508 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9511 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9512 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9515 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9516 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9519 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9520 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9523 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9524 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9528 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9532 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9536 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9540 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9544 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9548 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9552 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9556 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9560 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9564 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9568 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9572 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9576 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9580 PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9584 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9588 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9592 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9596 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9599 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9600 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9603 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9604 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9607 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9608 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9611 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9612 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9615 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9616 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9619 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9620 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9623 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9624 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9627 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9628 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9631 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9632 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9635 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9636 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9639 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9640 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9643 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9644 PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9647 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9648 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9652 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9656 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9660 PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9664 PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9668 PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9672 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9676 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9680 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9684 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9688 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9692 PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9696 PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9700 PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9704 PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9708 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9712 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9716 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9720 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9724 PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9728 PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
9731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9732 PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
9735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9736 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9740 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9744 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9748 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9752 PCI_VENDOR_ID_DELL, 0x1fe0)
9755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9756 PCI_VENDOR_ID_HP, 0x0600)
9759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9760 PCI_VENDOR_ID_HP, 0x0601)
9763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9764 PCI_VENDOR_ID_HP, 0x0602)
9767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9768 PCI_VENDOR_ID_HP, 0x0603)
9771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9772 PCI_VENDOR_ID_HP, 0x0609)
9775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9776 PCI_VENDOR_ID_HP, 0x0650)
9779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9780 PCI_VENDOR_ID_HP, 0x0651)
9783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9784 PCI_VENDOR_ID_HP, 0x0652)
9787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9788 PCI_VENDOR_ID_HP, 0x0653)
9791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9792 PCI_VENDOR_ID_HP, 0x0654)
9795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9796 PCI_VENDOR_ID_HP, 0x0655)
9799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9800 PCI_VENDOR_ID_HP, 0x0700)
9803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9804 PCI_VENDOR_ID_HP, 0x0701)
9807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9808 PCI_VENDOR_ID_HP, 0x1001)
9811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9812 PCI_VENDOR_ID_HP, 0x1002)
9815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9816 PCI_VENDOR_ID_HP, 0x1100)
9819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9820 PCI_VENDOR_ID_HP, 0x1101)
9823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9872 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9964 PCI_VENDOR_ID_LENOVO, 0x0220)
9967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9968 PCI_VENDOR_ID_LENOVO, 0x0221)
9971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9972 PCI_VENDOR_ID_LENOVO, 0x0520)
9975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9976 PCI_VENDOR_ID_LENOVO, 0x0522)
9979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9980 PCI_VENDOR_ID_LENOVO, 0x0620)
9983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9984 PCI_VENDOR_ID_LENOVO, 0x0621)
9987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9988 PCI_VENDOR_ID_LENOVO, 0x0622)
9991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9992 PCI_VENDOR_ID_LENOVO, 0x0623)
9995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10056 PCI_ANY_ID, PCI_ANY_ID)
10061 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10063 static struct pci_driver pqi_pci_driver = {
10064 .name = DRIVER_NAME_SHORT,
10065 .id_table = pqi_pci_id_table,
10066 .probe = pqi_pci_probe,
10067 .remove = pqi_pci_remove,
10068 .shutdown = pqi_shutdown,
10069 #if defined(CONFIG_PM)
10076 static int __init pqi_init(void)
10080 pr_info(DRIVER_NAME "\n");
10081 pqi_verify_structures();
10082 sis_verify_structures();
10084 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10085 if (!pqi_sas_transport_template)
10088 pqi_process_module_params();
10090 rc = pci_register_driver(&pqi_pci_driver);
10092 sas_release_transport(pqi_sas_transport_template);
10097 static void __exit pqi_cleanup(void)
10099 pci_unregister_driver(&pqi_pci_driver);
10100 sas_release_transport(pqi_sas_transport_template);
10103 module_init(pqi_init);
10104 module_exit(pqi_cleanup);
10106 static void pqi_verify_structures(void)
10108 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10109 sis_host_to_ctrl_doorbell) != 0x20);
10110 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10111 sis_interrupt_mask) != 0x34);
10112 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10113 sis_ctrl_to_host_doorbell) != 0x9c);
10114 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10115 sis_ctrl_to_host_doorbell_clear) != 0xa0);
10116 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10117 sis_driver_scratch) != 0xb0);
10118 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10119 sis_product_identifier) != 0xb4);
10120 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10121 sis_firmware_status) != 0xbc);
10122 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10123 sis_ctrl_shutdown_reason_code) != 0xcc);
10124 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10125 sis_mailbox) != 0x1000);
10126 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10127 pqi_registers) != 0x4000);
10129 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10131 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10132 iu_length) != 0x2);
10133 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10134 response_queue_id) != 0x4);
10135 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10136 driver_flags) != 0x6);
10137 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10139 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10141 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10142 service_response) != 0x1);
10143 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10144 data_present) != 0x2);
10145 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10147 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10148 residual_count) != 0x4);
10149 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10150 data_length) != 0x8);
10151 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10152 reserved1) != 0xa);
10153 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10155 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10157 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10158 data_in_result) != 0x0);
10159 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10160 data_out_result) != 0x1);
10161 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10163 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10165 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10166 status_qualifier) != 0x6);
10167 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10168 sense_data_length) != 0x8);
10169 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10170 response_data_length) != 0xa);
10171 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10172 data_in_transferred) != 0xc);
10173 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10174 data_out_transferred) != 0x10);
10175 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10177 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10179 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10180 signature) != 0x0);
10181 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10182 function_and_status_code) != 0x8);
10183 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10184 max_admin_iq_elements) != 0x10);
10185 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10186 max_admin_oq_elements) != 0x11);
10187 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10188 admin_iq_element_length) != 0x12);
10189 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10190 admin_oq_element_length) != 0x13);
10191 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10192 max_reset_timeout) != 0x14);
10193 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10194 legacy_intx_status) != 0x18);
10195 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10196 legacy_intx_mask_set) != 0x1c);
10197 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10198 legacy_intx_mask_clear) != 0x20);
10199 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10200 device_status) != 0x40);
10201 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10202 admin_iq_pi_offset) != 0x48);
10203 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10204 admin_oq_ci_offset) != 0x50);
10205 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10206 admin_iq_element_array_addr) != 0x58);
10207 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10208 admin_oq_element_array_addr) != 0x60);
10209 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10210 admin_iq_ci_addr) != 0x68);
10211 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10212 admin_oq_pi_addr) != 0x70);
10213 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10214 admin_iq_num_elements) != 0x78);
10215 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10216 admin_oq_num_elements) != 0x79);
10217 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10218 admin_queue_int_msg_num) != 0x7a);
10219 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10220 device_error) != 0x80);
10221 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10222 error_details) != 0x88);
10223 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10224 device_reset) != 0x90);
10225 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10226 power_action) != 0x94);
10227 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10229 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10230 header.iu_type) != 0);
10231 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10232 header.iu_length) != 2);
10233 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10234 header.driver_flags) != 6);
10235 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10237 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10238 function_code) != 10);
10239 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10240 data.report_device_capability.buffer_length) != 44);
10241 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10242 data.report_device_capability.sg_descriptor) != 48);
10243 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10244 data.create_operational_iq.queue_id) != 12);
10245 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10246 data.create_operational_iq.element_array_addr) != 16);
10247 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10248 data.create_operational_iq.ci_addr) != 24);
10249 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10250 data.create_operational_iq.num_elements) != 32);
10251 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10252 data.create_operational_iq.element_length) != 34);
10253 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10254 data.create_operational_iq.queue_protocol) != 36);
10255 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10256 data.create_operational_oq.queue_id) != 12);
10257 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10258 data.create_operational_oq.element_array_addr) != 16);
10259 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10260 data.create_operational_oq.pi_addr) != 24);
10261 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10262 data.create_operational_oq.num_elements) != 32);
10263 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10264 data.create_operational_oq.element_length) != 34);
10265 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10266 data.create_operational_oq.queue_protocol) != 36);
10267 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10268 data.create_operational_oq.int_msg_num) != 40);
10269 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10270 data.create_operational_oq.coalescing_count) != 42);
10271 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10272 data.create_operational_oq.min_coalescing_time) != 44);
10273 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10274 data.create_operational_oq.max_coalescing_time) != 48);
10275 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10276 data.delete_operational_queue.queue_id) != 12);
10277 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10278 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10279 data.create_operational_iq) != 64 - 11);
10280 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10281 data.create_operational_oq) != 64 - 11);
10282 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10283 data.delete_operational_queue) != 64 - 11);
10285 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10286 header.iu_type) != 0);
10287 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10288 header.iu_length) != 2);
10289 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10290 header.driver_flags) != 6);
10291 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10293 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10294 function_code) != 10);
10295 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10297 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10298 data.create_operational_iq.status_descriptor) != 12);
10299 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10300 data.create_operational_iq.iq_pi_offset) != 16);
10301 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10302 data.create_operational_oq.status_descriptor) != 12);
10303 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10304 data.create_operational_oq.oq_ci_offset) != 16);
10305 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10307 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10308 header.iu_type) != 0);
10309 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10310 header.iu_length) != 2);
10311 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10312 header.response_queue_id) != 4);
10313 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10314 header.driver_flags) != 6);
10315 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10317 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10319 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10320 buffer_length) != 12);
10321 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10322 lun_number) != 16);
10323 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10324 protocol_specific) != 24);
10325 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10326 error_index) != 27);
10327 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10329 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10331 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10332 sg_descriptors) != 64);
10333 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10334 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10336 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10337 header.iu_type) != 0);
10338 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10339 header.iu_length) != 2);
10340 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10341 header.response_queue_id) != 4);
10342 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10343 header.driver_flags) != 6);
10344 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10346 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10348 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10349 buffer_length) != 16);
10350 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10351 data_encryption_key_index) != 22);
10352 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10353 encrypt_tweak_lower) != 24);
10354 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10355 encrypt_tweak_upper) != 28);
10356 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10358 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10359 error_index) != 48);
10360 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10361 num_sg_descriptors) != 50);
10362 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10363 cdb_length) != 51);
10364 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10365 lun_number) != 52);
10366 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10367 sg_descriptors) != 64);
10368 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10369 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10371 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10372 header.iu_type) != 0);
10373 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10374 header.iu_length) != 2);
10375 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10377 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10378 error_index) != 10);
10380 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10381 header.iu_type) != 0);
10382 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10383 header.iu_length) != 2);
10384 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10385 header.response_queue_id) != 4);
10386 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10388 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10389 data.report_event_configuration.buffer_length) != 12);
10390 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10391 data.report_event_configuration.sg_descriptors) != 16);
10392 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10393 data.set_event_configuration.global_event_oq_id) != 10);
10394 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10395 data.set_event_configuration.buffer_length) != 12);
10396 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10397 data.set_event_configuration.sg_descriptors) != 16);
10399 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10400 max_inbound_iu_length) != 6);
10401 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10402 max_outbound_iu_length) != 14);
10403 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10405 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10406 data_length) != 0);
10407 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10408 iq_arbitration_priority_support_bitmask) != 8);
10409 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10410 maximum_aw_a) != 9);
10411 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10412 maximum_aw_b) != 10);
10413 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10414 maximum_aw_c) != 11);
10415 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10416 max_inbound_queues) != 16);
10417 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10418 max_elements_per_iq) != 18);
10419 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10420 max_iq_element_length) != 24);
10421 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10422 min_iq_element_length) != 26);
10423 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10424 max_outbound_queues) != 30);
10425 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10426 max_elements_per_oq) != 32);
10427 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10428 intr_coalescing_time_granularity) != 34);
10429 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10430 max_oq_element_length) != 36);
10431 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10432 min_oq_element_length) != 38);
10433 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10434 iu_layer_descriptors) != 64);
10435 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10437 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10439 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10441 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10443 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10444 num_event_descriptors) != 2);
10445 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10446 descriptors) != 4);
10448 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10449 ARRAY_SIZE(pqi_supported_event_types));
10451 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10452 header.iu_type) != 0);
10453 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10454 header.iu_length) != 2);
10455 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10457 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10459 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10460 additional_event_id) != 12);
10461 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10463 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10465 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10466 header.iu_type) != 0);
10467 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10468 header.iu_length) != 2);
10469 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10471 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10473 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10474 additional_event_id) != 12);
10475 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10477 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10478 header.iu_type) != 0);
10479 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10480 header.iu_length) != 2);
10481 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10483 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10485 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10487 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10488 lun_number) != 16);
10489 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10490 protocol_specific) != 24);
10491 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10492 outbound_queue_id_to_manage) != 26);
10493 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10494 request_id_to_manage) != 28);
10495 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10496 task_management_function) != 30);
10497 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10499 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10500 header.iu_type) != 0);
10501 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10502 header.iu_length) != 2);
10503 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10505 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10507 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10508 additional_response_info) != 12);
10509 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10510 response_code) != 15);
10511 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10513 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10514 configured_logical_drive_count) != 0);
10515 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10516 configuration_signature) != 1);
10517 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10518 firmware_version_short) != 5);
10519 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10520 extended_logical_unit_count) != 154);
10521 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10522 firmware_build_number) != 190);
10523 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10524 vendor_id) != 200);
10525 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10526 product_id) != 208);
10527 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10528 extra_controller_flags) != 286);
10529 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10530 controller_mode) != 292);
10531 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10532 spare_part_number) != 293);
10533 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10534 firmware_version_long) != 325);
10536 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10537 phys_bay_in_box) != 115);
10538 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10539 device_type) != 120);
10540 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10541 redundant_path_present_map) != 1736);
10542 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10543 active_path_number) != 1738);
10544 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10545 alternate_paths_phys_connector) != 1739);
10546 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10547 alternate_paths_phys_box_on_port) != 1755);
10548 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10549 current_queue_depth_limit) != 1796);
10550 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10552 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10553 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10555 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10556 subpage_code) != 1);
10557 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10558 buffer_length) != 2);
10560 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10561 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10563 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10564 subpage_code) != 1);
10565 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10566 page_length) != 2);
10568 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10570 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10572 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10573 firmware_read_support) != 4);
10574 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10575 driver_read_support) != 5);
10576 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10577 firmware_write_support) != 6);
10578 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10579 driver_write_support) != 7);
10580 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10581 max_transfer_encrypted_sas_sata) != 8);
10582 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10583 max_transfer_encrypted_nvme) != 10);
10584 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10585 max_write_raid_5_6) != 12);
10586 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10587 max_write_raid_1_10_2drive) != 14);
10588 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10589 max_write_raid_1_10_3drive) != 16);
10591 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10592 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10593 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10594 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10595 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10596 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10597 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10598 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10599 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10600 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10601 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10602 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10604 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10605 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10606 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);