scsi: mpi3mr: Add framework to issue config requests
[platform/kernel/linux-starfive.git] drivers/scsi/mpi3mr/mpi3mr_fw.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2022 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17         struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19         struct mpi3mr_drv_cmd *drv_cmd);
20
21 static int poll_queues;
22 module_param(poll_queues, int, 0444);
23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24
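/*
 * 64-bit MMIO write helper: use the native writeq() where available,
 * otherwise fall back to two 32-bit writes (low dword first, then the
 * high dword).
 */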
25 #if defined(writeq) && defined(CONFIG_64BIT)
26 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
27 {
28         writeq(b, addr);
29 }
30 #else
31 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
32 {
33         __u64 data_out = b;
34
35         writel((u32)(data_out), addr);
36         writel((u32)(data_out >> 32), (addr + 4));
37 }
38 #endif
39
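/**
 * mpi3mr_check_req_qfull - Check if an operational request queue is full
 * @op_req_q: Operational request queue info
 *
 * Checks whether the operational request queue is full by comparing the
 * producer index against the consumer index.
 *
 * Return: true if the queue is full, false otherwise.
 */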
40 static inline bool
41 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42 {
43         u16 pi, ci, max_entries;
44         bool is_qfull = false;
45
46         pi = op_req_q->pi;
47         ci = READ_ONCE(op_req_q->ci);
48         max_entries = op_req_q->num_requests;
49
50         if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51                 is_qfull = true;
52
53         return is_qfull;
54 }
55
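/**
 * mpi3mr_sync_irqs - Synchronize with in-flight interrupt handlers
 * @mrioc: Adapter instance reference
 *
 * Wait for any executing interrupt handler on each allocated MSI-X
 * vector to complete.
 *
 * Return: Nothing.
 */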
56 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57 {
58         u16 i, max_vectors;
59
60         max_vectors = mrioc->intr_info_count;
61
62         for (i = 0; i < max_vectors; i++)
63                 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64 }
65
66 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
67 {
68         mrioc->intr_enabled = 0;
69         mpi3mr_sync_irqs(mrioc);
70 }
71
72 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
73 {
74         mrioc->intr_enabled = 1;
75 }
76
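/**
 * mpi3mr_cleanup_isr - Free IRQs and interrupt info
 * @mrioc: Adapter instance reference
 *
 * Disable interrupts, free the registered IRQ handlers and the
 * interrupt info array, and release the allocated IRQ vectors.
 *
 * Return: Nothing.
 */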
77 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
78 {
79         u16 i;
80
81         mpi3mr_ioc_disable_intr(mrioc);
82
83         if (!mrioc->intr_info)
84                 return;
85
86         for (i = 0; i < mrioc->intr_info_count; i++)
87                 free_irq(pci_irq_vector(mrioc->pdev, i),
88                     (mrioc->intr_info + i));
89
90         kfree(mrioc->intr_info);
91         mrioc->intr_info = NULL;
92         mrioc->intr_info_count = 0;
93         mrioc->is_intr_info_set = false;
94         pci_free_irq_vectors(mrioc->pdev);
95 }
96
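/**
 * mpi3mr_add_sg_single - Populate a simple SGE
 * @paddr: Virtual address of the SGE to populate
 * @flags: SGE flags
 * @length: Transfer length
 * @dma_addr: DMA address of the data buffer
 *
 * Fill a single MPI3 SGE with the given flags, length and address.
 *
 * Return: Nothing.
 */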
97 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
98         dma_addr_t dma_addr)
99 {
100         struct mpi3_sge_common *sgel = paddr;
101
102         sgel->flags = flags;
103         sgel->length = cpu_to_le32(length);
104         sgel->address = cpu_to_le64(dma_addr);
105 }
106
107 void mpi3mr_build_zero_len_sge(void *paddr)
108 {
109         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
110
111         mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
112 }
113
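/**
 * mpi3mr_get_reply_virt_addr - Map reply DMA address to virtual address
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of the reply buffer
 *
 * Translate a reply buffer DMA address reported by the controller into
 * the corresponding host virtual address, validating that it falls
 * within the driver's reply buffer pool.
 *
 * Return: Virtual address of the reply buffer or NULL.
 */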
114 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
115         dma_addr_t phys_addr)
116 {
117         if (!phys_addr)
118                 return NULL;
119
120         if ((phys_addr < mrioc->reply_buf_dma) ||
121             (phys_addr > mrioc->reply_buf_dma_max_address))
122                 return NULL;
123
124         return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
125 }
126
127 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
128         dma_addr_t phys_addr)
129 {
130         if (!phys_addr)
131                 return NULL;
132
133         return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
134 }
135
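/**
 * mpi3mr_repost_reply_buf - Post a reply buffer back to the controller
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply buffer
 *
 * Place the reply buffer back on the reply free queue and update the
 * reply free host index register.
 *
 * Return: Nothing.
 */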
136 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
137         u64 reply_dma)
138 {
139         u32 old_idx = 0;
140         unsigned long flags;
141
142         spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
143         old_idx  =  mrioc->reply_free_queue_host_index;
144         mrioc->reply_free_queue_host_index = (
145             (mrioc->reply_free_queue_host_index ==
146             (mrioc->reply_free_qsz - 1)) ? 0 :
147             (mrioc->reply_free_queue_host_index + 1));
148         mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
149         writel(mrioc->reply_free_queue_host_index,
150             &mrioc->sysif_regs->reply_free_host_index);
151         spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
152 }
153
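/**
 * mpi3mr_repost_sense_buf - Post a sense buffer back to the controller
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Place the sense buffer back on the sense buffer free queue and update
 * the sense buffer free host index register.
 *
 * Return: Nothing.
 */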
154 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
155         u64 sense_buf_dma)
156 {
157         u32 old_idx = 0;
158         unsigned long flags;
159
160         spin_lock_irqsave(&mrioc->sbq_lock, flags);
161         old_idx  =  mrioc->sbq_host_index;
162         mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
163             (mrioc->sense_buf_q_sz - 1)) ? 0 :
164             (mrioc->sbq_host_index + 1));
165         mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
166         writel(mrioc->sbq_host_index,
167             &mrioc->sysif_regs->sense_buffer_free_host_index);
168         spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
169 }
170
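/**
 * mpi3mr_print_event_data - Display event details
 * @mrioc: Adapter instance reference
 * @event_reply: Event notification reply frame
 *
 * Decode the received event notification and log a human readable
 * description of it.
 *
 * Return: Nothing.
 */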
171 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
172         struct mpi3_event_notification_reply *event_reply)
173 {
174         char *desc = NULL;
175         u16 event;
176
177         event = event_reply->event;
178
179         switch (event) {
180         case MPI3_EVENT_LOG_DATA:
181                 desc = "Log Data";
182                 break;
183         case MPI3_EVENT_CHANGE:
184                 desc = "Event Change";
185                 break;
186         case MPI3_EVENT_GPIO_INTERRUPT:
187                 desc = "GPIO Interrupt";
188                 break;
189         case MPI3_EVENT_CABLE_MGMT:
190                 desc = "Cable Management";
191                 break;
192         case MPI3_EVENT_ENERGY_PACK_CHANGE:
193                 desc = "Energy Pack Change";
194                 break;
195         case MPI3_EVENT_DEVICE_ADDED:
196         {
197                 struct mpi3_device_page0 *event_data =
198                     (struct mpi3_device_page0 *)event_reply->event_data;
199                 ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
200                     event_data->dev_handle, event_data->device_form);
201                 return;
202         }
203         case MPI3_EVENT_DEVICE_INFO_CHANGED:
204         {
205                 struct mpi3_device_page0 *event_data =
206                     (struct mpi3_device_page0 *)event_reply->event_data;
207                 ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
208                     event_data->dev_handle, event_data->device_form);
209                 return;
210         }
211         case MPI3_EVENT_DEVICE_STATUS_CHANGE:
212         {
213                 struct mpi3_event_data_device_status_change *event_data =
214                     (struct mpi3_event_data_device_status_change *)event_reply->event_data;
215                 ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
216                     event_data->dev_handle, event_data->reason_code);
217                 return;
218         }
219         case MPI3_EVENT_SAS_DISCOVERY:
220         {
221                 struct mpi3_event_data_sas_discovery *event_data =
222                     (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
223                 ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
224                     (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
225                     "start" : "stop",
226                     le32_to_cpu(event_data->discovery_status));
227                 return;
228         }
229         case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
230                 desc = "SAS Broadcast Primitive";
231                 break;
232         case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
233                 desc = "SAS Notify Primitive";
234                 break;
235         case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
236                 desc = "SAS Init Device Status Change";
237                 break;
238         case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
239                 desc = "SAS Init Table Overflow";
240                 break;
241         case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
242                 desc = "SAS Topology Change List";
243                 break;
244         case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
245                 desc = "Enclosure Device Status Change";
246                 break;
247         case MPI3_EVENT_HARD_RESET_RECEIVED:
248                 desc = "Hard Reset Received";
249                 break;
250         case MPI3_EVENT_SAS_PHY_COUNTER:
251                 desc = "SAS PHY Counter";
252                 break;
253         case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
254                 desc = "SAS Device Discovery Error";
255                 break;
256         case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
257                 desc = "PCIE Topology Change List";
258                 break;
259         case MPI3_EVENT_PCIE_ENUMERATION:
260         {
261                 struct mpi3_event_data_pcie_enumeration *event_data =
262                     (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
263                 ioc_info(mrioc, "PCIE Enumeration: (%s)",
264                     (event_data->reason_code ==
265                     MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
266                 if (event_data->enumeration_status)
267                         ioc_info(mrioc, "enumeration_status(0x%08x)\n",
268                             le32_to_cpu(event_data->enumeration_status));
269                 return;
270         }
271         case MPI3_EVENT_PREPARE_FOR_RESET:
272                 desc = "Prepare For Reset";
273                 break;
274         }
275
276         if (!desc)
277                 return;
278
279         ioc_info(mrioc, "%s\n", desc);
280 }
281
282 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
283         struct mpi3_default_reply *def_reply)
284 {
285         struct mpi3_event_notification_reply *event_reply =
286             (struct mpi3_event_notification_reply *)def_reply;
287
288         mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
289         mpi3mr_print_event_data(mrioc, event_reply);
290         mpi3mr_os_handle_events(mrioc, event_reply);
291 }
292
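/**
 * mpi3mr_get_drv_cmd - Get internal command tracker from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag from the reply
 * @def_reply: MPI3 default reply frame, if any
 *
 * Map the host tag in a reply to the driver's internal command tracker.
 * Event notifications carrying the invalid host tag are handed off to
 * the event handler here.
 *
 * Return: Pointer to the command tracker or NULL.
 */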
293 static struct mpi3mr_drv_cmd *
294 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
295         struct mpi3_default_reply *def_reply)
296 {
297         u16 idx;
298
299         switch (host_tag) {
300         case MPI3MR_HOSTTAG_INITCMDS:
301                 return &mrioc->init_cmds;
302         case MPI3MR_HOSTTAG_CFG_CMDS:
303                 return &mrioc->cfg_cmds;
304         case MPI3MR_HOSTTAG_BSG_CMDS:
305                 return &mrioc->bsg_cmds;
306         case MPI3MR_HOSTTAG_BLK_TMS:
307                 return &mrioc->host_tm_cmds;
308         case MPI3MR_HOSTTAG_PEL_ABORT:
309                 return &mrioc->pel_abort_cmd;
310         case MPI3MR_HOSTTAG_PEL_WAIT:
311                 return &mrioc->pel_cmds;
312         case MPI3MR_HOSTTAG_INVALID:
313                 if (def_reply && def_reply->function ==
314                     MPI3_FUNCTION_EVENT_NOTIFICATION)
315                         mpi3mr_handle_events(mrioc, def_reply);
316                 return NULL;
317         default:
318                 break;
319         }
320         if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
321             host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
322                 idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
323                 return &mrioc->dev_rmhs_cmds[idx];
324         }
325
326         if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
327             host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
328                 idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
329                 return &mrioc->evtack_cmds[idx];
330         }
331
332         return NULL;
333 }
334
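/**
 * mpi3mr_process_admin_reply_desc - Process an admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor
 * @reply_dma: Place holder for the reply frame DMA address
 *
 * Identify the descriptor type, extract the IOC status, log info and
 * reply frame (if any), and complete or invoke the callback of the
 * matching internal command.
 *
 * Return: Nothing.
 */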
335 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
336         struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
337 {
338         u16 reply_desc_type, host_tag = 0;
339         u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
340         u32 ioc_loginfo = 0;
341         struct mpi3_status_reply_descriptor *status_desc;
342         struct mpi3_address_reply_descriptor *addr_desc;
343         struct mpi3_success_reply_descriptor *success_desc;
344         struct mpi3_default_reply *def_reply = NULL;
345         struct mpi3mr_drv_cmd *cmdptr = NULL;
346         struct mpi3_scsi_io_reply *scsi_reply;
347         u8 *sense_buf = NULL;
348
349         *reply_dma = 0;
350         reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
351             MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
352         switch (reply_desc_type) {
353         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
354                 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
355                 host_tag = le16_to_cpu(status_desc->host_tag);
356                 ioc_status = le16_to_cpu(status_desc->ioc_status);
357                 if (ioc_status &
358                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
359                         ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
360                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
361                 break;
362         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
363                 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
364                 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
365                 def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
366                 if (!def_reply)
367                         goto out;
368                 host_tag = le16_to_cpu(def_reply->host_tag);
369                 ioc_status = le16_to_cpu(def_reply->ioc_status);
370                 if (ioc_status &
371                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
372                         ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
373                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
374                 if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
375                         scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
376                         sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
377                             le64_to_cpu(scsi_reply->sense_data_buffer_address));
378                 }
379                 break;
380         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
381                 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
382                 host_tag = le16_to_cpu(success_desc->host_tag);
383                 break;
384         default:
385                 break;
386         }
387
388         cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
389         if (cmdptr) {
390                 if (cmdptr->state & MPI3MR_CMD_PENDING) {
391                         cmdptr->state |= MPI3MR_CMD_COMPLETE;
392                         cmdptr->ioc_loginfo = ioc_loginfo;
393                         cmdptr->ioc_status = ioc_status;
394                         cmdptr->state &= ~MPI3MR_CMD_PENDING;
395                         if (def_reply) {
396                                 cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
397                                 memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
398                                     mrioc->reply_sz);
399                         }
400                         if (cmdptr->is_waiting) {
401                                 complete(&cmdptr->done);
402                                 cmdptr->is_waiting = 0;
403                         } else if (cmdptr->callback)
404                                 cmdptr->callback(mrioc, cmdptr);
405                 }
406         }
407 out:
408         if (sense_buf)
409                 mpi3mr_repost_sense_buf(mrioc,
410                     le64_to_cpu(scsi_reply->sense_data_buffer_address));
411 }
412
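/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drain the admin reply queue until a descriptor with a stale phase bit
 * is found, processing each reply descriptor and reposting the consumed
 * reply buffers.
 *
 * Return: Number of admin reply descriptors processed.
 */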
413 static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
414 {
415         u32 exp_phase = mrioc->admin_reply_ephase;
416         u32 admin_reply_ci = mrioc->admin_reply_ci;
417         u32 num_admin_replies = 0;
418         u64 reply_dma = 0;
419         struct mpi3_default_reply_descriptor *reply_desc;
420
421         reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
422             admin_reply_ci;
423
424         if ((le16_to_cpu(reply_desc->reply_flags) &
425             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
426                 return 0;
427
428         do {
429                 mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
430                 mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
431                 if (reply_dma)
432                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
433                 num_admin_replies++;
434                 if (++admin_reply_ci == mrioc->num_admin_replies) {
435                         admin_reply_ci = 0;
436                         exp_phase ^= 1;
437                 }
438                 reply_desc =
439                     (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
440                     admin_reply_ci;
441                 if ((le16_to_cpu(reply_desc->reply_flags) &
442                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
443                         break;
444         } while (1);
445
446         writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
447         mrioc->admin_reply_ci = admin_reply_ci;
448         mrioc->admin_reply_ephase = exp_phase;
449
450         return num_admin_replies;
451 }
452
453 /**
454  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
455  *      queue's consumer index from operational reply descriptor queue.
456  * @op_reply_q: op_reply_qinfo object
457  * @reply_ci: operational reply descriptor's queue consumer index
458  *
459  * Returns reply descriptor frame address
460  */
461 static inline struct mpi3_default_reply_descriptor *
462 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
463 {
464         void *segment_base_addr;
465         struct segments *segments = op_reply_q->q_segments;
466         struct mpi3_default_reply_descriptor *reply_desc = NULL;
467
468         segment_base_addr =
469             segments[reply_ci / op_reply_q->segment_qd].segment;
470         reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
471             (reply_ci % op_reply_q->segment_qd);
472         return reply_desc;
473 }
474
475 /**
476  * mpi3mr_process_op_reply_q - Operational reply queue handler
477  * @mrioc: Adapter instance reference
478  * @op_reply_q: Operational reply queue info
479  *
480  * Checks the specific operational reply queue and drains the
481  * reply queue entries until the queue is empty, processing the
482  * individual reply descriptors.
483  *
484  * Return: 0 if the queue is already processed, or the number of
485  *          reply descriptors processed.
486  */
487 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
488         struct op_reply_qinfo *op_reply_q)
489 {
490         struct op_req_qinfo *op_req_q;
491         u32 exp_phase;
492         u32 reply_ci;
493         u32 num_op_reply = 0;
494         u64 reply_dma = 0;
495         struct mpi3_default_reply_descriptor *reply_desc;
496         u16 req_q_idx = 0, reply_qidx;
497
498         reply_qidx = op_reply_q->qid - 1;
499
500         if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
501                 return 0;
502
503         exp_phase = op_reply_q->ephase;
504         reply_ci = op_reply_q->ci;
505
506         reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
507         if ((le16_to_cpu(reply_desc->reply_flags) &
508             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
509                 atomic_dec(&op_reply_q->in_use);
510                 return 0;
511         }
512
513         do {
514                 req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
515                 op_req_q = &mrioc->req_qinfo[req_q_idx];
516
517                 WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
518                 mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
519                     reply_qidx);
520                 atomic_dec(&op_reply_q->pend_ios);
521                 if (reply_dma)
522                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
523                 num_op_reply++;
524
525                 if (++reply_ci == op_reply_q->num_replies) {
526                         reply_ci = 0;
527                         exp_phase ^= 1;
528                 }
529
530                 reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
531
532                 if ((le16_to_cpu(reply_desc->reply_flags) &
533                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
534                         break;
535                 /*
536                  * Exit completion loop to avoid CPU lockup
537                  * Ensure remaining completion happens from threaded ISR.
538                  */
539                 if (num_op_reply > mrioc->max_host_ios) {
540                         op_reply_q->enable_irq_poll = true;
541                         break;
542                 }
543
544         } while (1);
545
546         writel(reply_ci,
547             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
548         op_reply_q->ci = reply_ci;
549         op_reply_q->ephase = exp_phase;
550
551         atomic_dec(&op_reply_q->in_use);
552         return num_op_reply;
553 }
554
555 /**
556  * mpi3mr_blk_mq_poll - Operational reply queue handler
557  * @shost: SCSI Host reference
558  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
559  *
560  * Checks the specific operational reply queue and drains the
561  * reply queue entries until the queue is empty, processing the
562  * individual reply descriptors.
563  *
564  * Return: 0 if the queue is already processed, or the number of
565  *          reply descriptors processed.
566  */
567 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
568 {
569         int num_entries = 0;
570         struct mpi3mr_ioc *mrioc;
571
572         mrioc = (struct mpi3mr_ioc *)shost->hostdata;
573
574         if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
575                 return 0;
576
577         num_entries = mpi3mr_process_op_reply_q(mrioc,
578                         &mrioc->op_reply_qinfo[queue_num]);
579
580         return num_entries;
581 }
582
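/**
 * mpi3mr_isr_primary - Primary interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info
 *
 * Process the admin reply queue (MSI-X index 0 only) and the
 * operational reply queue associated with this vector.
 *
 * Return: IRQ_HANDLED if any replies were processed, IRQ_NONE otherwise.
 */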
583 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
584 {
585         struct mpi3mr_intr_info *intr_info = privdata;
586         struct mpi3mr_ioc *mrioc;
587         u16 midx;
588         u32 num_admin_replies = 0, num_op_reply = 0;
589
590         if (!intr_info)
591                 return IRQ_NONE;
592
593         mrioc = intr_info->mrioc;
594
595         if (!mrioc->intr_enabled)
596                 return IRQ_NONE;
597
598         midx = intr_info->msix_index;
599
600         if (!midx)
601                 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
602         if (intr_info->op_reply_q)
603                 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
604                     intr_info->op_reply_q);
605
606         if (num_admin_replies || num_op_reply)
607                 return IRQ_HANDLED;
608         else
609                 return IRQ_NONE;
610 }
611
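/**
 * mpi3mr_isr - Hard interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info
 *
 * Invoke the primary handler and, when IRQ polling is enabled and I/Os
 * are still pending on the operational reply queue, disable the vector
 * and wake the threaded handler.
 *
 * Return: IRQ_NONE, IRQ_HANDLED or IRQ_WAKE_THREAD.
 */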
612 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
613 {
614         struct mpi3mr_intr_info *intr_info = privdata;
615         struct mpi3mr_ioc *mrioc;
616         u16 midx;
617         int ret;
618
619         if (!intr_info)
620                 return IRQ_NONE;
621
622         mrioc = intr_info->mrioc;
623         midx = intr_info->msix_index;
624         /* Call primary ISR routine */
625         ret = mpi3mr_isr_primary(irq, privdata);
626
627         /*
628          * If more IOs are expected, schedule IRQ polling thread.
629          * Otherwise exit from ISR.
630          */
631         if (!intr_info->op_reply_q)
632                 return ret;
633
634         if (!intr_info->op_reply_q->enable_irq_poll ||
635             !atomic_read(&intr_info->op_reply_q->pend_ios))
636                 return ret;
637
638         disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
639
640         return IRQ_WAKE_THREAD;
641 }
642
643 /**
644  * mpi3mr_isr_poll - Reply queue polling routine
645  * @irq: IRQ
646  * @privdata: Interrupt info
647  *
648  * Poll for pending I/O completions in a loop until no pending I/Os
649  * remain or a controller queue depth worth of I/Os is processed.
650  *
651  * Return: IRQ_NONE or IRQ_HANDLED
652  */
653 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
654 {
655         struct mpi3mr_intr_info *intr_info = privdata;
656         struct mpi3mr_ioc *mrioc;
657         u16 midx;
658         u32 num_op_reply = 0;
659
660         if (!intr_info || !intr_info->op_reply_q)
661                 return IRQ_NONE;
662
663         mrioc = intr_info->mrioc;
664         midx = intr_info->msix_index;
665
666         /* Poll for pending IOs completions */
667         do {
668                 if (!mrioc->intr_enabled)
669                         break;
670
671                 if (!midx)
672                         mpi3mr_process_admin_reply_q(mrioc);
673                 if (intr_info->op_reply_q)
674                         num_op_reply +=
675                             mpi3mr_process_op_reply_q(mrioc,
676                                 intr_info->op_reply_q);
677
678                 usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
679
680         } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
681             (num_op_reply < mrioc->max_host_ios));
682
683         intr_info->op_reply_q->enable_irq_poll = false;
684         enable_irq(pci_irq_vector(mrioc->pdev, midx));
685
686         return IRQ_HANDLED;
687 }
688
689 /**
690  * mpi3mr_request_irq - Request IRQ and register ISR
691  * @mrioc: Adapter instance reference
692  * @index: IRQ vector index
693  *
694  * Request a threaded IRQ with primary and secondary (threaded) handlers.
695  *
696  * Return: 0 on success and non zero on failures.
697  */
698 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
699 {
700         struct pci_dev *pdev = mrioc->pdev;
701         struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
702         int retval = 0;
703
704         intr_info->mrioc = mrioc;
705         intr_info->msix_index = index;
706         intr_info->op_reply_q = NULL;
707
708         snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
709             mrioc->driver_name, mrioc->id, index);
710
711         retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
712             mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
713         if (retval) {
714                 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
715                     intr_info->name, pci_irq_vector(pdev, index));
716                 return retval;
717         }
718
719         return retval;
720 }
721
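/**
 * mpi3mr_calc_poll_queues - Validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: Number of MSI-X vectors available
 *
 * Keep the requested number of poll queues only if enough vectors
 * remain for the admin and default queues, otherwise disable polling.
 *
 * Return: Nothing.
 */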
722 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
723 {
724         if (!mrioc->requested_poll_qcount)
725                 return;
726
727         /* Reserved for Admin and Default Queue */
728         if (max_vectors > 2 &&
729                 (mrioc->requested_poll_qcount < max_vectors - 2)) {
730                 ioc_info(mrioc,
731                     "enabled polled queues (%d) msix (%d)\n",
732                     mrioc->requested_poll_qcount, max_vectors);
733         } else {
734                 ioc_info(mrioc,
735                     "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
736                     mrioc->requested_poll_qcount, max_vectors);
737                 mrioc->requested_poll_qcount = 0;
738         }
739 }
740
741 /**
742  * mpi3mr_setup_isr - Setup ISR for the controller
743  * @mrioc: Adapter instance reference
744  * @setup_one: When set, request only one IRQ vector
745  *
746  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
747  *
748  * Return: 0 on success and non zero on failures.
749  */
750 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
751 {
752         unsigned int irq_flags = PCI_IRQ_MSIX;
753         int max_vectors, min_vec;
754         int retval;
755         int i;
756         struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
757
758         if (mrioc->is_intr_info_set)
759                 return 0;
760
761         mpi3mr_cleanup_isr(mrioc);
762
763         if (setup_one || reset_devices) {
764                 max_vectors = 1;
765                 retval = pci_alloc_irq_vectors(mrioc->pdev,
766                     1, max_vectors, irq_flags);
767                 if (retval < 0) {
768                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
769                             retval);
770                         goto out_failed;
771                 }
772         } else {
773                 max_vectors =
774                     min_t(int, mrioc->cpu_count + 1 +
775                         mrioc->requested_poll_qcount, mrioc->msix_count);
776
777                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
778
779                 ioc_info(mrioc,
780                     "MSI-X vectors supported: %d, no of cores: %d,",
781                     mrioc->msix_count, mrioc->cpu_count);
782                 ioc_info(mrioc,
783                     "MSI-x vectors requested: %d poll_queues %d\n",
784                     max_vectors, mrioc->requested_poll_qcount);
785
786                 desc.post_vectors = mrioc->requested_poll_qcount;
787                 min_vec = desc.pre_vectors + desc.post_vectors;
788                 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
789
790                 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
791                         min_vec, max_vectors, irq_flags, &desc);
792
793                 if (retval < 0) {
794                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
795                             retval);
796                         goto out_failed;
797                 }
798
799
800                 /*
801                  * If only one MSI-x is allocated, then MSI-x 0 will be shared
802                  * between Admin queue and operational queue
803                  */
804                 if (retval == min_vec)
805                         mrioc->op_reply_q_offset = 0;
806                 else if (retval != (max_vectors)) {
807                         ioc_info(mrioc,
808                             "allocated vectors (%d) are less than configured (%d)\n",
809                             retval, max_vectors);
810                 }
811
812                 max_vectors = retval;
813                 mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
814
815                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
816
817         }
818
819         mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
820             GFP_KERNEL);
821         if (!mrioc->intr_info) {
822                 retval = -ENOMEM;
823                 pci_free_irq_vectors(mrioc->pdev);
824                 goto out_failed;
825         }
826         for (i = 0; i < max_vectors; i++) {
827                 retval = mpi3mr_request_irq(mrioc, i);
828                 if (retval) {
829                         mrioc->intr_info_count = i;
830                         goto out_failed;
831                 }
832         }
833         if (reset_devices || !setup_one)
834                 mrioc->is_intr_info_set = true;
835         mrioc->intr_info_count = max_vectors;
836         mpi3mr_ioc_enable_intr(mrioc);
837         return 0;
838
839 out_failed:
840         mpi3mr_cleanup_isr(mrioc);
841
842         return retval;
843 }
844
845 static const struct {
846         enum mpi3mr_iocstate value;
847         char *name;
848 } mrioc_states[] = {
849         { MRIOC_STATE_READY, "ready" },
850         { MRIOC_STATE_FAULT, "fault" },
851         { MRIOC_STATE_RESET, "reset" },
852         { MRIOC_STATE_BECOMING_READY, "becoming ready" },
853         { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
854         { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
855 };
856
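/**
 * mpi3mr_iocstate_name - get IOC state name
 * @mrioc_state: IOC state value
 *
 * Map IOC state to a NULL terminated ASCII string.
 *
 * Return: name corresponding to the IOC state or NULL.
 */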
857 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
858 {
859         int i;
860         char *name = NULL;
861
862         for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
863                 if (mrioc_states[i].value == mrioc_state) {
864                         name = mrioc_states[i].name;
865                         break;
866                 }
867         }
868         return name;
869 }
870
871 /* Reset reason to name mapper structure */
872 static const struct {
873         enum mpi3mr_reset_reason value;
874         char *name;
875 } mpi3mr_reset_reason_codes[] = {
876         { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
877         { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
878         { MPI3MR_RESET_FROM_APP, "application invocation" },
879         { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
880         { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
881         { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
882         { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
883         { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
884         { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
885         { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
886         { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
887         { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
888         { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
889         {
890                 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
891                 "create reply queue timeout"
892         },
893         {
894                 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
895                 "create request queue timeout"
896         },
897         { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
898         { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
899         { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
900         { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
901         {
902                 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
903                 "component image activation timeout"
904         },
905         {
906                 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
907                 "get package version timeout"
908         },
909         { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
910         { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
911         { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
912         { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
913 };
914
915 /**
916  * mpi3mr_reset_rc_name - get reset reason code name
917  * @reason_code: reset reason code value
918  *
919  * Map reset reason to a NULL terminated ASCII string
920  *
921  * Return: name corresponding to reset reason value or NULL.
922  */
923 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
924 {
925         int i;
926         char *name = NULL;
927
928         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
929                 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
930                         name = mpi3mr_reset_reason_codes[i].name;
931                         break;
932                 }
933         }
934         return name;
935 }
936
937 /* Reset type to name mapper structure */
938 static const struct {
939         u16 reset_type;
940         char *name;
941 } mpi3mr_reset_types[] = {
942         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
943         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
944 };
945
946 /**
947  * mpi3mr_reset_type_name - get reset type name
948  * @reset_type: reset type value
949  *
950  * Map reset type to a NULL terminated ASCII string
951  *
952  * Return: name corresponding to reset type value or NULL.
953  */
954 static const char *mpi3mr_reset_type_name(u16 reset_type)
955 {
956         int i;
957         char *name = NULL;
958
959         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
960                 if (mpi3mr_reset_types[i].reset_type == reset_type) {
961                         name = mpi3mr_reset_types[i].name;
962                         break;
963                 }
964         }
965         return name;
966 }
967
968 /**
969  * mpi3mr_print_fault_info - Display fault information
970  * @mrioc: Adapter instance reference
971  *
972  * Display the controller fault information if there is a
973  * controller fault.
974  *
975  * Return: Nothing.
976  */
977 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
978 {
979         u32 ioc_status, code, code1, code2, code3;
980
981         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
982
983         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
984                 code = readl(&mrioc->sysif_regs->fault);
985                 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
986                 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
987                 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
988
989                 ioc_info(mrioc,
990                     "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
991                     code, code1, code2, code3);
992         }
993 }
994
995 /**
996  * mpi3mr_get_iocstate - Get IOC State
997  * @mrioc: Adapter instance reference
998  *
999  * Return the proper IOC state enum based on the IOC status, the
1000  * IOC configuration and the unrecoverable state of the controller.
1001  *
1002  * Return: Current IOC state.
1003  */
1004 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1005 {
1006         u32 ioc_status, ioc_config;
1007         u8 ready, enabled;
1008
1009         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1010         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1011
1012         if (mrioc->unrecoverable)
1013                 return MRIOC_STATE_UNRECOVERABLE;
1014         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1015                 return MRIOC_STATE_FAULT;
1016
1017         ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1018         enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1019
1020         if (ready && enabled)
1021                 return MRIOC_STATE_READY;
1022         if ((!ready) && (!enabled))
1023                 return MRIOC_STATE_RESET;
1024         if ((!ready) && (enabled))
1025                 return MRIOC_STATE_BECOMING_READY;
1026
1027         return MRIOC_STATE_RESET_REQUESTED;
1028 }
1029
1030 /**
1031  * mpi3mr_clear_reset_history - clear reset history
1032  * @mrioc: Adapter instance reference
1033  *
1034  * Write the reset history bit in IOC status to clear the bit,
1035  * if it is already set.
1036  *
1037  * Return: Nothing.
1038  */
1039 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1040 {
1041         u32 ioc_status;
1042
1043         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1044         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1045                 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1046 }
1047
1048 /**
1049  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1050  * @mrioc: Adapter instance reference
1051  * @reset_reason: Reset reason code
1052  *
1053  * Issue Message unit Reset to the controller and wait for it to
1054  * be complete.
1055  *
1056  * Return: 0 on success, -1 on failure.
1057  */
1058 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1059         u32 reset_reason)
1060 {
1061         u32 ioc_config, timeout, ioc_status;
1062         int retval = -1;
1063
1064         ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1065         if (mrioc->unrecoverable) {
1066                 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1067                 return retval;
1068         }
1069         mpi3mr_clear_reset_history(mrioc);
1070         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1071         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1072         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1073         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1074
1075         timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1076         do {
1077                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1078                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1079                         mpi3mr_clear_reset_history(mrioc);
1080                         break;
1081                 }
1082                 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1083                         mpi3mr_print_fault_info(mrioc);
1084                         break;
1085                 }
1086                 msleep(100);
1087         } while (--timeout);
1088
1089         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1090         if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1091               (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1092               (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1093                 retval = 0;
1094
1095         ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
1096             (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1097         return retval;
1098 }
1099
1100 /**
1101  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1102  * during reset/resume
1103  * @mrioc: Adapter instance reference
1104  *
1105  * Return zero if the new IOCFacts parameter values are compatible with
1106  * the older values, else return -EPERM
1107  */
1108 static int
1109 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1110 {
1111         u16 dev_handle_bitmap_sz;
1112         void *removepend_bitmap;
1113
1114         if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1115                 ioc_err(mrioc,
1116                     "cannot increase reply size from %d to %d\n",
1117                     mrioc->reply_sz, mrioc->facts.reply_sz);
1118                 return -EPERM;
1119         }
1120
1121         if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1122                 ioc_err(mrioc,
1123                     "cannot reduce number of operational reply queues from %d to %d\n",
1124                     mrioc->num_op_reply_q,
1125                     mrioc->facts.max_op_reply_q);
1126                 return -EPERM;
1127         }
1128
1129         if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1130                 ioc_err(mrioc,
1131                     "cannot reduce number of operational request queues from %d to %d\n",
1132                     mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1133                 return -EPERM;
1134         }
1135
1136         dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
1137         if (mrioc->facts.max_devhandle % 8)
1138                 dev_handle_bitmap_sz++;
1139         if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
1140                 removepend_bitmap = krealloc(mrioc->removepend_bitmap,
1141                     dev_handle_bitmap_sz, GFP_KERNEL);
1142                 if (!removepend_bitmap) {
1143                         ioc_err(mrioc,
1144                             "failed to increase removepend_bitmap sz from: %d to %d\n",
1145                             mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
1146                         return -EPERM;
1147                 }
1148                 memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
1149                     dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
1150                 mrioc->removepend_bitmap = removepend_bitmap;
1151                 ioc_info(mrioc,
1152                     "increased dev_handle_bitmap_sz from %d to %d\n",
1153                     mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
1154                 mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
1155         }
1156
1157         return 0;
1158 }
1159
1160 /**
1161  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1162  * @mrioc: Adapter instance reference
1163  *
1164  * Set Enable IOC bit in IOC configuration register and wait for
1165  * the controller to become ready.
1166  *
1167  * Return: 0 on success, appropriate error on failure.
1168  */
1169 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1170 {
1171         u32 ioc_config, ioc_status, timeout;
1172         int retval = 0;
1173         enum mpi3mr_iocstate ioc_state;
1174         u64 base_info;
1175
1176         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1177         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1178         base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1179         ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1180             ioc_status, ioc_config, base_info);
1181
1182         /* The timeout value is in units of 2 seconds; convert it to seconds */
1183         mrioc->ready_timeout =
1184             ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1185             MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1186
1187         ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1188
1189         ioc_state = mpi3mr_get_iocstate(mrioc);
1190         ioc_info(mrioc, "controller is in %s state during detection\n",
1191             mpi3mr_iocstate_name(ioc_state));
1192
1193         if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1194             ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1195                 timeout = mrioc->ready_timeout * 10;
1196                 do {
1197                         msleep(100);
1198                 } while (--timeout);
1199
1200                 ioc_state = mpi3mr_get_iocstate(mrioc);
1201                 ioc_info(mrioc,
1202                     "controller is in %s state after waiting to reset\n",
1203                     mpi3mr_iocstate_name(ioc_state));
1204         }
1205
1206         if (ioc_state == MRIOC_STATE_READY) {
1207                 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1208                 retval = mpi3mr_issue_and_process_mur(mrioc,
1209                     MPI3MR_RESET_FROM_BRINGUP);
1210                 ioc_state = mpi3mr_get_iocstate(mrioc);
1211                 if (retval)
1212                         ioc_err(mrioc,
1213                             "message unit reset failed with error %d current state %s\n",
1214                             retval, mpi3mr_iocstate_name(ioc_state));
1215         }
1216         if (ioc_state != MRIOC_STATE_RESET) {
1217                 mpi3mr_print_fault_info(mrioc);
1218                 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1219                 retval = mpi3mr_issue_reset(mrioc,
1220                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1221                     MPI3MR_RESET_FROM_BRINGUP);
1222                 if (retval) {
1223                         ioc_err(mrioc,
1224                             "soft reset failed with error %d\n", retval);
1225                         goto out_failed;
1226                 }
1227         }
1228         ioc_state = mpi3mr_get_iocstate(mrioc);
1229         if (ioc_state != MRIOC_STATE_RESET) {
1230                 ioc_err(mrioc,
1231                     "cannot bring controller to reset state, current state: %s\n",
1232                     mpi3mr_iocstate_name(ioc_state));
1233                 goto out_failed;
1234         }
1235         mpi3mr_clear_reset_history(mrioc);
1236         retval = mpi3mr_setup_admin_qpair(mrioc);
1237         if (retval) {
1238                 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1239                     retval);
1240                 goto out_failed;
1241         }
1242
1243         ioc_info(mrioc, "bringing controller to ready state\n");
1244         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1245         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1246         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1247
1248         timeout = mrioc->ready_timeout * 10;
1249         do {
1250                 ioc_state = mpi3mr_get_iocstate(mrioc);
1251                 if (ioc_state == MRIOC_STATE_READY) {
1252                         ioc_info(mrioc,
1253                             "successfully transitioned to %s state\n",
1254                             mpi3mr_iocstate_name(ioc_state));
1255                         return 0;
1256                 }
1257                 msleep(100);
1258         } while (--timeout);
1259
1260 out_failed:
1261         ioc_state = mpi3mr_get_iocstate(mrioc);
1262         ioc_err(mrioc,
1263             "failed to bring to ready state,  current state: %s\n",
1264             mpi3mr_iocstate_name(ioc_state));
1265         return retval;
1266 }
1267
1268 /**
1269  * mpi3mr_soft_reset_success - Check whether soft reset succeeded
1270  * @ioc_status: IOC status register value
1271  * @ioc_config: IOC config register value
1272  *
1273  * Check whether the soft reset is successful or not based on
1274  * IOC status and IOC config register values.
1275  *
1276  * Return: True when the soft reset is success, false otherwise.
1277  */
1278 static inline bool
1279 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1280 {
1281         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1282             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1283                 return true;
1284         return false;
1285 }
1286
1287 /**
1288  * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
1289  * @mrioc: Adapter reference
1290  * @ioc_status: IOC status register value
1291  *
1292  * Check whether the controller hit diag reset fault code.
1293  *
1294  * Return: True when there is diag fault, false otherwise.
1295  */
1296 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1297         u32 ioc_status)
1298 {
1299         u32 fault;
1300
1301         if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1302                 return false;
1303         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1304         if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1305                 mpi3mr_print_fault_info(mrioc);
1306                 return true;
1307         }
1308         return false;
1309 }
1310
1311 /**
1312  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1313  * @mrioc: Adapter reference
1314  *
1315  * Set diag save bit in IOC configuration register to enable
1316  * snapdump.
1317  *
1318  * Return: Nothing.
1319  */
1320 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1321 {
1322         u32 ioc_config;
1323
1324         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1325         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1326         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1327 }
1328
1329 /**
1330  * mpi3mr_issue_reset - Issue reset to the controller
1331  * @mrioc: Adapter reference
1332  * @reset_type: Reset type
1333  * @reset_reason: Reset reason code
1334  *
1335  * Unlock the host diagnostic registers, write the specific reset
1336  * type and wait for reset acknowledgment from the controller.
1337  * If unlocking the diagnostic registers fails, retry the magic
1338  * write sequence a predefined number of times.
1339  *
1340  * Return: 0 on success, non-zero on failure.
1341  */
1342 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1343         u32 reset_reason)
1344 {
1345         int retval = -1;
1346         u8 unlock_retry_count = 0;
1347         u32 host_diagnostic, ioc_status, ioc_config;
1348         u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1349
1350         if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1351             (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1352                 return retval;
1353         if (mrioc->unrecoverable)
1354                 return retval;
1355         if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1356                 retval = 0;
1357                 return retval;
1358         }
1359
1360         ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1361             mpi3mr_reset_type_name(reset_type),
1362             mpi3mr_reset_rc_name(reset_reason), reset_reason);
1363
1364         mpi3mr_clear_reset_history(mrioc);
1365         do {
1366                 ioc_info(mrioc,
1367                     "Write magic sequence to unlock host diag register (retry=%d)\n",
1368                     ++unlock_retry_count);
1369                 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1370                         ioc_err(mrioc,
1371                             "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1372                             mpi3mr_reset_type_name(reset_type),
1373                             host_diagnostic);
1374                         mrioc->unrecoverable = 1;
1375                         return retval;
1376                 }
1377
1378                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1379                     &mrioc->sysif_regs->write_sequence);
1380                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1381                     &mrioc->sysif_regs->write_sequence);
1382                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1383                     &mrioc->sysif_regs->write_sequence);
1384                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1385                     &mrioc->sysif_regs->write_sequence);
1386                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1387                     &mrioc->sysif_regs->write_sequence);
1388                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1389                     &mrioc->sysif_regs->write_sequence);
1390                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1391                     &mrioc->sysif_regs->write_sequence);
1392                 usleep_range(1000, 1100);
1393                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1394                 ioc_info(mrioc,
1395                     "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1396                     unlock_retry_count, host_diagnostic);
1397         } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1398
1399         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1400         writel(host_diagnostic | reset_type,
1401             &mrioc->sysif_regs->host_diagnostic);
1402         switch (reset_type) {
1403         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1404                 do {
1405                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1406                         ioc_config =
1407                             readl(&mrioc->sysif_regs->ioc_configuration);
1408                         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1409                             && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1410                             ) {
1411                                 mpi3mr_clear_reset_history(mrioc);
1412                                 retval = 0;
1413                                 break;
1414                         }
1415                         msleep(100);
1416                 } while (--timeout);
1417                 mpi3mr_print_fault_info(mrioc);
1418                 break;
1419         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1420                 do {
1421                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1422                         if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1423                                 retval = 0;
1424                                 break;
1425                         }
1426                         msleep(100);
1427                 } while (--timeout);
1428                 break;
1429         default:
1430                 break;
1431         }
1432
1433         writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1434             &mrioc->sysif_regs->write_sequence);
1435
1436         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1437         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1438         ioc_info(mrioc,
1439             "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
1440             (!retval) ? "successful" : "failed", ioc_status,
1441             ioc_config);
1442         if (retval)
1443                 mrioc->unrecoverable = 1;
1444         return retval;
1445 }
1446
1447 /**
1448  * mpi3mr_admin_request_post - Post request to admin queue
1449  * @mrioc: Adapter reference
1450  * @admin_req: MPI3 request
1451  * @admin_req_sz: Request size
1452  * @ignore_reset: Ignore reset in process
1453  *
1454  * Post the MPI3 request into the admin request queue and
1455  * inform the controller; if the queue is full, return an
1456  * appropriate error.
1457  *
1458  * Return: 0 on success, non-zero on failure.
1459  */
1460 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1461         u16 admin_req_sz, u8 ignore_reset)
1462 {
1463         u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1464         int retval = 0;
1465         unsigned long flags;
1466         u8 *areq_entry;
1467
1468         if (mrioc->unrecoverable) {
1469                 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1470                 return -EFAULT;
1471         }
1472
1473         spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1474         areq_pi = mrioc->admin_req_pi;
1475         areq_ci = mrioc->admin_req_ci;
1476         max_entries = mrioc->num_admin_req;
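        /* The queue is full if advancing the PI would collide with the CI */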
1477         if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1478             (areq_pi == (max_entries - 1)))) {
1479                 ioc_err(mrioc, "AdminReqQ full condition detected\n");
1480                 retval = -EAGAIN;
1481                 goto out;
1482         }
1483         if (!ignore_reset && mrioc->reset_in_progress) {
1484                 ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1485                 retval = -EAGAIN;
1486                 goto out;
1487         }
1488         areq_entry = (u8 *)mrioc->admin_req_base +
1489             (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1490         memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1491         memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1492
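        /*
         * Advance the producer index (wrapping at the queue depth) and
         * write it to the admin request queue PI register so the
         * controller picks up the new request.
         */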
1493         if (++areq_pi == max_entries)
1494                 areq_pi = 0;
1495         mrioc->admin_req_pi = areq_pi;
1496
1497         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1498
1499 out:
1500         spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1501
1502         return retval;
1503 }
1504
1505 /**
1506  * mpi3mr_free_op_req_q_segments - free request memory segments
1507  * @mrioc: Adapter instance reference
1508  * @q_idx: operational request queue index
1509  *
1510  * Free memory segments allocated for operational request queue
1511  *
1512  * Return: Nothing.
1513  */
1514 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1515 {
1516         u16 j;
1517         int size;
1518         struct segments *segments;
1519
1520         segments = mrioc->req_qinfo[q_idx].q_segments;
1521         if (!segments)
1522                 return;
1523
1524         if (mrioc->enable_segqueue) {
1525                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1526                 if (mrioc->req_qinfo[q_idx].q_segment_list) {
1527                         dma_free_coherent(&mrioc->pdev->dev,
1528                             MPI3MR_MAX_SEG_LIST_SIZE,
1529                             mrioc->req_qinfo[q_idx].q_segment_list,
1530                             mrioc->req_qinfo[q_idx].q_segment_list_dma);
1531                         mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1532                 }
1533         } else
1534                 size = mrioc->req_qinfo[q_idx].segment_qd *
1535                     mrioc->facts.op_req_sz;
1536
1537         for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1538                 if (!segments[j].segment)
1539                         continue;
1540                 dma_free_coherent(&mrioc->pdev->dev,
1541                     size, segments[j].segment, segments[j].segment_dma);
1542                 segments[j].segment = NULL;
1543         }
1544         kfree(mrioc->req_qinfo[q_idx].q_segments);
1545         mrioc->req_qinfo[q_idx].q_segments = NULL;
1546         mrioc->req_qinfo[q_idx].qid = 0;
1547 }
1548
1549 /**
1550  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1551  * @mrioc: Adapter instance reference
1552  * @q_idx: operational reply queue index
1553  *
1554  * Free memory segments allocated for operational reply queue
1555  *
1556  * Return: Nothing.
1557  */
1558 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1559 {
1560         u16 j;
1561         int size;
1562         struct segments *segments;
1563
1564         segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1565         if (!segments)
1566                 return;
1567
1568         if (mrioc->enable_segqueue) {
1569                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1570                 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1571                         dma_free_coherent(&mrioc->pdev->dev,
1572                             MPI3MR_MAX_SEG_LIST_SIZE,
1573                             mrioc->op_reply_qinfo[q_idx].q_segment_list,
1574                             mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1575                         mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1576                 }
1577         } else
1578                 size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1579                     mrioc->op_reply_desc_sz;
1580
1581         for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1582                 if (!segments[j].segment)
1583                         continue;
1584                 dma_free_coherent(&mrioc->pdev->dev,
1585                     size, segments[j].segment, segments[j].segment_dma);
1586                 segments[j].segment = NULL;
1587         }
1588
1589         kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1590         mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1591         mrioc->op_reply_qinfo[q_idx].qid = 0;
1592 }
1593
1594 /**
1595  * mpi3mr_delete_op_reply_q - delete operational reply queue
1596  * @mrioc: Adapter instance reference
1597  * @qidx: operational reply queue index
1598  *
1599  * Delete operational reply queue by issuing MPI request
1600  * through admin queue.
1601  *
1602  * Return:  0 on success, non-zero on failure.
1603  */
1604 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1605 {
1606         struct mpi3_delete_reply_queue_request delq_req;
1607         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1608         int retval = 0;
1609         u16 reply_qid = 0, midx;
1610
1611         reply_qid = op_reply_q->qid;
1612
1613         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1614
1615         if (!reply_qid) {
1616                 retval = -1;
1617                 ioc_err(mrioc, "Issue DelRepQ: called with invalid RepQID\n");
1618                 goto out;
1619         }
1620
1621         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1622             mrioc->active_poll_qcount--;
1623
1624         memset(&delq_req, 0, sizeof(delq_req));
1625         mutex_lock(&mrioc->init_cmds.mutex);
1626         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1627                 retval = -1;
1628                 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1629                 mutex_unlock(&mrioc->init_cmds.mutex);
1630                 goto out;
1631         }
1632         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1633         mrioc->init_cmds.is_waiting = 1;
1634         mrioc->init_cmds.callback = NULL;
1635         delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1636         delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1637         delq_req.queue_id = cpu_to_le16(reply_qid);
1638
1639         init_completion(&mrioc->init_cmds.done);
1640         retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1641             1);
1642         if (retval) {
1643                 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1644                 goto out_unlock;
1645         }
1646         wait_for_completion_timeout(&mrioc->init_cmds.done,
1647             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1648         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1649                 ioc_err(mrioc, "delete reply queue timed out\n");
1650                 mpi3mr_check_rh_fault_ioc(mrioc,
1651                     MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1652                 retval = -1;
1653                 goto out_unlock;
1654         }
1655         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1656             != MPI3_IOCSTATUS_SUCCESS) {
1657                 ioc_err(mrioc,
1658                     "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1659                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1660                     mrioc->init_cmds.ioc_loginfo);
1661                 retval = -1;
1662                 goto out_unlock;
1663         }
1664         mrioc->intr_info[midx].op_reply_q = NULL;
1665
1666         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1667 out_unlock:
1668         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1669         mutex_unlock(&mrioc->init_cmds.mutex);
1670 out:
1671
1672         return retval;
1673 }
1674
1675 /**
1676  * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
1677  * @mrioc: Adapter instance reference
1678  * @qidx: reply queue index
1679  *
1680  * Allocate segmented memory pools for operational reply
1681  * queue.
1682  *
1683  * Return: 0 on success, non-zero on failure.
1684  */
1685 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1686 {
1687         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1688         int i, size;
1689         u64 *q_segment_list_entry = NULL;
1690         struct segments *segments;
1691
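        /*
         * With segmented queues the reply pool is carved into fixed-size
         * segments tracked through a segment list; otherwise a single
         * contiguous buffer holds all reply descriptors.
         */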
1692         if (mrioc->enable_segqueue) {
1693                 op_reply_q->segment_qd =
1694                     MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1695
1696                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1697
1698                 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1699                     MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1700                     GFP_KERNEL);
1701                 if (!op_reply_q->q_segment_list)
1702                         return -ENOMEM;
1703                 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1704         } else {
1705                 op_reply_q->segment_qd = op_reply_q->num_replies;
1706                 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1707         }
1708
1709         op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1710             op_reply_q->segment_qd);
1711
1712         op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1713             sizeof(struct segments), GFP_KERNEL);
1714         if (!op_reply_q->q_segments)
1715                 return -ENOMEM;
1716
1717         segments = op_reply_q->q_segments;
1718         for (i = 0; i < op_reply_q->num_segments; i++) {
1719                 segments[i].segment =
1720                     dma_alloc_coherent(&mrioc->pdev->dev,
1721                     size, &segments[i].segment_dma, GFP_KERNEL);
1722                 if (!segments[i].segment)
1723                         return -ENOMEM;
1724                 if (mrioc->enable_segqueue)
1725                         q_segment_list_entry[i] =
1726                             (unsigned long)segments[i].segment_dma;
1727         }
1728
1729         return 0;
1730 }
1731
1732 /**
1733  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1734  * @mrioc: Adapter instance reference
1735  * @qidx: request queue index
1736  *
1737  * Allocate segmented memory pools for operational request
1738  * queue.
1739  *
1740  * Return: 0 on success, non-zero on failure.
1741  */
1742 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1743 {
1744         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1745         int i, size;
1746         u64 *q_segment_list_entry = NULL;
1747         struct segments *segments;
1748
1749         if (mrioc->enable_segqueue) {
1750                 op_req_q->segment_qd =
1751                     MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1752
1753                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1754
1755                 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1756                     MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1757                     GFP_KERNEL);
1758                 if (!op_req_q->q_segment_list)
1759                         return -ENOMEM;
1760                 q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1761
1762         } else {
1763                 op_req_q->segment_qd = op_req_q->num_requests;
1764                 size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1765         }
1766
1767         op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1768             op_req_q->segment_qd);
1769
1770         op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1771             sizeof(struct segments), GFP_KERNEL);
1772         if (!op_req_q->q_segments)
1773                 return -ENOMEM;
1774
1775         segments = op_req_q->q_segments;
1776         for (i = 0; i < op_req_q->num_segments; i++) {
1777                 segments[i].segment =
1778                     dma_alloc_coherent(&mrioc->pdev->dev,
1779                     size, &segments[i].segment_dma, GFP_KERNEL);
1780                 if (!segments[i].segment)
1781                         return -ENOMEM;
1782                 if (mrioc->enable_segqueue)
1783                         q_segment_list_entry[i] =
1784                             (unsigned long)segments[i].segment_dma;
1785         }
1786
1787         return 0;
1788 }
1789
1790 /**
1791  * mpi3mr_create_op_reply_q - create operational reply queue
1792  * @mrioc: Adapter instance reference
1793  * @qidx: operational reply queue index
1794  *
1795  * Create operational reply queue by issuing MPI request
1796  * through admin queue.
1797  *
1798  * Return:  0 on success, non-zero on failure.
1799  */
1800 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1801 {
1802         struct mpi3_create_reply_queue_request create_req;
1803         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1804         int retval = 0;
1805         u16 reply_qid = 0, midx;
1806
1807         reply_qid = op_reply_q->qid;
1808
1809         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1810
1811         if (reply_qid) {
1812                 retval = -1;
1813                 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1814                     reply_qid);
1815
1816                 return retval;
1817         }
1818
1819         reply_qid = qidx + 1;
1820         op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1821         if (!mrioc->pdev->revision)
1822                 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
1823         op_reply_q->ci = 0;
1824         op_reply_q->ephase = 1;
1825         atomic_set(&op_reply_q->pend_ios, 0);
1826         atomic_set(&op_reply_q->in_use, 0);
1827         op_reply_q->enable_irq_poll = false;
1828
1829         if (!op_reply_q->q_segments) {
1830                 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1831                 if (retval) {
1832                         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1833                         goto out;
1834                 }
1835         }
1836
1837         memset(&create_req, 0, sizeof(create_req));
1838         mutex_lock(&mrioc->init_cmds.mutex);
1839         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1840                 retval = -1;
1841                 ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1842                 goto out_unlock;
1843         }
1844         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1845         mrioc->init_cmds.is_waiting = 1;
1846         mrioc->init_cmds.callback = NULL;
1847         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1848         create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1849         create_req.queue_id = cpu_to_le16(reply_qid);
1850
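        /*
         * Reply queues whose MSI-x index falls within the interrupt-driven
         * vectors are default (interrupt enabled) queues; the remaining
         * indices are created as poll queues.
         */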
1851         if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
1852                 op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
1853         else
1854                 op_reply_q->qtype = MPI3MR_POLL_QUEUE;
1855
1856         if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
1857                 create_req.flags =
1858                         MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1859                 create_req.msix_index =
1860                         cpu_to_le16(mrioc->intr_info[midx].msix_index);
1861         } else {
1862                 create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
1863                 ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
1864                         reply_qid, midx);
1865                 if (!mrioc->active_poll_qcount)
1866                         disable_irq_nosync(pci_irq_vector(mrioc->pdev,
1867                             mrioc->intr_info_count - 1));
1868         }
1869
1870         if (mrioc->enable_segqueue) {
1871                 create_req.flags |=
1872                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1873                 create_req.base_address = cpu_to_le64(
1874                     op_reply_q->q_segment_list_dma);
1875         } else
1876                 create_req.base_address = cpu_to_le64(
1877                     op_reply_q->q_segments[0].segment_dma);
1878
1879         create_req.size = cpu_to_le16(op_reply_q->num_replies);
1880
1881         init_completion(&mrioc->init_cmds.done);
1882         retval = mpi3mr_admin_request_post(mrioc, &create_req,
1883             sizeof(create_req), 1);
1884         if (retval) {
1885                 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1886                 goto out_unlock;
1887         }
1888         wait_for_completion_timeout(&mrioc->init_cmds.done,
1889             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1890         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1891                 ioc_err(mrioc, "create reply queue timed out\n");
1892                 mpi3mr_check_rh_fault_ioc(mrioc,
1893                     MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1894                 retval = -1;
1895                 goto out_unlock;
1896         }
1897         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1898             != MPI3_IOCSTATUS_SUCCESS) {
1899                 ioc_err(mrioc,
1900                     "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1901                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1902                     mrioc->init_cmds.ioc_loginfo);
1903                 retval = -1;
1904                 goto out_unlock;
1905         }
1906         op_reply_q->qid = reply_qid;
1907         if (midx < mrioc->intr_info_count)
1908                 mrioc->intr_info[midx].op_reply_q = op_reply_q;
1909
1910         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
1911             mrioc->active_poll_qcount++;
1912
1913 out_unlock:
1914         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1915         mutex_unlock(&mrioc->init_cmds.mutex);
1916 out:
1917
1918         return retval;
1919 }
1920
1921 /**
1922  * mpi3mr_create_op_req_q - create operational request queue
1923  * @mrioc: Adapter instance reference
1924  * @idx: operational request queue index
1925  * @reply_qid: Reply queue ID
1926  *
1927  * Create operational request queue by issuing MPI request
1928  * through admin queue.
1929  *
1930  * Return:  0 on success, non-zero on failure.
1931  */
1932 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
1933         u16 reply_qid)
1934 {
1935         struct mpi3_create_request_queue_request create_req;
1936         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
1937         int retval = 0;
1938         u16 req_qid = 0;
1939
1940         req_qid = op_req_q->qid;
1941
1942         if (req_qid) {
1943                 retval = -1;
1944                 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
1945                     req_qid);
1946
1947                 return retval;
1948         }
1949         req_qid = idx + 1;
1950
1951         op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
1952         op_req_q->ci = 0;
1953         op_req_q->pi = 0;
1954         op_req_q->reply_qid = reply_qid;
1955         spin_lock_init(&op_req_q->q_lock);
1956
1957         if (!op_req_q->q_segments) {
1958                 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
1959                 if (retval) {
1960                         mpi3mr_free_op_req_q_segments(mrioc, idx);
1961                         goto out;
1962                 }
1963         }
1964
1965         memset(&create_req, 0, sizeof(create_req));
1966         mutex_lock(&mrioc->init_cmds.mutex);
1967         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1968                 retval = -1;
1969                 ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
1970                 goto out_unlock;
1971         }
1972         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1973         mrioc->init_cmds.is_waiting = 1;
1974         mrioc->init_cmds.callback = NULL;
1975         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1976         create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
1977         create_req.queue_id = cpu_to_le16(req_qid);
1978         if (mrioc->enable_segqueue) {
1979                 create_req.flags =
1980                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1981                 create_req.base_address = cpu_to_le64(
1982                     op_req_q->q_segment_list_dma);
1983         } else
1984                 create_req.base_address = cpu_to_le64(
1985                     op_req_q->q_segments[0].segment_dma);
1986         create_req.reply_queue_id = cpu_to_le16(reply_qid);
1987         create_req.size = cpu_to_le16(op_req_q->num_requests);
1988
1989         init_completion(&mrioc->init_cmds.done);
1990         retval = mpi3mr_admin_request_post(mrioc, &create_req,
1991             sizeof(create_req), 1);
1992         if (retval) {
1993                 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
1994                 goto out_unlock;
1995         }
1996         wait_for_completion_timeout(&mrioc->init_cmds.done,
1997             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1998         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1999                 ioc_err(mrioc, "create request queue timed out\n");
2000                 mpi3mr_check_rh_fault_ioc(mrioc,
2001                     MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2002                 retval = -1;
2003                 goto out_unlock;
2004         }
2005         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2006             != MPI3_IOCSTATUS_SUCCESS) {
2007                 ioc_err(mrioc,
2008                     "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2009                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2010                     mrioc->init_cmds.ioc_loginfo);
2011                 retval = -1;
2012                 goto out_unlock;
2013         }
2014         op_req_q->qid = req_qid;
2015
2016 out_unlock:
2017         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2018         mutex_unlock(&mrioc->init_cmds.mutex);
2019 out:
2020
2021         return retval;
2022 }
2023
2024 /**
2025  * mpi3mr_create_op_queues - create operational queue pairs
2026  * @mrioc: Adapter instance reference
2027  *
2028  * Allocate memory for operational queue metadata and call the
2029  * create request and reply queue functions.
2030  *
2031  * Return: 0 on success, non-zero on failures.
2032  */
2033 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2034 {
2035         int retval = 0;
2036         u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2037
2038         num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2039             mrioc->facts.max_op_req_q);
2040
2041         msix_count_op_q =
2042             mrioc->intr_info_count - mrioc->op_reply_q_offset;
2043         if (!mrioc->num_queues)
2044                 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2045         /*
2046          * During reset set the num_queues to the number of queues
2047          * that was set before the reset.
2048          */
2049         num_queues = mrioc->num_op_reply_q ?
2050             mrioc->num_op_reply_q : mrioc->num_queues;
2051         ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2052             num_queues);
2053
2054         if (!mrioc->req_qinfo) {
2055                 mrioc->req_qinfo = kcalloc(num_queues,
2056                     sizeof(struct op_req_qinfo), GFP_KERNEL);
2057                 if (!mrioc->req_qinfo) {
2058                         retval = -1;
2059                         goto out_failed;
2060                 }
2061
2062                 mrioc->op_reply_qinfo = kcalloc(num_queues,
2063                     sizeof(struct op_reply_qinfo), GFP_KERNEL);
2064                 if (!mrioc->op_reply_qinfo) {
2065                         retval = -1;
2066                         goto out_failed;
2067                 }
2068         }
2069
2070         if (mrioc->enable_segqueue)
2071                 ioc_info(mrioc,
2072                     "allocating operational queues through segmented queues\n");
2073
2074         for (i = 0; i < num_queues; i++) {
2075                 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2076                         ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2077                         break;
2078                 }
2079                 if (mpi3mr_create_op_req_q(mrioc, i,
2080                     mrioc->op_reply_qinfo[i].qid)) {
2081                         ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2082                         mpi3mr_delete_op_reply_q(mrioc, i);
2083                         break;
2084                 }
2085         }
2086
2087         if (i == 0) {
2088                 /* Not even one queue is created successfully */
2089                 retval = -1;
2090                 goto out_failed;
2091         }
2092         mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2093         ioc_info(mrioc,
2094             "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2095             mrioc->num_op_reply_q, mrioc->default_qcount,
2096             mrioc->active_poll_qcount);
2097
2098         return retval;
2099 out_failed:
2100         kfree(mrioc->req_qinfo);
2101         mrioc->req_qinfo = NULL;
2102
2103         kfree(mrioc->op_reply_qinfo);
2104         mrioc->op_reply_qinfo = NULL;
2105
2106         return retval;
2107 }
2108
2109 /**
2110  * mpi3mr_op_request_post - Post request to operational queue
2111  * @mrioc: Adapter reference
2112  * @op_req_q: Operational request queue info
2113  * @req: MPI3 request
2114  *
2115  * Post the MPI3 request into the operational request queue and
2116  * inform the controller; if the queue is full, return an
2117  * appropriate error.
2118  *
2119  * Return: 0 on success, non-zero on failure.
2120  */
2121 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2122         struct op_req_qinfo *op_req_q, u8 *req)
2123 {
2124         u16 pi = 0, max_entries, reply_qidx = 0, midx;
2125         int retval = 0;
2126         unsigned long flags;
2127         u8 *req_entry;
2128         void *segment_base_addr;
2129         u16 req_sz = mrioc->facts.op_req_sz;
2130         struct segments *segments = op_req_q->q_segments;
2131
2132         reply_qidx = op_req_q->reply_qid - 1;
2133
2134         if (mrioc->unrecoverable)
2135                 return -EFAULT;
2136
2137         spin_lock_irqsave(&op_req_q->q_lock, flags);
2138         pi = op_req_q->pi;
2139         max_entries = op_req_q->num_requests;
2140
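        /*
         * If the request queue looks full, drain the matching reply queue
         * once to advance the consumer index before failing with -EAGAIN.
         */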
2141         if (mpi3mr_check_req_qfull(op_req_q)) {
2142                 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2143                     reply_qidx, mrioc->op_reply_q_offset);
2144                 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2145
2146                 if (mpi3mr_check_req_qfull(op_req_q)) {
2147                         retval = -EAGAIN;
2148                         goto out;
2149                 }
2150         }
2151
2152         if (mrioc->reset_in_progress) {
2153                 ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2154                 retval = -EAGAIN;
2155                 goto out;
2156         }
2157
2158         segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2159         req_entry = (u8 *)segment_base_addr +
2160             ((pi % op_req_q->segment_qd) * req_sz);
2161
2162         memset(req_entry, 0, req_sz);
2163         memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2164
2165         if (++pi == max_entries)
2166                 pi = 0;
2167         op_req_q->pi = pi;
2168
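        /*
         * Once the pending IO count on the reply queue crosses the
         * trigger threshold, mark that queue for IRQ polling.
         */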
2169         if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2170             > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2171                 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2172
2173         writel(op_req_q->pi,
2174             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2175
2176 out:
2177         spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2178         return retval;
2179 }
2180
2181 /**
2182  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2183  * controller
2184  * @mrioc: Adapter instance reference
2185  * @reason_code: reason code for the fault.
2186  *
2187  * This routine will save snapdump and fault the controller with
2188  * the given reason code if it is not already in the fault state or
2189  * asynchronously reset. This is used to handle initialization-time
2190  * faults/resets/timeouts, as in those cases an immediate soft reset
2191  * invocation is not required.
2192  *
2193  * Return:  None.
2194  */
2195 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2196 {
2197         u32 ioc_status, host_diagnostic, timeout;
2198
2199         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2200         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2201             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
2202                 mpi3mr_print_fault_info(mrioc);
2203                 return;
2204         }
2205         mpi3mr_set_diagsave(mrioc);
2206         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2207             reason_code);
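        /* Poll every 100ms until the diag (snapdump) save completes or the timeout expires */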
2208         timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2209         do {
2210                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2211                 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2212                         break;
2213                 msleep(100);
2214         } while (--timeout);
2215 }
2216
2217 /**
2218  * mpi3mr_sync_timestamp - Issue time stamp sync request
2219  * @mrioc: Adapter reference
2220  *
2221  * Issue IO unit control MPI request to synchronize firmware
2222  * timestamp with host time.
2223  *
2224  * Return: 0 on success, non-zero on failure.
2225  */
2226 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2227 {
2228         ktime_t current_time;
2229         struct mpi3_iounit_control_request iou_ctrl;
2230         int retval = 0;
2231
2232         memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2233         mutex_lock(&mrioc->init_cmds.mutex);
2234         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2235                 retval = -1;
2236                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2237                 mutex_unlock(&mrioc->init_cmds.mutex);
2238                 goto out;
2239         }
2240         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2241         mrioc->init_cmds.is_waiting = 1;
2242         mrioc->init_cmds.callback = NULL;
2243         iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2244         iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2245         iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
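        /* Pass the current host wall-clock time, in milliseconds, as the update parameter */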
2246         current_time = ktime_get_real();
2247         iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2248
2249         init_completion(&mrioc->init_cmds.done);
2250         retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2251             sizeof(iou_ctrl), 0);
2252         if (retval) {
2253                 ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2254                 goto out_unlock;
2255         }
2256
2257         wait_for_completion_timeout(&mrioc->init_cmds.done,
2258             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2259         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2260                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2261                 mrioc->init_cmds.is_waiting = 0;
2262                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2263                         mpi3mr_soft_reset_handler(mrioc,
2264                             MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
2265                 retval = -1;
2266                 goto out_unlock;
2267         }
2268         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2269             != MPI3_IOCSTATUS_SUCCESS) {
2270                 ioc_err(mrioc,
2271                     "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2272                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2273                     mrioc->init_cmds.ioc_loginfo);
2274                 retval = -1;
2275                 goto out_unlock;
2276         }
2277
2278 out_unlock:
2279         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2280         mutex_unlock(&mrioc->init_cmds.mutex);
2281
2282 out:
2283         return retval;
2284 }
2285
2286 /**
2287  * mpi3mr_print_pkg_ver - display controller fw package version
2288  * @mrioc: Adapter reference
2289  *
2290  * Retrieve firmware package version from the component image
2291  * header of the controller flash and display it.
2292  *
2293  * Return: 0 on success and non-zero on failure.
2294  */
2295 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2296 {
2297         struct mpi3_ci_upload_request ci_upload;
2298         int retval = -1;
2299         void *data = NULL;
2300         dma_addr_t data_dma;
2301         struct mpi3_ci_manifest_mpi *manifest;
2302         u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2303         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2304
2305         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2306             GFP_KERNEL);
2307         if (!data)
2308                 return -ENOMEM;
2309
2310         memset(&ci_upload, 0, sizeof(ci_upload));
2311         mutex_lock(&mrioc->init_cmds.mutex);
2312         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2313                 ioc_err(mrioc, "sending get package version failed due to command in use\n");
2314                 mutex_unlock(&mrioc->init_cmds.mutex);
2315                 goto out;
2316         }
2317         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2318         mrioc->init_cmds.is_waiting = 1;
2319         mrioc->init_cmds.callback = NULL;
2320         ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2321         ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2322         ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2323         ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2324         ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2325         ci_upload.segment_size = cpu_to_le32(data_len);
2326
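        /*
         * Upload just the component image manifest, which carries the
         * firmware package version.
         */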
2327         mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2328             data_dma);
2329         init_completion(&mrioc->init_cmds.done);
2330         retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2331             sizeof(ci_upload), 1);
2332         if (retval) {
2333                 ioc_err(mrioc, "posting get package version failed\n");
2334                 goto out_unlock;
2335         }
2336         wait_for_completion_timeout(&mrioc->init_cmds.done,
2337             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2338         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2339                 ioc_err(mrioc, "get package version timed out\n");
2340                 mpi3mr_check_rh_fault_ioc(mrioc,
2341                     MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2342                 retval = -1;
2343                 goto out_unlock;
2344         }
2345         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2346             == MPI3_IOCSTATUS_SUCCESS) {
2347                 manifest = (struct mpi3_ci_manifest_mpi *) data;
2348                 if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2349                         ioc_info(mrioc,
2350                             "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2351                             manifest->package_version.gen_major,
2352                             manifest->package_version.gen_minor,
2353                             manifest->package_version.phase_major,
2354                             manifest->package_version.phase_minor,
2355                             manifest->package_version.customer_id,
2356                             manifest->package_version.build_num);
2357                 }
2358         }
2359         retval = 0;
2360 out_unlock:
2361         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2362         mutex_unlock(&mrioc->init_cmds.mutex);
2363
2364 out:
2365         if (data)
2366                 dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2367                     data_dma);
2368         return retval;
2369 }
2370
2371 /**
2372  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2373  * @work: work struct
2374  *
2375  * Watchdog work executed periodically (1 second interval) to
2376  * monitor firmware faults and to issue a periodic timestamp sync to
2377  * the firmware.
2378  *
2379  * Return: Nothing.
2380  */
2381 static void mpi3mr_watchdog_work(struct work_struct *work)
2382 {
2383         struct mpi3mr_ioc *mrioc =
2384             container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2385         unsigned long flags;
2386         enum mpi3mr_iocstate ioc_state;
2387         u32 fault, host_diagnostic, ioc_status;
2388         u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2389
2390         if (mrioc->reset_in_progress || mrioc->unrecoverable)
2391                 return;
2392
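        /* Resync the firmware timestamp once every MPI3MR_TSUPDATE_INTERVAL watchdog ticks */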
2393         if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2394                 mrioc->ts_update_counter = 0;
2395                 mpi3mr_sync_timestamp(mrioc);
2396         }
2397
2398         if ((mrioc->prepare_for_reset) &&
2399             ((mrioc->prepare_for_reset_timeout_counter++) >=
2400              MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2401                 mpi3mr_soft_reset_handler(mrioc,
2402                     MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2403                 return;
2404         }
2405
2406         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2407         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2408                 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2409                 return;
2410         }
2411
2412         /* Check for fault state every second and issue soft reset */
2413         ioc_state = mpi3mr_get_iocstate(mrioc);
2414         if (ioc_state != MRIOC_STATE_FAULT)
2415                 goto schedule_work;
2416
2417         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2418         host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2419         if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2420                 if (!mrioc->diagsave_timeout) {
2421                         mpi3mr_print_fault_info(mrioc);
2422                         ioc_warn(mrioc, "diag save in progress\n");
2423                 }
2424                 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2425                         goto schedule_work;
2426         }
2427
2428         mpi3mr_print_fault_info(mrioc);
2429         mrioc->diagsave_timeout = 0;
2430
2431         switch (fault) {
2432         case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2433                 ioc_info(mrioc,
2434                     "controller requires system power cycle, marking controller as unrecoverable\n");
2435                 mrioc->unrecoverable = 1;
2436                 return;
2437         case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2438                 return;
2439         case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2440                 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2441                 break;
2442         default:
2443                 break;
2444         }
2445         mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2446         return;
2447
2448 schedule_work:
2449         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2450         if (mrioc->watchdog_work_q)
2451                 queue_delayed_work(mrioc->watchdog_work_q,
2452                     &mrioc->watchdog_work,
2453                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2454         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2455         return;
2456 }
2457
2458 /**
2459  * mpi3mr_start_watchdog - Start watchdog
2460  * @mrioc: Adapter instance reference
2461  *
2462  * Create and start the watchdog thread to monitor controller
2463  * faults.
2464  *
2465  * Return: Nothing.
2466  */
2467 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2468 {
2469         if (mrioc->watchdog_work_q)
2470                 return;
2471
2472         INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2473         snprintf(mrioc->watchdog_work_q_name,
2474             sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2475             mrioc->id);
2476         mrioc->watchdog_work_q =
2477             create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2478         if (!mrioc->watchdog_work_q) {
2479                 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2480                 return;
2481         }
2482
2483         if (mrioc->watchdog_work_q)
2484                 queue_delayed_work(mrioc->watchdog_work_q,
2485                     &mrioc->watchdog_work,
2486                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2487 }
2488
2489 /**
2490  * mpi3mr_stop_watchdog - Stop watchdog
2491  * @mrioc: Adapter instance reference
2492  *
2493  * Stop the watchdog thread created to monitor controller
2494  * faults.
2495  *
2496  * Return: Nothing.
2497  */
2498 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2499 {
2500         unsigned long flags;
2501         struct workqueue_struct *wq;
2502
2503         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2504         wq = mrioc->watchdog_work_q;
2505         mrioc->watchdog_work_q = NULL;
2506         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2507         if (wq) {
2508                 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2509                         flush_workqueue(wq);
2510                 destroy_workqueue(wq);
2511         }
2512 }
2513
2514 /**
2515  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2516  * @mrioc: Adapter instance reference
2517  *
2518  * Allocate memory for admin queue pair if required and register
2519  * the admin queue with the controller.
2520  *
2521  * Return: 0 on success, non-zero on failures.
2522  */
2523 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2524 {
2525         int retval = 0;
2526         u32 num_admin_entries = 0;
2527
2528         mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2529         mrioc->num_admin_req = mrioc->admin_req_q_sz /
2530             MPI3MR_ADMIN_REQ_FRAME_SZ;
2531         mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2532         mrioc->admin_req_base = NULL;
2533
2534         mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2535         mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2536             MPI3MR_ADMIN_REPLY_FRAME_SZ;
2537         mrioc->admin_reply_ci = 0;
2538         mrioc->admin_reply_ephase = 1;
2539         mrioc->admin_reply_base = NULL;
2540
2541         if (!mrioc->admin_req_base) {
2542                 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2543                     mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2544
2545                 if (!mrioc->admin_req_base) {
2546                         retval = -1;
2547                         goto out_failed;
2548                 }
2549
2550                 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2551                     mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2552                     GFP_KERNEL);
2553
2554                 if (!mrioc->admin_reply_base) {
2555                         retval = -1;
2556                         goto out_failed;
2557                 }
2558         }
2559
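        /*
         * Program the admin queue depths (replies in the upper 16 bits,
         * requests in the lower 16 bits), the queue base addresses and
         * the initial PI/CI values into the system interface registers.
         */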
2560         num_admin_entries = (mrioc->num_admin_replies << 16) |
2561             (mrioc->num_admin_req);
2562         writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2563         mpi3mr_writeq(mrioc->admin_req_dma,
2564             &mrioc->sysif_regs->admin_request_queue_address);
2565         mpi3mr_writeq(mrioc->admin_reply_dma,
2566             &mrioc->sysif_regs->admin_reply_queue_address);
2567         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2568         writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2569         return retval;
2570
2571 out_failed:
2572
2573         if (mrioc->admin_reply_base) {
2574                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2575                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
2576                 mrioc->admin_reply_base = NULL;
2577         }
2578         if (mrioc->admin_req_base) {
2579                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2580                     mrioc->admin_req_base, mrioc->admin_req_dma);
2581                 mrioc->admin_req_base = NULL;
2582         }
2583         return retval;
2584 }
2585
2586 /**
2587  * mpi3mr_issue_iocfacts - Send IOC Facts
2588  * @mrioc: Adapter instance reference
2589  * @facts_data: Cached IOC facts data
2590  *
2591  * Issue IOC Facts MPI request through the admin queue and wait for
2592  * it to complete or time out.
2593  *
2594  * Return: 0 on success, non-zero on failures.
2595  */
2596 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2597         struct mpi3_ioc_facts_data *facts_data)
2598 {
2599         struct mpi3_ioc_facts_request iocfacts_req;
2600         void *data = NULL;
2601         dma_addr_t data_dma;
2602         u32 data_len = sizeof(*facts_data);
2603         int retval = 0;
2604         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2605
2606         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2607             GFP_KERNEL);
2608
2609         if (!data) {
2610                 retval = -1;
2611                 goto out;
2612         }
2613
2614         memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2615         mutex_lock(&mrioc->init_cmds.mutex);
2616         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2617                 retval = -1;
2618                 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2619                 mutex_unlock(&mrioc->init_cmds.mutex);
2620                 goto out;
2621         }
2622         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2623         mrioc->init_cmds.is_waiting = 1;
2624         mrioc->init_cmds.callback = NULL;
2625         iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2626         iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2627
2628         mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2629             data_dma);
2630
2631         init_completion(&mrioc->init_cmds.done);
2632         retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2633             sizeof(iocfacts_req), 1);
2634         if (retval) {
2635                 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2636                 goto out_unlock;
2637         }
2638         wait_for_completion_timeout(&mrioc->init_cmds.done,
2639             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2640         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2641                 ioc_err(mrioc, "ioc_facts timed out\n");
2642                 mpi3mr_check_rh_fault_ioc(mrioc,
2643                     MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2644                 retval = -1;
2645                 goto out_unlock;
2646         }
2647         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2648             != MPI3_IOCSTATUS_SUCCESS) {
2649                 ioc_err(mrioc,
2650                     "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2651                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2652                     mrioc->init_cmds.ioc_loginfo);
2653                 retval = -1;
2654                 goto out_unlock;
2655         }
2656         memcpy(facts_data, (u8 *)data, data_len);
2657         mpi3mr_process_factsdata(mrioc, facts_data);
2658 out_unlock:
2659         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2660         mutex_unlock(&mrioc->init_cmds.mutex);
2661
2662 out:
2663         if (data)
2664                 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2665
2666         return retval;
2667 }
2668
2669 /**
2670  * mpi3mr_check_reset_dma_mask - Check and set IOC facts DMA mask
2671  * @mrioc: Adapter instance reference
2672  *
2673  * Check whether the new DMA mask requested through IOCFacts by
2674  * firmware needs to be set; if so, set it.
2675  *
2676  * Return: 0 on success, non-zero on failure.
2677  */
2678 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2679 {
2680         struct pci_dev *pdev = mrioc->pdev;
2681         int r;
2682         u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2683
2684         if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2685                 return 0;
2686
2687         ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2688             mrioc->dma_mask, facts_dma_mask);
2689
2690         r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2691         if (r) {
2692                 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2693                     facts_dma_mask, r);
2694                 return r;
2695         }
2696         mrioc->dma_mask = facts_dma_mask;
2697         return r;
2698 }
2699
2700 /**
2701  * mpi3mr_process_factsdata - Process IOC facts data
2702  * @mrioc: Adapter instance reference
2703  * @facts_data: Cached IOC facts data
2704  *
2705  * Convert IOC facts data into CPU endianness and cache it in
2706  * the driver.
2707  *
2708  * Return: Nothing.
2709  */
2710 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2711         struct mpi3_ioc_facts_data *facts_data)
2712 {
2713         u32 ioc_config, req_sz, facts_flags;
2714
2715         if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2716             (sizeof(*facts_data) / 4)) {
2717                 ioc_warn(mrioc,
2718                     "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2719                     sizeof(*facts_data),
2720                     le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2721         }
2722
2723         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2724         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2725             MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2726         if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2727                 ioc_err(mrioc,
2728                     "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2729                     req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2730         }
2731
2732         memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2733
2734         facts_flags = le32_to_cpu(facts_data->flags);
2735         mrioc->facts.op_req_sz = req_sz;
2736         mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2737             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2738             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2739
2740         mrioc->facts.ioc_num = facts_data->ioc_number;
2741         mrioc->facts.who_init = facts_data->who_init;
2742         mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2743         mrioc->facts.personality = (facts_flags &
2744             MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2745         mrioc->facts.dma_mask = (facts_flags &
2746             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2747             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2748         mrioc->facts.protocol_flags = facts_data->protocol_flags;
2749         mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2750         mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
2751         mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2752         mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2753         mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2754         mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2755         mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2756         mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2757         mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2758         mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2759         mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2760         mrioc->facts.max_pcie_switches =
2761             le16_to_cpu(facts_data->max_pcie_switches);
2762         mrioc->facts.max_sasexpanders =
2763             le16_to_cpu(facts_data->max_sas_expanders);
2764         mrioc->facts.max_sasinitiators =
2765             le16_to_cpu(facts_data->max_sas_initiators);
2766         mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2767         mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2768         mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2769         mrioc->facts.max_op_req_q =
2770             le16_to_cpu(facts_data->max_operational_request_queues);
2771         mrioc->facts.max_op_reply_q =
2772             le16_to_cpu(facts_data->max_operational_reply_queues);
2773         mrioc->facts.ioc_capabilities =
2774             le32_to_cpu(facts_data->ioc_capabilities);
2775         mrioc->facts.fw_ver.build_num =
2776             le16_to_cpu(facts_data->fw_version.build_num);
2777         mrioc->facts.fw_ver.cust_id =
2778             le16_to_cpu(facts_data->fw_version.customer_id);
2779         mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2780         mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2781         mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2782         mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2783         mrioc->msix_count = min_t(int, mrioc->msix_count,
2784             mrioc->facts.max_msix_vectors);
2785         mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2786         mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2787         mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2788         mrioc->facts.shutdown_timeout =
2789             le16_to_cpu(facts_data->shutdown_timeout);
2790
2791         mrioc->facts.max_dev_per_tg =
2792             facts_data->max_devices_per_throttle_group;
2793         mrioc->facts.io_throttle_data_length =
2794             le16_to_cpu(facts_data->io_throttle_data_length);
2795         mrioc->facts.max_io_throttle_group =
2796             le16_to_cpu(facts_data->max_io_throttle_group);
2797         mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
2798         mrioc->facts.io_throttle_high =
2799             le16_to_cpu(facts_data->io_throttle_high);
2800
2801         /* Store in 512b block count */
2802         if (mrioc->facts.io_throttle_data_length)
2803                 mrioc->io_throttle_data_length =
2804                     (mrioc->facts.io_throttle_data_length * 2 * 4);
2805         else
2806                 /* set the length to 1MB + 1K to disable throttle */
2807                 mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
2808
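             /*
              * io_throttle_high/low are reported in MiB; convert them to
              * 512-byte block counts (1 MiB = 2048 blocks) to match the unit
              * used for io_throttle_data_length above.
              */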
2809         mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
2810         mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
2811
2812         ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
2813             mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
2814             mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
2815         ioc_info(mrioc,
2816             "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
2817             mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
2818             mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
2819         ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
2820             mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
2821             mrioc->facts.sge_mod_shift);
2822         ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
2823             mrioc->facts.dma_mask, (facts_flags &
2824             MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
2825         ioc_info(mrioc,
2826             "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
2827             mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
2828         ioc_info(mrioc,
2829            "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
2830            mrioc->facts.io_throttle_data_length * 4,
2831            mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
2832 }
2833
2834 /**
2835  * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
2836  * @mrioc: Adapter instance reference
2837  *
2838  * Allocate and initialize the reply free buffers, sense
2839  * buffers, reply free queue and sense buffer queue.
2840  *
2841  * Return: 0 on success, non-zero on failures.
2842  */
2843 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
2844 {
2845         int retval = 0;
2846         u32 sz, i;
2847
2848         if (mrioc->init_cmds.reply)
2849                 return retval;
2850
2851         mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2852         if (!mrioc->init_cmds.reply)
2853                 goto out_failed;
2854
2855         mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2856         if (!mrioc->bsg_cmds.reply)
2857                 goto out_failed;
2858
2859         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2860                 mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
2861                     GFP_KERNEL);
2862                 if (!mrioc->dev_rmhs_cmds[i].reply)
2863                         goto out_failed;
2864         }
2865
2866         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
2867                 mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
2868                     GFP_KERNEL);
2869                 if (!mrioc->evtack_cmds[i].reply)
2870                         goto out_failed;
2871         }
2872
2873         mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2874         if (!mrioc->host_tm_cmds.reply)
2875                 goto out_failed;
2876
2877         mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2878         if (!mrioc->pel_cmds.reply)
2879                 goto out_failed;
2880
2881         mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2882         if (!mrioc->pel_abort_cmd.reply)
2883                 goto out_failed;
2884
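             /*
              * One bit per possible device handle, rounded up to whole bytes
              * (equivalent to DIV_ROUND_UP(max_devhandle, 8)).
              */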
2885         mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
2886         if (mrioc->facts.max_devhandle % 8)
2887                 mrioc->dev_handle_bitmap_sz++;
2888         mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
2889             GFP_KERNEL);
2890         if (!mrioc->removepend_bitmap)
2891                 goto out_failed;
2892
2893         mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
2894         if (MPI3MR_NUM_DEVRMCMD % 8)
2895                 mrioc->devrem_bitmap_sz++;
2896         mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
2897             GFP_KERNEL);
2898         if (!mrioc->devrem_bitmap)
2899                 goto out_failed;
2900
2901         mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
2902         if (MPI3MR_NUM_EVTACKCMD % 8)
2903                 mrioc->evtack_cmds_bitmap_sz++;
2904         mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
2905             GFP_KERNEL);
2906         if (!mrioc->evtack_cmds_bitmap)
2907                 goto out_failed;
2908
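             /*
              * The free queue depths are one larger than the number of
              * buffers, presumably so the producer index never catches up
              * with the consumer index while every buffer is posted.
              */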
2909         mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
2910         mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
2911         mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
2912         mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
2913
2914         /* reply buffer pool, 16 byte align */
2915         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
2916         mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
2917             &mrioc->pdev->dev, sz, 16, 0);
2918         if (!mrioc->reply_buf_pool) {
2919                 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
2920                 goto out_failed;
2921         }
2922
2923         mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
2924             &mrioc->reply_buf_dma);
2925         if (!mrioc->reply_buf)
2926                 goto out_failed;
2927
2928         mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
2929
2930         /* reply free queue, 8 byte align */
2931         sz = mrioc->reply_free_qsz * 8;
2932         mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
2933             &mrioc->pdev->dev, sz, 8, 0);
2934         if (!mrioc->reply_free_q_pool) {
2935                 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
2936                 goto out_failed;
2937         }
2938         mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
2939             GFP_KERNEL, &mrioc->reply_free_q_dma);
2940         if (!mrioc->reply_free_q)
2941                 goto out_failed;
2942
2943         /* sense buffer pool,  4 byte align */
2944         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2945         mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
2946             &mrioc->pdev->dev, sz, 4, 0);
2947         if (!mrioc->sense_buf_pool) {
2948                 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
2949                 goto out_failed;
2950         }
2951         mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
2952             &mrioc->sense_buf_dma);
2953         if (!mrioc->sense_buf)
2954                 goto out_failed;
2955
2956         /* sense buffer queue, 8 byte align */
2957         sz = mrioc->sense_buf_q_sz * 8;
2958         mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
2959             &mrioc->pdev->dev, sz, 8, 0);
2960         if (!mrioc->sense_buf_q_pool) {
2961                 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
2962                 goto out_failed;
2963         }
2964         mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
2965             GFP_KERNEL, &mrioc->sense_buf_q_dma);
2966         if (!mrioc->sense_buf_q)
2967                 goto out_failed;
2968
2969         return retval;
2970
2971 out_failed:
2972         retval = -1;
2973         return retval;
2974 }
2975
2976 /**
2977  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
2978  * buffers
2979  * @mrioc: Adapter instance reference
2980  *
2981  * Helper function to initialize reply and sense buffers along
2982  * with some debug prints.
2983  *
2984  * Return:  None.
2985  */
2986 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
2987 {
2988         u32 sz, i;
2989         dma_addr_t phy_addr;
2990
2991         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
2992         ioc_info(mrioc,
2993             "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2994             mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
2995             (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
2996         sz = mrioc->reply_free_qsz * 8;
2997         ioc_info(mrioc,
2998             "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2999             mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3000             (unsigned long long)mrioc->reply_free_q_dma);
3001         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3002         ioc_info(mrioc,
3003             "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3004             mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3005             (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3006         sz = mrioc->sense_buf_q_sz * 8;
3007         ioc_info(mrioc,
3008             "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3009             mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3010             (unsigned long long)mrioc->sense_buf_q_dma);
3011
3012         /* initialize Reply buffer Queue */
3013         for (i = 0, phy_addr = mrioc->reply_buf_dma;
3014             i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3015                 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3016         mrioc->reply_free_q[i] = cpu_to_le64(0);
3017
3018         /* initialize Sense Buffer Queue */
3019         for (i = 0, phy_addr = mrioc->sense_buf_dma;
3020             i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3021                 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3022         mrioc->sense_buf_q[i] = cpu_to_le64(0);
3023 }
3024
3025 /**
3026  * mpi3mr_issue_iocinit - Send IOC Init
3027  * @mrioc: Adapter instance reference
3028  *
3029  * Issue IOC Init MPI request through admin queue and wait for
3030  * the completion of it or time out.
3031  *
3032  * Return: 0 on success, non-zero on failures.
3033  */
3034 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3035 {
3036         struct mpi3_ioc_init_request iocinit_req;
3037         struct mpi3_driver_info_layout *drv_info;
3038         dma_addr_t data_dma;
3039         u32 data_len = sizeof(*drv_info);
3040         int retval = 0;
3041         ktime_t current_time;
3042
3043         drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3044             GFP_KERNEL);
3045         if (!drv_info) {
3046                 retval = -1;
3047                 goto out;
3048         }
3049         mpimr_initialize_reply_sbuf_queues(mrioc);
3050
3051         drv_info->information_length = cpu_to_le32(data_len);
3052         strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3053         strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3054         strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3055         strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3056         strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3057         strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3058             sizeof(drv_info->driver_release_date));
3059         drv_info->driver_capabilities = 0;
3060         memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3061             sizeof(mrioc->driver_info));
3062
3063         memset(&iocinit_req, 0, sizeof(iocinit_req));
3064         mutex_lock(&mrioc->init_cmds.mutex);
3065         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3066                 retval = -1;
3067                 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3068                 mutex_unlock(&mrioc->init_cmds.mutex);
3069                 goto out;
3070         }
3071         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3072         mrioc->init_cmds.is_waiting = 1;
3073         mrioc->init_cmds.callback = NULL;
3074         iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3075         iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3076         iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3077         iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3078         iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3079         iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3080         iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3081         iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3082         iocinit_req.reply_free_queue_address =
3083             cpu_to_le64(mrioc->reply_free_q_dma);
3084         iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3085         iocinit_req.sense_buffer_free_queue_depth =
3086             cpu_to_le16(mrioc->sense_buf_q_sz);
3087         iocinit_req.sense_buffer_free_queue_address =
3088             cpu_to_le64(mrioc->sense_buf_q_dma);
3089         iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3090
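             /* pass the current host wall-clock time to the firmware in milliseconds */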
3091         current_time = ktime_get_real();
3092         iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3093
3094         init_completion(&mrioc->init_cmds.done);
3095         retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3096             sizeof(iocinit_req), 1);
3097         if (retval) {
3098                 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3099                 goto out_unlock;
3100         }
3101         wait_for_completion_timeout(&mrioc->init_cmds.done,
3102             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3103         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3104                 mpi3mr_check_rh_fault_ioc(mrioc,
3105                     MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3106                 ioc_err(mrioc, "ioc_init timed out\n");
3107                 retval = -1;
3108                 goto out_unlock;
3109         }
3110         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3111             != MPI3_IOCSTATUS_SUCCESS) {
3112                 ioc_err(mrioc,
3113                     "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3114                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3115                     mrioc->init_cmds.ioc_loginfo);
3116                 retval = -1;
3117                 goto out_unlock;
3118         }
3119
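             /*
              * Publish the initial host indices so the firmware can start
              * consuming the posted reply free and sense buffer queue entries.
              */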
3120         mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3121         writel(mrioc->reply_free_queue_host_index,
3122             &mrioc->sysif_regs->reply_free_host_index);
3123
3124         mrioc->sbq_host_index = mrioc->num_sense_bufs;
3125         writel(mrioc->sbq_host_index,
3126             &mrioc->sysif_regs->sense_buffer_free_host_index);
3127 out_unlock:
3128         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3129         mutex_unlock(&mrioc->init_cmds.mutex);
3130
3131 out:
3132         if (drv_info)
3133                 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3134                     data_dma);
3135
3136         return retval;
3137 }
3138
3139 /**
3140  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3141  * @mrioc: Adapter instance reference
3142  * @event: MPI event ID
3143  *
3144  * Un mask the specific event by resetting the event_mask
3145  * bitmap.
3146  *
3147  * Return: 0 on success, non-zero on failures.
3148  */
3149 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3150 {
3151         u32 desired_event;
3152         u8 word;
3153
3154         if (event >= 128)
3155                 return;
3156
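             /*
              * Each 32-bit mask word covers 32 event IDs; clearing the bit
              * unmasks the corresponding event.
              */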
3157         desired_event = (1 << (event % 32));
3158         word = event / 32;
3159
3160         mrioc->event_masks[word] &= ~desired_event;
3161 }
3162
3163 /**
3164  * mpi3mr_issue_event_notification - Send event notification
3165  * @mrioc: Adapter instance reference
3166  *
3167  * Issue event notification MPI request through admin queue and
3168  * wait for the completion of it or time out.
3169  *
3170  * Return: 0 on success, non-zero on failures.
3171  */
3172 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3173 {
3174         struct mpi3_event_notification_request evtnotify_req;
3175         int retval = 0;
3176         u8 i;
3177
3178         memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3179         mutex_lock(&mrioc->init_cmds.mutex);
3180         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3181                 retval = -1;
3182                 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3183                 mutex_unlock(&mrioc->init_cmds.mutex);
3184                 goto out;
3185         }
3186         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3187         mrioc->init_cmds.is_waiting = 1;
3188         mrioc->init_cmds.callback = NULL;
3189         evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3190         evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3191         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3192                 evtnotify_req.event_masks[i] =
3193                     cpu_to_le32(mrioc->event_masks[i]);
3194         init_completion(&mrioc->init_cmds.done);
3195         retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3196             sizeof(evtnotify_req), 1);
3197         if (retval) {
3198                 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3199                 goto out_unlock;
3200         }
3201         wait_for_completion_timeout(&mrioc->init_cmds.done,
3202             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3203         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3204                 ioc_err(mrioc, "event notification timed out\n");
3205                 mpi3mr_check_rh_fault_ioc(mrioc,
3206                     MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3207                 retval = -1;
3208                 goto out_unlock;
3209         }
3210         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3211             != MPI3_IOCSTATUS_SUCCESS) {
3212                 ioc_err(mrioc,
3213                     "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3214                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3215                     mrioc->init_cmds.ioc_loginfo);
3216                 retval = -1;
3217                 goto out_unlock;
3218         }
3219
3220 out_unlock:
3221         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3222         mutex_unlock(&mrioc->init_cmds.mutex);
3223 out:
3224         return retval;
3225 }
3226
3227 /**
3228  * mpi3mr_process_event_ack - Process event acknowledgment
3229  * @mrioc: Adapter instance reference
3230  * @event: MPI3 event ID
3231  * @event_ctx: event context
3232  *
3233  * Send event acknowledgment through admin queue and wait for
3234  * it to complete.
3235  *
3236  * Return: 0 on success, non-zero on failures.
3237  */
3238 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3239         u32 event_ctx)
3240 {
3241         struct mpi3_event_ack_request evtack_req;
3242         int retval = 0;
3243
3244         memset(&evtack_req, 0, sizeof(evtack_req));
3245         mutex_lock(&mrioc->init_cmds.mutex);
3246         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3247                 retval = -1;
3248                 ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3249                 mutex_unlock(&mrioc->init_cmds.mutex);
3250                 goto out;
3251         }
3252         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3253         mrioc->init_cmds.is_waiting = 1;
3254         mrioc->init_cmds.callback = NULL;
3255         evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3256         evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3257         evtack_req.event = event;
3258         evtack_req.event_context = cpu_to_le32(event_ctx);
3259
3260         init_completion(&mrioc->init_cmds.done);
3261         retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3262             sizeof(evtack_req), 1);
3263         if (retval) {
3264                 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3265                 goto out_unlock;
3266         }
3267         wait_for_completion_timeout(&mrioc->init_cmds.done,
3268             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3269         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3270                 ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3271                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3272                         mpi3mr_soft_reset_handler(mrioc,
3273                             MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
3274                 retval = -1;
3275                 goto out_unlock;
3276         }
3277         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3278             != MPI3_IOCSTATUS_SUCCESS) {
3279                 ioc_err(mrioc,
3280                     "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3281                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3282                     mrioc->init_cmds.ioc_loginfo);
3283                 retval = -1;
3284                 goto out_unlock;
3285         }
3286
3287 out_unlock:
3288         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3289         mutex_unlock(&mrioc->init_cmds.mutex);
3290 out:
3291         return retval;
3292 }
3293
3294 /**
3295  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3296  * @mrioc: Adapter instance reference
3297  *
3298  * Allocate chain buffers and set a bitmap to indicate free
3299  * chain buffers. Chain buffers are used to pass the SGE
3300  * information along with MPI3 SCSI IO requests for host I/O.
3301  *
3302  * Return: 0 on success, non-zero on failure
3303  */
3304 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3305 {
3306         int retval = 0;
3307         u32 sz, i;
3308         u16 num_chains;
3309
3310         if (mrioc->chain_sgl_list)
3311                 return retval;
3312
3313         num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3314
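             /*
              * When DIX protection is enabled, reserve additional chain
              * frames, presumably for protection information SGLs.
              */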
3315         if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3316             | SHOST_DIX_TYPE1_PROTECTION
3317             | SHOST_DIX_TYPE2_PROTECTION
3318             | SHOST_DIX_TYPE3_PROTECTION))
3319                 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3320
3321         mrioc->chain_buf_count = num_chains;
3322         sz = sizeof(struct chain_element) * num_chains;
3323         mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3324         if (!mrioc->chain_sgl_list)
3325                 goto out_failed;
3326
3327         sz = MPI3MR_PAGE_SIZE_4K;
3328         mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3329             &mrioc->pdev->dev, sz, 16, 0);
3330         if (!mrioc->chain_buf_pool) {
3331                 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3332                 goto out_failed;
3333         }
3334
3335         for (i = 0; i < num_chains; i++) {
3336                 mrioc->chain_sgl_list[i].addr =
3337                     dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3338                     &mrioc->chain_sgl_list[i].dma_addr);
3339
3340                 if (!mrioc->chain_sgl_list[i].addr)
3341                         goto out_failed;
3342         }
3343         mrioc->chain_bitmap_sz = num_chains / 8;
3344         if (num_chains % 8)
3345                 mrioc->chain_bitmap_sz++;
3346         mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
3347         if (!mrioc->chain_bitmap)
3348                 goto out_failed;
3349         return retval;
3350 out_failed:
3351         retval = -1;
3352         return retval;
3353 }
3354
3355 /**
3356  * mpi3mr_port_enable_complete - Mark port enable complete
3357  * @mrioc: Adapter instance reference
3358  * @drv_cmd: Internal command tracker
3359  *
3360  * Callback for the asynchronous port enable request; it updates the
3361  * driver command state to indicate that port enable is complete.
3362  *
3363  * Return: Nothing
3364  */
3365 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3366         struct mpi3mr_drv_cmd *drv_cmd)
3367 {
3368         drv_cmd->state = MPI3MR_CMD_NOTUSED;
3369         drv_cmd->callback = NULL;
3370         mrioc->scan_failed = drv_cmd->ioc_status;
3371         mrioc->scan_started = 0;
3372 }
3373
3374 /**
3375  * mpi3mr_issue_port_enable - Issue Port Enable
3376  * @mrioc: Adapter instance reference
3377  * @async: Flag to wait for completion or not
3378  *
3379  * Issue Port Enable MPI request through admin queue and if the
3380  * async flag is not set wait for the completion of the port
3381  * enable or time out.
3382  *
3383  * Return: 0 on success, non-zero on failures.
3384  */
3385 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3386 {
3387         struct mpi3_port_enable_request pe_req;
3388         int retval = 0;
3389         u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3390
3391         memset(&pe_req, 0, sizeof(pe_req));
3392         mutex_lock(&mrioc->init_cmds.mutex);
3393         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3394                 retval = -1;
3395                 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3396                 mutex_unlock(&mrioc->init_cmds.mutex);
3397                 goto out;
3398         }
3399         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3400         if (async) {
3401                 mrioc->init_cmds.is_waiting = 0;
3402                 mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3403         } else {
3404                 mrioc->init_cmds.is_waiting = 1;
3405                 mrioc->init_cmds.callback = NULL;
3406                 init_completion(&mrioc->init_cmds.done);
3407         }
3408         pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3409         pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3410
3411         retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3412         if (retval) {
3413                 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3414                 goto out_unlock;
3415         }
3416         if (async) {
3417                 mutex_unlock(&mrioc->init_cmds.mutex);
3418                 goto out;
3419         }
3420
3421         wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3422         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3423                 ioc_err(mrioc, "port enable timed out\n");
3424                 retval = -1;
3425                 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3426                 goto out_unlock;
3427         }
3428         mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3429
3430 out_unlock:
3431         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3432         mutex_unlock(&mrioc->init_cmds.mutex);
3433 out:
3434         return retval;
3435 }
3436
3437 /* Protocol type to name mapper structure */
3438 static const struct {
3439         u8 protocol;
3440         char *name;
3441 } mpi3mr_protocols[] = {
3442         { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3443         { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3444         { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3445 };
3446
3447 /* Capability to name mapper structure */
3448 static const struct {
3449         u32 capability;
3450         char *name;
3451 } mpi3mr_capabilities[] = {
3452         { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
3453 };
3454
3455 /**
3456  * mpi3mr_print_ioc_info - Display controller information
3457  * @mrioc: Adapter instance reference
3458  *
3459  * Display controller personality, capabilities, supported
3460  * protocols, etc.
3461  *
3462  * Return: Nothing
3463  */
3464 static void
3465 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3466 {
3467         int i = 0, bytes_written = 0;
3468         char personality[16];
3469         char protocol[50] = {0};
3470         char capabilities[100] = {0};
3471         struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3472
3473         switch (mrioc->facts.personality) {
3474         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3475                 strncpy(personality, "Enhanced HBA", sizeof(personality));
3476                 break;
3477         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3478                 strncpy(personality, "RAID", sizeof(personality));
3479                 break;
3480         default:
3481                 strncpy(personality, "Unknown", sizeof(personality));
3482                 break;
3483         }
3484
3485         ioc_info(mrioc, "Running in %s Personality", personality);
3486
3487         ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3488             fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3489             fwver->ph_minor, fwver->cust_id, fwver->build_num);
3490
3491         for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3492                 if (mrioc->facts.protocol_flags &
3493                     mpi3mr_protocols[i].protocol) {
3494                         bytes_written += scnprintf(protocol + bytes_written,
3495                                     sizeof(protocol) - bytes_written, "%s%s",
3496                                     bytes_written ? "," : "",
3497                                     mpi3mr_protocols[i].name);
3498                 }
3499         }
3500
3501         bytes_written = 0;
3502         for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3503                 if (mrioc->facts.ioc_capabilities &
3504                     mpi3mr_capabilities[i].capability) {
3505                         bytes_written += scnprintf(capabilities + bytes_written,
3506                                     sizeof(capabilities) - bytes_written, "%s%s",
3507                                     bytes_written ? "," : "",
3508                                     mpi3mr_capabilities[i].name);
3509                 }
3510         }
3511
3512         ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3513                  protocol, capabilities);
3514 }
3515
3516 /**
3517  * mpi3mr_cleanup_resources - Free PCI resources
3518  * @mrioc: Adapter instance reference
3519  *
3520  * Unmap PCI device memory and disable PCI device.
3521  *
3522  * Return: Nothing.
3523  */
3524 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3525 {
3526         struct pci_dev *pdev = mrioc->pdev;
3527
3528         mpi3mr_cleanup_isr(mrioc);
3529
3530         if (mrioc->sysif_regs) {
3531                 iounmap((void __iomem *)mrioc->sysif_regs);
3532                 mrioc->sysif_regs = NULL;
3533         }
3534
3535         if (pci_is_enabled(pdev)) {
3536                 if (mrioc->bars)
3537                         pci_release_selected_regions(pdev, mrioc->bars);
3538                 pci_disable_device(pdev);
3539         }
3540 }
3541
3542 /**
3543  * mpi3mr_setup_resources - Enable PCI resources
3544  * @mrioc: Adapter instance reference
3545  *
3546  * Enable PCI device memory, MSI-x registers and set DMA mask.
3547  *
3548  * Return: 0 on success and non-zero on failure.
3549  */
3550 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3551 {
3552         struct pci_dev *pdev = mrioc->pdev;
3553         u32 memap_sz = 0;
3554         int i, retval = 0, capb = 0;
3555         u16 message_control;
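             /*
              * Reuse a previously selected DMA mask if one is set; otherwise
              * default to 64-bit DMA when the platform requires addressing
              * beyond 32 bits and dma_addr_t can hold it, else use 32-bit.
              */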
3556         u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3557             (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
3558             (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3559
3560         if (pci_enable_device_mem(pdev)) {
3561                 ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3562                 retval = -ENODEV;
3563                 goto out_failed;
3564         }
3565
3566         capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3567         if (!capb) {
3568                 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3569                 retval = -ENODEV;
3570                 goto out_failed;
3571         }
3572         mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3573
3574         if (pci_request_selected_regions(pdev, mrioc->bars,
3575             mrioc->driver_name)) {
3576                 ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3577                 retval = -ENODEV;
3578                 goto out_failed;
3579         }
3580
3581         for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3582                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3583                         mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3584                         memap_sz = pci_resource_len(pdev, i);
3585                         mrioc->sysif_regs =
3586                             ioremap(mrioc->sysif_regs_phys, memap_sz);
3587                         break;
3588                 }
3589         }
3590
3591         pci_set_master(pdev);
3592
3593         retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3594         if (retval) {
3595                 if (dma_mask != DMA_BIT_MASK(32)) {
3596                         ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3597                         dma_mask = DMA_BIT_MASK(32);
3598                         retval = dma_set_mask_and_coherent(&pdev->dev,
3599                             dma_mask);
3600                 }
3601                 if (retval) {
3602                         mrioc->dma_mask = 0;
3603                         ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3604                         goto out_failed;
3605                 }
3606         }
3607         mrioc->dma_mask = dma_mask;
3608
3609         if (!mrioc->sysif_regs) {
3610                 ioc_err(mrioc,
3611                     "Unable to map adapter memory or resource not found\n");
3612                 retval = -EINVAL;
3613                 goto out_failed;
3614         }
3615
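             /* the MSI-X Message Control register encodes the table size as one less than the vector count */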
3616         pci_read_config_word(pdev, capb + 2, &message_control);
3617         mrioc->msix_count = (message_control & 0x3FF) + 1;
3618
3619         pci_save_state(pdev);
3620
3621         pci_set_drvdata(pdev, mrioc->shost);
3622
3623         mpi3mr_ioc_disable_intr(mrioc);
3624
3625         ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3626             (unsigned long long)mrioc->sysif_regs_phys,
3627             mrioc->sysif_regs, memap_sz);
3628         ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3629             mrioc->msix_count);
3630
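             /*
              * Cap the requested io_uring poll queues so that at least two
              * MSI-X vectors remain for the interrupt driven queues.
              */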
3631         if (!reset_devices && poll_queues > 0)
3632                 mrioc->requested_poll_qcount = min_t(int, poll_queues,
3633                                 mrioc->msix_count - 2);
3634         return retval;
3635
3636 out_failed:
3637         mpi3mr_cleanup_resources(mrioc);
3638         return retval;
3639 }
3640
3641 /**
3642  * mpi3mr_enable_events - Enable required events
3643  * @mrioc: Adapter instance reference
3644  *
3645  * This routine unmasks the events required by the driver by
3646  * sending the appropriate event mask bitmap through an event
3647  * notification request.
3648  *
3649  * Return: 0 on success and non-zero on failure.
3650  */
3651 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3652 {
3653         int retval = 0;
3654         u32  i;
3655
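             /* start with every event masked, then unmask only the events handled below */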
3656         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3657                 mrioc->event_masks[i] = -1;
3658
3659         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3660         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3661         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3662         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3663         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3664         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3665         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3666         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3667         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3668         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3669         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
3670         mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3671         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3672
3673         retval = mpi3mr_issue_event_notification(mrioc);
3674         if (retval)
3675                 ioc_err(mrioc, "failed to issue event notification %d\n",
3676                     retval);
3677         return retval;
3678 }
3679
3680 /**
3681  * mpi3mr_init_ioc - Initialize the controller
3682  * @mrioc: Adapter instance reference
3683  *
3684  * This is the controller initialization routine, executed either
3685  * after soft reset or from pci probe callback.
3686  * Setup the required resources, memory map the controller
3687  * registers, create admin and operational reply queue pairs,
3688  * allocate required memory for reply pool, sense buffer pool,
3689  * issue IOC init request to the firmware, unmask the events and
3690  * issue port enable to discover SAS/SATA/NVMe devices and RAID
3691  * volumes.
3692  *
3693  * Return: 0 on success and non-zero on failure.
3694  */
3695 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
3696 {
3697         int retval = 0;
3698         u8 retry = 0;
3699         struct mpi3_ioc_facts_data facts_data;
3700         u32 sz;
3701
3702 retry_init:
3703         retval = mpi3mr_bring_ioc_ready(mrioc);
3704         if (retval) {
3705                 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3706                     retval);
3707                 goto out_failed_noretry;
3708         }
3709
3710         retval = mpi3mr_setup_isr(mrioc, 1);
3711         if (retval) {
3712                 ioc_err(mrioc, "Failed to setup ISR error %d\n",
3713                     retval);
3714                 goto out_failed_noretry;
3715         }
3716
3717         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3718         if (retval) {
3719                 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3720                     retval);
3721                 goto out_failed;
3722         }
3723
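             /*
              * Reserve part of the firmware request pool for driver internal
              * commands; the remainder is available for host I/O.
              */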
3724         mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
3725
3726         mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
3727         atomic_set(&mrioc->pend_large_data_sz, 0);
3728
3729         if (reset_devices)
3730                 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
3731                     MPI3MR_HOST_IOS_KDUMP);
3732
3733         mrioc->reply_sz = mrioc->facts.reply_sz;
3734
3735         retval = mpi3mr_check_reset_dma_mask(mrioc);
3736         if (retval) {
3737                 ioc_err(mrioc, "Resetting dma mask failed %d\n",
3738                     retval);
3739                 goto out_failed_noretry;
3740         }
3741
3742         mpi3mr_print_ioc_info(mrioc);
3743
3744         dprint_init(mrioc, "allocating config page buffers\n");
3745         mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
3746             MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
3747         if (!mrioc->cfg_page) {
                     retval = -ENOMEM;
3748                 goto out_failed_noretry;
             }
3749
3750         mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
3751
3752         retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3753         if (retval) {
3754                 ioc_err(mrioc,
3755                     "%s :Failed to allocated reply sense buffers %d\n",
3756                     __func__, retval);
3757                 goto out_failed_noretry;
3758         }
3759
3760         retval = mpi3mr_alloc_chain_bufs(mrioc);
3761         if (retval) {
3762                 ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
3763                     retval);
3764                 goto out_failed_noretry;
3765         }
3766
3767         retval = mpi3mr_issue_iocinit(mrioc);
3768         if (retval) {
3769                 ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3770                     retval);
3771                 goto out_failed;
3772         }
3773
3774         retval = mpi3mr_print_pkg_ver(mrioc);
3775         if (retval) {
3776                 ioc_err(mrioc, "failed to get package version\n");
3777                 goto out_failed;
3778         }
3779
3780         retval = mpi3mr_setup_isr(mrioc, 0);
3781         if (retval) {
3782                 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3783                     retval);
3784                 goto out_failed_noretry;
3785         }
3786
3787         retval = mpi3mr_create_op_queues(mrioc);
3788         if (retval) {
3789                 ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3790                     retval);
3791                 goto out_failed;
3792         }
3793
3794         if (!mrioc->pel_seqnum_virt) {
3795                 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
3796                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
3797                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
3798                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
3799                     GFP_KERNEL);
3800                 if (!mrioc->pel_seqnum_virt) {
3801                         retval = -ENOMEM;
3802                         goto out_failed_noretry;
3803                 }
3804         }
3805
3806         if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
3807                 dprint_init(mrioc, "allocating memory for throttle groups\n");
3808                 sz = sizeof(struct mpi3mr_throttle_group_info);
3809                 mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
3810                     kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
3811                 if (!mrioc->throttle_groups) {
                             retval = -ENOMEM;
3812                         goto out_failed_noretry;
                     }
3813         }
3814
3815         retval = mpi3mr_enable_events(mrioc);
3816         if (retval) {
3817                 ioc_err(mrioc, "failed to enable events %d\n",
3818                     retval);
3819                 goto out_failed;
3820         }
3821
3822         ioc_info(mrioc, "controller initialization completed successfully\n");
3823         return retval;
3824 out_failed:
3825         if (retry < 2) {
3826                 retry++;
3827                 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
3828                     retry);
3829                 mpi3mr_memset_buffers(mrioc);
3830                 goto retry_init;
3831         }
3832 out_failed_noretry:
3833         ioc_err(mrioc, "controller initialization failed\n");
3834         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3835             MPI3MR_RESET_FROM_CTLR_CLEANUP);
3836         mrioc->unrecoverable = 1;
3837         return retval;
3838 }
3839
3840 /**
3841  * mpi3mr_reinit_ioc - Re-Initialize the controller
3842  * @mrioc: Adapter instance reference
3843  * @is_resume: Called from resume or reset path
3844  *
3845  * This is the controller re-initialization routine, executed from
3846  * the soft reset handler or resume callback. Creates
3847  * operational reply queue pairs, allocate required memory for
3848  * reply pool, sense buffer pool, issue IOC init request to the
3849  * firmware, unmask the events and issue port enable to discover
3850  * SAS/SATA/NVMe devices and RAID volumes.
3851  *
3852  * Return: 0 on success and non-zero on failure.
3853  */
3854 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
3855 {
3856         int retval = 0;
3857         u8 retry = 0;
3858         struct mpi3_ioc_facts_data facts_data;
3859
3860 retry_init:
3861         dprint_reset(mrioc, "bringing up the controller to ready state\n");
3862         retval = mpi3mr_bring_ioc_ready(mrioc);
3863         if (retval) {
3864                 ioc_err(mrioc, "failed to bring to ready state\n");
3865                 goto out_failed_noretry;
3866         }
3867
3868         if (is_resume) {
3869                 dprint_reset(mrioc, "setting up single ISR\n");
3870                 retval = mpi3mr_setup_isr(mrioc, 1);
3871                 if (retval) {
3872                         ioc_err(mrioc, "failed to setup ISR\n");
3873                         goto out_failed_noretry;
3874                 }
3875         } else
3876                 mpi3mr_ioc_enable_intr(mrioc);
3877
3878         dprint_reset(mrioc, "getting ioc_facts\n");
3879         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3880         if (retval) {
3881                 ioc_err(mrioc, "failed to get ioc_facts\n");
3882                 goto out_failed;
3883         }
3884
3885         dprint_reset(mrioc, "validating ioc_facts\n");
3886         retval = mpi3mr_revalidate_factsdata(mrioc);
3887         if (retval) {
3888                 ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
3889                 goto out_failed_noretry;
3890         }
3891
3892         mpi3mr_print_ioc_info(mrioc);
3893
3894         dprint_reset(mrioc, "sending ioc_init\n");
3895         retval = mpi3mr_issue_iocinit(mrioc);
3896         if (retval) {
3897                 ioc_err(mrioc, "failed to send ioc_init\n");
3898                 goto out_failed;
3899         }
3900
3901         dprint_reset(mrioc, "getting package version\n");
3902         retval = mpi3mr_print_pkg_ver(mrioc);
3903         if (retval) {
3904                 ioc_err(mrioc, "failed to get package version\n");
3905                 goto out_failed;
3906         }
3907
3908         if (is_resume) {
3909                 dprint_reset(mrioc, "setting up multiple ISR\n");
3910                 retval = mpi3mr_setup_isr(mrioc, 0);
3911                 if (retval) {
3912                         ioc_err(mrioc, "failed to re-setup ISR\n");
3913                         goto out_failed_noretry;
3914                 }
3915         }
3916
3917         dprint_reset(mrioc, "creating operational queue pairs\n");
3918         retval = mpi3mr_create_op_queues(mrioc);
3919         if (retval) {
3920                 ioc_err(mrioc, "failed to create operational queue pairs\n");
3921                 goto out_failed;
3922         }
3923
3924         if (!mrioc->pel_seqnum_virt) {
3925                 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
3926                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
3927                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
3928                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
3929                     GFP_KERNEL);
3930                 if (!mrioc->pel_seqnum_virt) {
3931                         retval = -ENOMEM;
3932                         goto out_failed_noretry;
3933                 }
3934         }
3935
3936         if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
3937                 ioc_err(mrioc,
3938                     "cannot create minimum number of operational queues expected:%d created:%d\n",
3939                     mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
                     retval = -1;
3940                 goto out_failed_noretry;
3941         }
3942
3943         dprint_reset(mrioc, "enabling events\n");
3944         retval = mpi3mr_enable_events(mrioc);
3945         if (retval) {
3946                 ioc_err(mrioc, "failed to enable events\n");
3947                 goto out_failed;
3948         }
3949
3950         ioc_info(mrioc, "sending port enable\n");
3951         retval = mpi3mr_issue_port_enable(mrioc, 0);
3952         if (retval) {
3953                 ioc_err(mrioc, "failed to issue port enable\n");
3954                 goto out_failed;
3955         }
3956
3957         ioc_info(mrioc, "controller %s completed successfully\n",
3958             (is_resume)?"resume":"re-initialization");
3959         return retval;
3960 out_failed:
3961         if (retry < 2) {
3962                 retry++;
3963                 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
3964                     (is_resume)?"resume":"re-initialization", retry);
3965                 mpi3mr_memset_buffers(mrioc);
3966                 goto retry_init;
3967         }
3968 out_failed_noretry:
3969         ioc_err(mrioc, "controller %s is failed\n",
3970             (is_resume)?"resume":"re-initialization");
3971         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3972             MPI3MR_RESET_FROM_CTLR_CLEANUP);
3973         mrioc->unrecoverable = 1;
3974         return retval;
3975 }
3976
3977 /**
3978  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
3979  *                                      segments
3980  * @mrioc: Adapter instance reference
3981  * @qidx: Operational reply queue index
3982  *
3983  * Return: Nothing.
3984  */
3985 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3986 {
3987         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
3988         struct segments *segments;
3989         int i, size;
3990
3991         if (!op_reply_q->q_segments)
3992                 return;
3993
3994         size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
3995         segments = op_reply_q->q_segments;
3996         for (i = 0; i < op_reply_q->num_segments; i++)
3997                 memset(segments[i].segment, 0, size);
3998 }
3999
4000 /**
4001  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4002  *                                      segments
4003  * @mrioc: Adapter instance reference
4004  * @qidx: Operational request queue index
4005  *
4006  * Return: Nothing.
4007  */
4008 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4009 {
4010         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4011         struct segments *segments;
4012         int i, size;
4013
4014         if (!op_req_q->q_segments)
4015                 return;
4016
4017         size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4018         segments = op_req_q->q_segments;
4019         for (i = 0; i < op_req_q->num_segments; i++)
4020                 memset(segments[i].segment, 0, size);
4021 }
4022
4023 /**
4024  * mpi3mr_memset_buffers - memset memory for a controller
4025  * @mrioc: Adapter instance reference
4026  *
4027  * Clear all the memory allocated for a controller, typically
4028  * called post reset to reuse the memory allocated during the
4029  * controller init.
4030  *
4031  * Return: Nothing.
4032  */
4033 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4034 {
4035         u16 i;
4036         struct mpi3mr_throttle_group_info *tg;
4037
4038         mrioc->change_count = 0;
4039         mrioc->active_poll_qcount = 0;
4040         mrioc->default_qcount = 0;
4041         if (mrioc->admin_req_base)
4042                 memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4043         if (mrioc->admin_reply_base)
4044                 memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4045
4046         if (mrioc->init_cmds.reply) {
4047                 memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4048                 memset(mrioc->bsg_cmds.reply, 0,
4049                     sizeof(*mrioc->bsg_cmds.reply));
4050                 memset(mrioc->host_tm_cmds.reply, 0,
4051                     sizeof(*mrioc->host_tm_cmds.reply));
4052                 memset(mrioc->pel_cmds.reply, 0,
4053                     sizeof(*mrioc->pel_cmds.reply));
4054                 memset(mrioc->pel_abort_cmd.reply, 0,
4055                     sizeof(*mrioc->pel_abort_cmd.reply));
4056                 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4057                         memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4058                             sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4059                 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4060                         memset(mrioc->evtack_cmds[i].reply, 0,
4061                             sizeof(*mrioc->evtack_cmds[i].reply));
4062                 memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4063                 memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4064                 memset(mrioc->evtack_cmds_bitmap, 0,
4065                     mrioc->evtack_cmds_bitmap_sz);
4066         }
4067
4068         for (i = 0; i < mrioc->num_queues; i++) {
4069                 mrioc->op_reply_qinfo[i].qid = 0;
4070                 mrioc->op_reply_qinfo[i].ci = 0;
4071                 mrioc->op_reply_qinfo[i].num_replies = 0;
4072                 mrioc->op_reply_qinfo[i].ephase = 0;
4073                 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4074                 atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4075                 mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4076
4077                 mrioc->req_qinfo[i].ci = 0;
4078                 mrioc->req_qinfo[i].pi = 0;
4079                 mrioc->req_qinfo[i].num_requests = 0;
4080                 mrioc->req_qinfo[i].qid = 0;
4081                 mrioc->req_qinfo[i].reply_qid = 0;
4082                 spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4083                 mpi3mr_memset_op_req_q_buffers(mrioc, i);
4084         }
4085
4086         atomic_set(&mrioc->pend_large_data_sz, 0);
4087         if (mrioc->throttle_groups) {
4088                 tg = mrioc->throttle_groups;
4089                 for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4090                         tg->id = 0;
4091                         tg->fw_qd = 0;
4092                         tg->modified_qd = 0;
4093                         tg->io_divert = 0;
4094                         tg->need_qd_reduction = 0;
4095                         tg->high = 0;
4096                         tg->low = 0;
4097                         tg->qd_reduction = 0;
4098                         atomic_set(&tg->pend_large_data_sz, 0);
4099                 }
4100         }
4101 }
4102
4103 /**
4104  * mpi3mr_free_mem - Free memory allocated for a controller
4105  * @mrioc: Adapter instance reference
4106  *
4107  * Free all the memory allocated for a controller.
4108  *
4109  * Return: Nothing.
4110  */
4111 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4112 {
4113         u16 i;
4114         struct mpi3mr_intr_info *intr_info;
4115
4116         if (mrioc->sense_buf_pool) {
4117                 if (mrioc->sense_buf)
4118                         dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4119                             mrioc->sense_buf_dma);
4120                 dma_pool_destroy(mrioc->sense_buf_pool);
4121                 mrioc->sense_buf = NULL;
4122                 mrioc->sense_buf_pool = NULL;
4123         }
4124         if (mrioc->sense_buf_q_pool) {
4125                 if (mrioc->sense_buf_q)
4126                         dma_pool_free(mrioc->sense_buf_q_pool,
4127                             mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4128                 dma_pool_destroy(mrioc->sense_buf_q_pool);
4129                 mrioc->sense_buf_q = NULL;
4130                 mrioc->sense_buf_q_pool = NULL;
4131         }
4132
4133         if (mrioc->reply_buf_pool) {
4134                 if (mrioc->reply_buf)
4135                         dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4136                             mrioc->reply_buf_dma);
4137                 dma_pool_destroy(mrioc->reply_buf_pool);
4138                 mrioc->reply_buf = NULL;
4139                 mrioc->reply_buf_pool = NULL;
4140         }
4141         if (mrioc->reply_free_q_pool) {
4142                 if (mrioc->reply_free_q)
4143                         dma_pool_free(mrioc->reply_free_q_pool,
4144                             mrioc->reply_free_q, mrioc->reply_free_q_dma);
4145                 dma_pool_destroy(mrioc->reply_free_q_pool);
4146                 mrioc->reply_free_q = NULL;
4147                 mrioc->reply_free_q_pool = NULL;
4148         }
4149
4150         for (i = 0; i < mrioc->num_op_req_q; i++)
4151                 mpi3mr_free_op_req_q_segments(mrioc, i);
4152
4153         for (i = 0; i < mrioc->num_op_reply_q; i++)
4154                 mpi3mr_free_op_reply_q_segments(mrioc, i);
4155
4156         for (i = 0; i < mrioc->intr_info_count; i++) {
4157                 intr_info = mrioc->intr_info + i;
4158                 intr_info->op_reply_q = NULL;
4159         }
4160
4161         kfree(mrioc->req_qinfo);
4162         mrioc->req_qinfo = NULL;
4163         mrioc->num_op_req_q = 0;
4164
4165         kfree(mrioc->op_reply_qinfo);
4166         mrioc->op_reply_qinfo = NULL;
4167         mrioc->num_op_reply_q = 0;
4168
4169         kfree(mrioc->init_cmds.reply);
4170         mrioc->init_cmds.reply = NULL;
4171
4172         kfree(mrioc->bsg_cmds.reply);
4173         mrioc->bsg_cmds.reply = NULL;
4174
4175         kfree(mrioc->host_tm_cmds.reply);
4176         mrioc->host_tm_cmds.reply = NULL;
4177
4178         kfree(mrioc->pel_cmds.reply);
4179         mrioc->pel_cmds.reply = NULL;
4180
4181         kfree(mrioc->pel_abort_cmd.reply);
4182         mrioc->pel_abort_cmd.reply = NULL;
4183
4184         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4185                 kfree(mrioc->evtack_cmds[i].reply);
4186                 mrioc->evtack_cmds[i].reply = NULL;
4187         }
4188
4189         kfree(mrioc->removepend_bitmap);
4190         mrioc->removepend_bitmap = NULL;
4191
4192         kfree(mrioc->devrem_bitmap);
4193         mrioc->devrem_bitmap = NULL;
4194
4195         kfree(mrioc->evtack_cmds_bitmap);
4196         mrioc->evtack_cmds_bitmap = NULL;
4197
4198         kfree(mrioc->chain_bitmap);
4199         mrioc->chain_bitmap = NULL;
4200
4201         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4202                 kfree(mrioc->dev_rmhs_cmds[i].reply);
4203                 mrioc->dev_rmhs_cmds[i].reply = NULL;
4204         }
4205
4206         if (mrioc->chain_buf_pool) {
4207                 for (i = 0; i < mrioc->chain_buf_count; i++) {
4208                         if (mrioc->chain_sgl_list[i].addr) {
4209                                 dma_pool_free(mrioc->chain_buf_pool,
4210                                     mrioc->chain_sgl_list[i].addr,
4211                                     mrioc->chain_sgl_list[i].dma_addr);
4212                                 mrioc->chain_sgl_list[i].addr = NULL;
4213                         }
4214                 }
4215                 dma_pool_destroy(mrioc->chain_buf_pool);
4216                 mrioc->chain_buf_pool = NULL;
4217         }
4218
4219         kfree(mrioc->chain_sgl_list);
4220         mrioc->chain_sgl_list = NULL;
4221
4222         if (mrioc->admin_reply_base) {
4223                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4224                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
4225                 mrioc->admin_reply_base = NULL;
4226         }
4227         if (mrioc->admin_req_base) {
4228                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4229                     mrioc->admin_req_base, mrioc->admin_req_dma);
4230                 mrioc->admin_req_base = NULL;
4231         }
4232
4233         if (mrioc->pel_seqnum_virt) {
4234                 dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4235                     mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4236                 mrioc->pel_seqnum_virt = NULL;
4237         }
4238
4239         kfree(mrioc->logdata_buf);
4240         mrioc->logdata_buf = NULL;
4241
4242 }
4243
4244 /**
4245  * mpi3mr_issue_ioc_shutdown - shutdown controller
4246  * @mrioc: Adapter instance reference
4247  *
4248  * Send a shutdown notification to the controller and wait up to
4249  * shutdown_timeout for it to complete.
4250  *
4251  * Return: Nothing.
4252  */
4253 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4254 {
4255         u32 ioc_config, ioc_status;
4256         u8 retval = 1;
4257         u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4258
4259         ioc_info(mrioc, "Issuing shutdown Notification\n");
4260         if (mrioc->unrecoverable) {
4261                 ioc_warn(mrioc,
4262                     "IOC is unrecoverable, shutdown is not issued\n");
4263                 return;
4264         }
4265         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4266         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4267             == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4268                 ioc_info(mrioc, "shutdown already in progress\n");
4269                 return;
4270         }
4271
4272         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4273         ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4274         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4275
4276         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4277
4278         if (mrioc->facts.shutdown_timeout)
4279                 timeout = mrioc->facts.shutdown_timeout * 10;
4280
4281         do {
4282                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4283                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4284                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4285                         retval = 0;
4286                         break;
4287                 }
4288                 msleep(100);
4289         } while (--timeout);
4290
4291         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4292         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4293
4294         if (retval) {
4295                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4296                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4297                         ioc_warn(mrioc,
4298                             "shutdown still in progress after timeout\n");
4299         }
4300
4301         ioc_info(mrioc,
4302             "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
4303             (!retval) ? "successful" : "failed", ioc_status,
4304             ioc_config);
4305 }
4306
4307 /**
4308  * mpi3mr_cleanup_ioc - Cleanup controller
4309  * @mrioc: Adapter instance reference
4310  *
4311  * Controller cleanup handler; a message unit reset or soft reset
4312  * and a shutdown notification are issued to the controller.
4313  *
4314  * Return: Nothing.
4315  */
4316 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4317 {
4318         enum mpi3mr_iocstate ioc_state;
4319
4320         dprint_exit(mrioc, "cleaning up the controller\n");
4321         mpi3mr_ioc_disable_intr(mrioc);
4322
4323         ioc_state = mpi3mr_get_iocstate(mrioc);
4324
4325         if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
4326             (ioc_state == MRIOC_STATE_READY)) {
4327                 if (mpi3mr_issue_and_process_mur(mrioc,
4328                     MPI3MR_RESET_FROM_CTLR_CLEANUP))
4329                         mpi3mr_issue_reset(mrioc,
4330                             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4331                             MPI3MR_RESET_FROM_MUR_FAILURE);
4332                 mpi3mr_issue_ioc_shutdown(mrioc);
4333         }
4334         dprint_exit(mrioc, "controller cleanup completed\n");
4335 }
4336
4337 /**
4338  * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4339  * @mrioc: Adapter instance reference
4340  * @cmdptr: Internal command tracker
4341  *
4342  * Complete an internal driver command with a state indicating it
4343  * was completed due to reset.
4344  *
4345  * Return: Nothing.
4346  */
4347 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4348         struct mpi3mr_drv_cmd *cmdptr)
4349 {
4350         if (cmdptr->state & MPI3MR_CMD_PENDING) {
4351                 cmdptr->state |= MPI3MR_CMD_RESET;
4352                 cmdptr->state &= ~MPI3MR_CMD_PENDING;
4353                 if (cmdptr->is_waiting) {
4354                         complete(&cmdptr->done);
4355                         cmdptr->is_waiting = 0;
4356                 } else if (cmdptr->callback)
4357                         cmdptr->callback(mrioc, cmdptr);
4358         }
4359 }
4360
4361 /**
4362  * mpi3mr_flush_drv_cmds - Flush internal driver commands
4363  * @mrioc: Adapter instance reference
4364  *
4365  * Flush all internal driver commands post reset
4366  *
4367  * Return: Nothing.
4368  */
4369 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4370 {
4371         struct mpi3mr_drv_cmd *cmdptr;
4372         u8 i;
4373
4374         cmdptr = &mrioc->init_cmds;
4375         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4376
4377         cmdptr = &mrioc->cfg_cmds;
4378         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4379
4380         cmdptr = &mrioc->bsg_cmds;
4381         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4382         cmdptr = &mrioc->host_tm_cmds;
4383         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4384
4385         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4386                 cmdptr = &mrioc->dev_rmhs_cmds[i];
4387                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4388         }
4389
4390         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4391                 cmdptr = &mrioc->evtack_cmds[i];
4392                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4393         }
4394
4395         cmdptr = &mrioc->pel_cmds;
4396         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4397
4398         cmdptr = &mrioc->pel_abort_cmd;
4399         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4400
4401 }
4402
4403 /**
4404  * mpi3mr_pel_wait_post - Issue PEL Wait
4405  * @mrioc: Adapter instance reference
4406  * @drv_cmd: Internal command tracker
4407  *
4408  * Issue PEL Wait MPI request through admin queue and return.
4409  *
4410  * Return: Nothing.
4411  */
4412 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
4413         struct mpi3mr_drv_cmd *drv_cmd)
4414 {
4415         struct mpi3_pel_req_action_wait pel_wait;
4416
4417         mrioc->pel_abort_requested = false;
4418
4419         memset(&pel_wait, 0, sizeof(pel_wait));
4420         drv_cmd->state = MPI3MR_CMD_PENDING;
4421         drv_cmd->is_waiting = 0;
4422         drv_cmd->callback = mpi3mr_pel_wait_complete;
4423         drv_cmd->ioc_status = 0;
4424         drv_cmd->ioc_loginfo = 0;
4425         pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4426         pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4427         pel_wait.action = MPI3_PEL_ACTION_WAIT;
4428         pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
4429         pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
4430         pel_wait.class = cpu_to_le16(mrioc->pel_class);
4431         pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
4432         dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
4433             mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
4434
4435         if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
4436                 dprint_bsg_err(mrioc,
4437                             "Issuing PELWait: Admin post failed\n");
4438                 drv_cmd->state = MPI3MR_CMD_NOTUSED;
4439                 drv_cmd->callback = NULL;
4440                 drv_cmd->retry_count = 0;
4441                 mrioc->pel_enabled = false;
4442         }
4443 }
4444
4445 /**
4446  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
4447  * @mrioc: Adapter instance reference
4448  * @drv_cmd: Internal command tracker
4449  *
4450  * Issue PEL get sequence number MPI request through admin queue
4451  * and return.
4452  *
4453  * Return: 0 on success, non-zero on failure.
4454  */
4455 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
4456         struct mpi3mr_drv_cmd *drv_cmd)
4457 {
4458         struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
4459         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
4460         int retval = 0;
4461
4462         memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
4463         mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
4464         mrioc->pel_cmds.is_waiting = 0;
4465         mrioc->pel_cmds.ioc_status = 0;
4466         mrioc->pel_cmds.ioc_loginfo = 0;
4467         mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
4468         pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4469         pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4470         pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
4471         mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
4472             mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
4473
4474         retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
4475                         sizeof(pel_getseq_req), 0);
4476         if (retval) {
4477                 if (drv_cmd) {
4478                         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4479                         drv_cmd->callback = NULL;
4480                         drv_cmd->retry_count = 0;
4481                 }
4482                 mrioc->pel_enabled = false;
4483         }
4484
4485         return retval;
4486 }
4487
4488 /**
4489  * mpi3mr_pel_wait_complete - PELWait Completion callback
4490  * @mrioc: Adapter instance reference
4491  * @drv_cmd: Internal command tracker
4492  *
4493  * This is the callback handler for the PELWait request. The
4494  * firmware completes a PELWait request when it is aborted or a
4495  * new PEL entry is available. This sends an AEN to the application,
4496  * and if the PELWait completion is not due to a PELAbort, it
4497  * issues a request for the new PEL sequence number.
4498  *
4499  * Return: Nothing.
4500  */
4501 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
4502         struct mpi3mr_drv_cmd *drv_cmd)
4503 {
4504         struct mpi3_pel_reply *pel_reply = NULL;
4505         u16 ioc_status, pe_log_status;
4506         bool do_retry = false;
4507
4508         if (drv_cmd->state & MPI3MR_CMD_RESET)
4509                 goto cleanup_drv_cmd;
4510
4511         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4512         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4513                 ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
4514                         __func__, ioc_status, drv_cmd->ioc_loginfo);
4515                 dprint_bsg_err(mrioc,
4516                     "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4517                     ioc_status, drv_cmd->ioc_loginfo);
4518                 do_retry = true;
4519         }
4520
4521         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4522                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4523
4524         if (!pel_reply) {
4525                 dprint_bsg_err(mrioc,
4526                     "pel_wait: failed due to no reply\n");
4527                 goto out_failed;
4528         }
4529
4530         pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
4531         if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
4532             (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
4533                 ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
4534                         __func__, pe_log_status);
4535                 dprint_bsg_err(mrioc,
4536                     "pel_wait: failed due to pel_log_status(0x%04x)\n",
4537                     pe_log_status);
4538                 do_retry = true;
4539         }
4540
4541         if (do_retry) {
4542                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4543                         drv_cmd->retry_count++;
4544                         dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
4545                             drv_cmd->retry_count);
4546                         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4547                         return;
4548                 }
4549                 dprint_bsg_err(mrioc,
4550                     "pel_wait: failed after all retries(%d)\n",
4551                     drv_cmd->retry_count);
4552                 goto out_failed;
4553         }
4554         atomic64_inc(&event_counter);
4555         if (!mrioc->pel_abort_requested) {
4556                 mrioc->pel_cmds.retry_count = 0;
4557                 mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
4558         }
4559
4560         return;
4561 out_failed:
4562         mrioc->pel_enabled = false;
4563 cleanup_drv_cmd:
4564         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4565         drv_cmd->callback = NULL;
4566         drv_cmd->retry_count = 0;
4567 }
4568
4569 /**
4570  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
4571  * @mrioc: Adapter instance reference
4572  * @drv_cmd: Internal command tracker
4573  *
4574  * This is the callback handler for the PEL get sequence number
4575  * request; a new PEL wait request is issued to the firmware
4576  * from here.
4577  *
4578  * Return: Nothing.
4579  */
4580 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
4581         struct mpi3mr_drv_cmd *drv_cmd)
4582 {
4583         struct mpi3_pel_reply *pel_reply = NULL;
4584         struct mpi3_pel_seq *pel_seqnum_virt;
4585         u16 ioc_status;
4586         bool do_retry = false;
4587
4588         pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
4589
4590         if (drv_cmd->state & MPI3MR_CMD_RESET)
4591                 goto cleanup_drv_cmd;
4592
4593         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4594         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4595                 dprint_bsg_err(mrioc,
4596                     "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4597                     ioc_status, drv_cmd->ioc_loginfo);
4598                 do_retry = true;
4599         }
4600
4601         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4602                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4603         if (!pel_reply) {
4604                 dprint_bsg_err(mrioc,
4605                     "pel_get_seqnum: failed due to no reply\n");
4606                 goto out_failed;
4607         }
4608
4609         if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
4610                 dprint_bsg_err(mrioc,
4611                     "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
4612                     le16_to_cpu(pel_reply->pe_log_status));
4613                 do_retry = true;
4614         }
4615
4616         if (do_retry) {
4617                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4618                         drv_cmd->retry_count++;
4619                         dprint_bsg_err(mrioc,
4620                             "pel_get_seqnum: retrying(%d)\n",
4621                             drv_cmd->retry_count);
4622                         mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
4623                         return;
4624                 }
4625
4626                 dprint_bsg_err(mrioc,
4627                     "pel_get_seqnum: failed after all retries(%d)\n",
4628                     drv_cmd->retry_count);
4629                 goto out_failed;
4630         }
4631         mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
4632         drv_cmd->retry_count = 0;
4633         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4634
4635         return;
4636 out_failed:
4637         mrioc->pel_enabled = false;
4638 cleanup_drv_cmd:
4639         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4640         drv_cmd->callback = NULL;
4641         drv_cmd->retry_count = 0;
4642 }
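
/*
 * Illustrative sketch (not part of the driver): how the persistent event
 * log (PEL) machinery above chains together once PEL is enabled. The
 * application's enable request (handled in the BSG path, not shown here)
 * supplies the class/locale filters and kicks off the cycle roughly as
 * below; the two completion callbacks then keep the wait/get-seqnum loop
 * running. The filter values are assumptions; wrapped in #if 0 so it is
 * never compiled.
 */
#if 0
static void mpi3mr_pel_cycle_sketch(struct mpi3mr_ioc *mrioc)
{
	/* Filter values normally come from the application via BSG */
	mrioc->pel_class = 0;		/* assumed: all classes */
	mrioc->pel_locale = 0xffff;	/* assumed: all locales */
	mrioc->pel_enabled = true;

	/*
	 * Post the first PEL wait; on completion mpi3mr_pel_wait_complete()
	 * posts a get-seqnum request, and mpi3mr_pel_get_seqnum_complete()
	 * re-posts the wait with the updated pel_newest_seqnum.
	 */
	mrioc->pel_cmds.retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
}
#endif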
4643
4644 /**
4645  * mpi3mr_soft_reset_handler - Reset the controller
4646  * @mrioc: Adapter instance reference
4647  * @reset_reason: Reset reason code
4648  * @snapdump: Flag to generate snapdump in firmware or not
4649  *
4650  * This is a handler for recovering the controller by issuing a
4651  * soft reset or a diag fault reset. This is a blocking function;
4652  * while one reset is executing, any other reset requests are
4653  * blocked. All BSG requests are blocked during the reset. If the
4654  * controller reset is successful then the controller is
4655  * reinitialized, otherwise the controller is marked as not
4656  * recoverable.
4657  *
4658  * If the snapdump bit is set, the controller is issued a diag
4659  * fault reset so that the firmware can create a snapdump; after
4660  * that the firmware raises an F000 fault and the driver issues a
4661  * soft reset to recover from it.
4662  *
4663  * Return: 0 on success, non-zero on failure.
4664  */
4665 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4666         u32 reset_reason, u8 snapdump)
4667 {
4668         int retval = 0, i;
4669         unsigned long flags;
4670         u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4671
4672         /* Block the reset handler while a diag save is in progress */
4673         dprint_reset(mrioc,
4674             "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
4675             mrioc->diagsave_timeout);
4676         while (mrioc->diagsave_timeout)
4677                 ssleep(1);
4678         /*
4679          * Block new resets until the currently executing one is finished and
4680          * return the status of the existing reset for all blocked resets
4681          */
4682         dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
4683         if (!mutex_trylock(&mrioc->reset_mutex)) {
4684                 ioc_info(mrioc,
4685                     "controller reset triggered by %s is blocked due to another reset in progress\n",
4686                     mpi3mr_reset_rc_name(reset_reason));
4687                 do {
4688                         ssleep(1);
4689                 } while (mrioc->reset_in_progress == 1);
4690                 ioc_info(mrioc,
4691                     "returning previous reset result(%d) for the reset triggered by %s\n",
4692                     mrioc->prev_reset_result,
4693                     mpi3mr_reset_rc_name(reset_reason));
4694                 return mrioc->prev_reset_result;
4695         }
4696         ioc_info(mrioc, "controller reset is triggered by %s\n",
4697             mpi3mr_reset_rc_name(reset_reason));
4698
4699         mrioc->reset_in_progress = 1;
4700         mrioc->stop_bsgs = 1;
4701         mrioc->prev_reset_result = -1;
4702
4703         if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
4704             (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
4705             (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
4706                 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4707                         mrioc->event_masks[i] = -1;
4708
4709                 dprint_reset(mrioc, "soft_reset_handler: masking events\n");
4710                 mpi3mr_issue_event_notification(mrioc);
4711         }
4712
4713         mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
4714
4715         mpi3mr_ioc_disable_intr(mrioc);
4716
4717         if (snapdump) {
4718                 mpi3mr_set_diagsave(mrioc);
4719                 retval = mpi3mr_issue_reset(mrioc,
4720                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4721                 if (!retval) {
4722                         do {
4723                                 host_diagnostic =
4724                                     readl(&mrioc->sysif_regs->host_diagnostic);
4725                                 if (!(host_diagnostic &
4726                                     MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
4727                                         break;
4728                                 msleep(100);
4729                         } while (--timeout);
4730                 }
4731         }
4732
4733         retval = mpi3mr_issue_reset(mrioc,
4734             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
4735         if (retval) {
4736                 ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
4737                 goto out;
4738         }
4739         if (mrioc->num_io_throttle_group !=
4740             mrioc->facts.max_io_throttle_group) {
4741                 ioc_err(mrioc,
4742                     "max io throttle group doesn't match old(%d), new(%d)\n",
4743                     mrioc->num_io_throttle_group,
4744                     mrioc->facts.max_io_throttle_group);
4745                 retval = -EPERM;
4746                 goto out;
4747         }
4748
4749         mpi3mr_flush_delayed_cmd_lists(mrioc);
4750         mpi3mr_flush_drv_cmds(mrioc);
4751         memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4752         memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4753         memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
4754         mpi3mr_flush_host_io(mrioc);
4755         mpi3mr_cleanup_fwevt_list(mrioc);
4756         mpi3mr_invalidate_devhandles(mrioc);
4757         if (mrioc->prepare_for_reset) {
4758                 mrioc->prepare_for_reset = 0;
4759                 mrioc->prepare_for_reset_timeout_counter = 0;
4760         }
4761         mpi3mr_memset_buffers(mrioc);
4762         retval = mpi3mr_reinit_ioc(mrioc, 0);
4763         if (retval) {
4764                 pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
4765                     mrioc->name, reset_reason);
4766                 goto out;
4767         }
4768         ssleep(10);
4769
4770 out:
4771         if (!retval) {
4772                 mrioc->diagsave_timeout = 0;
4773                 mrioc->reset_in_progress = 0;
4774                 mrioc->pel_abort_requested = 0;
4775                 if (mrioc->pel_enabled) {
4776                         mrioc->pel_cmds.retry_count = 0;
4777                         mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
4778                 }
4779
4780                 mpi3mr_rfresh_tgtdevs(mrioc);
4781                 mrioc->ts_update_counter = 0;
4782                 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
4783                 if (mrioc->watchdog_work_q)
4784                         queue_delayed_work(mrioc->watchdog_work_q,
4785                             &mrioc->watchdog_work,
4786                             msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
4787                 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
4788                 mrioc->stop_bsgs = 0;
4789                 if (mrioc->pel_enabled)
4790                         atomic64_inc(&event_counter);
4791         } else {
4792                 mpi3mr_issue_reset(mrioc,
4793                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4794                 mrioc->unrecoverable = 1;
4795                 mrioc->reset_in_progress = 0;
4796                 retval = -1;
4797         }
4798         mrioc->prev_reset_result = retval;
4799         mutex_unlock(&mrioc->reset_mutex);
4800         ioc_info(mrioc, "controller reset is %s\n",
4801             ((retval == 0) ? "successful" : "failed"));
4802         return retval;
4803 }
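
/*
 * Illustrative sketch (not part of the driver): a typical invocation of
 * mpi3mr_soft_reset_handler() from a fault-handling context. The reason
 * code is one already used in this file; whether a snapdump is requested
 * depends on the fault being serviced. Wrapped in #if 0 so it is never
 * compiled.
 */
#if 0
static void mpi3mr_trigger_recovery_sketch(struct mpi3mr_ioc *mrioc)
{
	/* recover the controller, asking firmware for a snapdump first */
	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FAULT_WATCH, 1))
		ioc_err(mrioc, "controller recovery failed\n");
}
#endif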
4804
4805
4806 /**
4807  * mpi3mr_free_config_dma_memory - free memory for config page
4808  * @mrioc: Adapter instance reference
4809  * @mem_desc: memory descriptor structure
4810  *
4811  * Check whether the size of the buffer specified by the memory
4812  * descriptor is greater than the default page size; if so, then
4813  * free the memory pointed to by the descriptor.
4814  *
4815  * Return: Nothing.
4816  */
4817 static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
4818         struct dma_memory_desc *mem_desc)
4819 {
4820         if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
4821                 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
4822                     mem_desc->addr, mem_desc->dma_addr);
4823                 mem_desc->addr = NULL;
4824         }
4825 }
4826
4827 /**
4828  * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
4829  * @mrioc: Adapter instance reference
4830  * @mem_desc: Memory descriptor to hold dma memory info
4831  *
4832  * This function allocates new dmaable memory or provides the
4833  * default config page dmaable memory based on the memory size
4834  * described by the descriptor.
4835  *
4836  * Return: 0 on success, non-zero on failure.
4837  */
4838 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
4839         struct dma_memory_desc *mem_desc)
4840 {
4841         if (mem_desc->size > mrioc->cfg_page_sz) {
4842                 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
4843                     mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
4844                 if (!mem_desc->addr)
4845                         return -ENOMEM;
4846         } else {
4847                 mem_desc->addr = mrioc->cfg_page;
4848                 mem_desc->dma_addr = mrioc->cfg_page_dma;
4849                 memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
4850         }
4851         return 0;
4852 }
4853
4854 /**
4855  * mpi3mr_post_cfg_req - Issue config requests and wait
4856  * @mrioc: Adapter instance reference
4857  * @cfg_req: Configuration request
4858  * @timeout: Timeout in seconds
4859  * @ioc_status: Pointer to return ioc status
4860  *
4861  * A generic function for posting MPI3 configuration request to
4862  * the firmware. This blocks on the completion of the request for
4863  * timeout seconds, and if the request times out this function
4864  * faults the controller with the proper reason code.
4865  *
4866  * On successful completion of the request this function returns
4867  * appropriate ioc status from the firmware back to the caller.
4868  *
4869  * Return: 0 on success, non-zero on failure.
4870  */
4871 static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
4872         struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
4873 {
4874         int retval = 0;
4875
4876         mutex_lock(&mrioc->cfg_cmds.mutex);
4877         if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
4878                 retval = -1;
4879                 ioc_err(mrioc, "sending config request failed due to command in use\n");
4880                 mutex_unlock(&mrioc->cfg_cmds.mutex);
4881                 goto out;
4882         }
4883         mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
4884         mrioc->cfg_cmds.is_waiting = 1;
4885         mrioc->cfg_cmds.callback = NULL;
4886         mrioc->cfg_cmds.ioc_status = 0;
4887         mrioc->cfg_cmds.ioc_loginfo = 0;
4888
4889         cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
4890         cfg_req->function = MPI3_FUNCTION_CONFIG;
4891
4892         init_completion(&mrioc->cfg_cmds.done);
4893         dprint_cfg_info(mrioc, "posting config request\n");
4894         if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
4895                 dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
4896                     "mpi3_cfg_req");
4897         retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
4898         if (retval) {
4899                 ioc_err(mrioc, "posting config request failed\n");
4900                 goto out_unlock;
4901         }
4902         wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
4903         if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
4904                 mpi3mr_check_rh_fault_ioc(mrioc,
4905                     MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
4906                 ioc_err(mrioc, "config request timed out\n");
4907                 retval = -1;
4908                 goto out_unlock;
4909         }
4910         *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4911         if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
4912                 dprint_cfg_err(mrioc,
4913                     "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
4914                     *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
4915
4916 out_unlock:
4917         mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
4918         mutex_unlock(&mrioc->cfg_cmds.mutex);
4919
4920 out:
4921         return retval;
4922 }
4923
4924 /**
4925  * mpi3mr_process_cfg_req - config page request processor
4926  * @mrioc: Adapter instance reference
4927  * @cfg_req: Configuration request
4928  * @cfg_hdr: Configuration page header
4929  * @timeout: Timeout in seconds
4930  * @ioc_status: Pointer to return ioc status
4931  * @cfg_buf: Memory pointer to copy config page or header
4932  * @cfg_buf_sz: Size of the memory to get config page or header
4933  *
4934  * This is handler for config page read, write and config page
4935  * header read operations.
4936  *
4937  * This function expects the cfg_req to be populated with page
4938  * type, page number, action for the header read and with page
4939  * address for all other operations.
4940  *
4941  * The cfg_hdr can be passed as NULL when reading the required
4942  * header details; for read/write pages the cfg_hdr should point
4943  * to a valid configuration page header.
4944  *
4945  * This allocates dmaable memory based on the size of the config
4946  * buffer and sets the SGE of the cfg_req.
4947  *
4948  * For write actions, the config page data has to be passed in
4949  * the cfg_buf and size of the data has to be mentioned in the
4950  * cfg_buf_sz.
4951  *
4952  * For read/header actions, on successful completion of the
4953  * request with a successful ioc_status, the data is copied
4954  * into the cfg_buf, limited to the minimum of the actual page
4955  * size and cfg_buf_sz.
4956  *
4957  *
4958  * Return: 0 on success, non-zero on failure.
4959  */
4960 static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
4961         struct mpi3_config_request *cfg_req,
4962         struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
4963         void *cfg_buf, u32 cfg_buf_sz)
4964 {
4965         struct dma_memory_desc mem_desc;
4966         int retval = -1;
4967         u8 invalid_action = 0;
4968         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
4969
4970         memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
4971
4972         if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
4973                 mem_desc.size = sizeof(struct mpi3_config_page_header);
4974         else {
4975                 if (!cfg_hdr) {
4976                         ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
4977                             cfg_req->action, cfg_req->page_type,
4978                             cfg_req->page_number);
4979                         goto out;
4980                 }
4981                 switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
4982                 case MPI3_CONFIG_PAGEATTR_READ_ONLY:
4983                         if (cfg_req->action
4984                             != MPI3_CONFIG_ACTION_READ_CURRENT)
4985                                 invalid_action = 1;
4986                         break;
4987                 case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
4988                         if ((cfg_req->action ==
4989                              MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
4990                             (cfg_req->action ==
4991                              MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
4992                                 invalid_action = 1;
4993                         break;
4994                 case MPI3_CONFIG_PAGEATTR_PERSISTENT:
4995                 default:
4996                         break;
4997                 }
4998                 if (invalid_action) {
4999                         ioc_err(mrioc,
5000                             "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5001                             cfg_req->action, cfg_req->page_type,
5002                             cfg_req->page_number, cfg_hdr->page_attribute);
5003                         goto out;
5004                 }
5005                 mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5006                 cfg_req->page_length = cfg_hdr->page_length;
5007                 cfg_req->page_version = cfg_hdr->page_version;
5008         }
5009         if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
5010                 goto out;
5011
5012         mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5013             mem_desc.dma_addr);
5014
5015         if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5016             (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5017                 memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5018                     cfg_buf_sz));
5019                 dprint_cfg_info(mrioc, "config buffer to be written\n");
5020                 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5021                         dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5022         }
5023
5024         if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5025                 goto out;
5026
5027         retval = 0;
5028         if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5029             (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5030             (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5031                 memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5032                     cfg_buf_sz));
5033                 dprint_cfg_info(mrioc, "config buffer read\n");
5034                 if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5035                         dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5036         }
5037
5038 out:
5039         mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
5040         return retval;
5041 }
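
/*
 * Illustrative sketch (not part of the driver): how a caller is expected
 * to use mpi3mr_process_cfg_req() to read a configuration page - first a
 * PAGE_HEADER action to discover the page length/version, then a
 * READ_CURRENT action with the retrieved header. The page type constant
 * and the 30 second timeout are illustrative assumptions. Wrapped in
 * #if 0 so it is never compiled.
 */
#if 0
static int mpi3mr_cfg_read_page_sketch(struct mpi3mr_ioc *mrioc,
	void *page_buf, u32 page_buf_sz)
{
	struct mpi3_config_page_header cfg_hdr;
	struct mpi3_config_request cfg_req;
	u16 ioc_status = 0;

	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(&cfg_req, 0, sizeof(cfg_req));

	/* Step 1: read the page header to get the page length and version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_IO_UNIT;	/* assumed */
	cfg_req.page_number = 1;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 30, &ioc_status,
	    &cfg_hdr, sizeof(cfg_hdr)))
		return -1;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
		return -1;

	/* Step 2: read the current page using the retrieved header */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 30, &ioc_status,
	    page_buf, page_buf_sz))
		return -1;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
		return -1;

	return 0;
}
#endif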