drivers/scsi/mpi3mr/mpi3mr_fw.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2022 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17         struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19         struct mpi3mr_drv_cmd *drv_cmd);
20
21 static int poll_queues;
22 module_param(poll_queues, int, 0444);
23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
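/*
 * Illustrative usage (not part of this file): polled queues can be
 * requested at module load time, e.g. "modprobe mpi3mr poll_queues=4",
 * provided enough MSI-x vectors remain for the admin and default queues.
 */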
24
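/*
 * On 64-bit builds with a native writeq(), the 64-bit doorbell write is a
 * single MMIO access; otherwise it is split into two 32-bit writes (low
 * dword first, then high dword), which is not atomic with respect to the
 * device.
 */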
25 #if defined(writeq) && defined(CONFIG_64BIT)
26 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
27 {
28         writeq(b, addr);
29 }
30 #else
31 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
32 {
33         __u64 data_out = b;
34
35         writel((u32)(data_out), addr);
36         writel((u32)(data_out >> 32), (addr + 4));
37 }
38 #endif
39
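/**
 * mpi3mr_check_req_qfull - check operational request queue is full or not
 * @op_req_q: Operational request queue info
 *
 * The queue is considered full when the producer index is one slot
 * behind the consumer index modulo the queue depth; for example, with
 * num_requests = 8, pi = 7 and ci = 0 means full.
 *
 * Return: true when the queue is full, false otherwise.
 */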
40 static inline bool
41 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42 {
43         u16 pi, ci, max_entries;
44         bool is_qfull = false;
45
46         pi = op_req_q->pi;
47         ci = READ_ONCE(op_req_q->ci);
48         max_entries = op_req_q->num_requests;
49
50         if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51                 is_qfull = true;
52
53         return is_qfull;
54 }
55
56 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57 {
58         u16 i, max_vectors;
59
60         max_vectors = mrioc->intr_info_count;
61
62         for (i = 0; i < max_vectors; i++)
63                 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64 }
65
66 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
67 {
68         mrioc->intr_enabled = 0;
69         mpi3mr_sync_irqs(mrioc);
70 }
71
72 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
73 {
74         mrioc->intr_enabled = 1;
75 }
76
77 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
78 {
79         u16 i;
80
81         mpi3mr_ioc_disable_intr(mrioc);
82
83         if (!mrioc->intr_info)
84                 return;
85
86         for (i = 0; i < mrioc->intr_info_count; i++)
87                 free_irq(pci_irq_vector(mrioc->pdev, i),
88                     (mrioc->intr_info + i));
89
90         kfree(mrioc->intr_info);
91         mrioc->intr_info = NULL;
92         mrioc->intr_info_count = 0;
93         mrioc->is_intr_info_set = false;
94         pci_free_irq_vectors(mrioc->pdev);
95 }
96
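/**
 * mpi3mr_add_sg_single - build a simple SGE at the given address
 * @paddr: virtual address at which the SGE is constructed
 * @flags: SGE flags
 * @length: data length in bytes
 * @dma_addr: DMA address of the data buffer
 *
 * Populate an MPI3 simple scatter-gather element (flags, length and
 * 64-bit little-endian address) at @paddr.
 */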
97 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
98         dma_addr_t dma_addr)
99 {
100         struct mpi3_sge_common *sgel = paddr;
101
102         sgel->flags = flags;
103         sgel->length = cpu_to_le32(length);
104         sgel->address = cpu_to_le64(dma_addr);
105 }
106
107 void mpi3mr_build_zero_len_sge(void *paddr)
108 {
109         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
110
111         mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
112 }
113
114 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
115         dma_addr_t phys_addr)
116 {
117         if (!phys_addr)
118                 return NULL;
119
120         if ((phys_addr < mrioc->reply_buf_dma) ||
121             (phys_addr > mrioc->reply_buf_dma_max_address))
122                 return NULL;
123
124         return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
125 }
126
127 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
128         dma_addr_t phys_addr)
129 {
130         if (!phys_addr)
131                 return NULL;
132
133         return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
134 }
135
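/**
 * mpi3mr_repost_reply_buf - return a reply buffer to the controller
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the processed reply buffer
 *
 * Place the reply buffer's DMA address back on the reply free queue
 * (advancing the host index with wrap-around) and write the new index
 * to the reply free host index register, under the queue lock.
 */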
136 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
137         u64 reply_dma)
138 {
139         u32 old_idx = 0;
140         unsigned long flags;
141
142         spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
143         old_idx  =  mrioc->reply_free_queue_host_index;
144         mrioc->reply_free_queue_host_index = (
145             (mrioc->reply_free_queue_host_index ==
146             (mrioc->reply_free_qsz - 1)) ? 0 :
147             (mrioc->reply_free_queue_host_index + 1));
148         mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
149         writel(mrioc->reply_free_queue_host_index,
150             &mrioc->sysif_regs->reply_free_host_index);
151         spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
152 }
153
154 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
155         u64 sense_buf_dma)
156 {
157         u32 old_idx = 0;
158         unsigned long flags;
159
160         spin_lock_irqsave(&mrioc->sbq_lock, flags);
161         old_idx  =  mrioc->sbq_host_index;
162         mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
163             (mrioc->sense_buf_q_sz - 1)) ? 0 :
164             (mrioc->sbq_host_index + 1));
165         mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
166         writel(mrioc->sbq_host_index,
167             &mrioc->sysif_regs->sense_buffer_free_host_index);
168         spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
169 }
170
171 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
172         struct mpi3_event_notification_reply *event_reply)
173 {
174         char *desc = NULL;
175         u16 event;
176
177         event = event_reply->event;
178
179         switch (event) {
180         case MPI3_EVENT_LOG_DATA:
181                 desc = "Log Data";
182                 break;
183         case MPI3_EVENT_CHANGE:
184                 desc = "Event Change";
185                 break;
186         case MPI3_EVENT_GPIO_INTERRUPT:
187                 desc = "GPIO Interrupt";
188                 break;
189         case MPI3_EVENT_CABLE_MGMT:
190                 desc = "Cable Management";
191                 break;
192         case MPI3_EVENT_ENERGY_PACK_CHANGE:
193                 desc = "Energy Pack Change";
194                 break;
195         case MPI3_EVENT_DEVICE_ADDED:
196         {
197                 struct mpi3_device_page0 *event_data =
198                     (struct mpi3_device_page0 *)event_reply->event_data;
199                 ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
200                     event_data->dev_handle, event_data->device_form);
201                 return;
202         }
203         case MPI3_EVENT_DEVICE_INFO_CHANGED:
204         {
205                 struct mpi3_device_page0 *event_data =
206                     (struct mpi3_device_page0 *)event_reply->event_data;
207                 ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
208                     event_data->dev_handle, event_data->device_form);
209                 return;
210         }
211         case MPI3_EVENT_DEVICE_STATUS_CHANGE:
212         {
213                 struct mpi3_event_data_device_status_change *event_data =
214                     (struct mpi3_event_data_device_status_change *)event_reply->event_data;
215                 ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
216                     event_data->dev_handle, event_data->reason_code);
217                 return;
218         }
219         case MPI3_EVENT_SAS_DISCOVERY:
220         {
221                 struct mpi3_event_data_sas_discovery *event_data =
222                     (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
223                 ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
224                     (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
225                     "start" : "stop",
226                     le32_to_cpu(event_data->discovery_status));
227                 return;
228         }
229         case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
230                 desc = "SAS Broadcast Primitive";
231                 break;
232         case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
233                 desc = "SAS Notify Primitive";
234                 break;
235         case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
236                 desc = "SAS Init Device Status Change";
237                 break;
238         case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
239                 desc = "SAS Init Table Overflow";
240                 break;
241         case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
242                 desc = "SAS Topology Change List";
243                 break;
244         case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
245                 desc = "Enclosure Device Status Change";
246                 break;
247         case MPI3_EVENT_HARD_RESET_RECEIVED:
248                 desc = "Hard Reset Received";
249                 break;
250         case MPI3_EVENT_SAS_PHY_COUNTER:
251                 desc = "SAS PHY Counter";
252                 break;
253         case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
254                 desc = "SAS Device Discovery Error";
255                 break;
256         case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
257                 desc = "PCIE Topology Change List";
258                 break;
259         case MPI3_EVENT_PCIE_ENUMERATION:
260         {
261                 struct mpi3_event_data_pcie_enumeration *event_data =
262                     (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
263                 ioc_info(mrioc, "PCIE Enumeration: (%s)",
264                     (event_data->reason_code ==
265                     MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
266                 if (event_data->enumeration_status)
267                         ioc_info(mrioc, "enumeration_status(0x%08x)\n",
268                             le32_to_cpu(event_data->enumeration_status));
269                 return;
270         }
271         case MPI3_EVENT_PREPARE_FOR_RESET:
272                 desc = "Prepare For Reset";
273                 break;
274         }
275
276         if (!desc)
277                 return;
278
279         ioc_info(mrioc, "%s\n", desc);
280 }
281
282 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
283         struct mpi3_default_reply *def_reply)
284 {
285         struct mpi3_event_notification_reply *event_reply =
286             (struct mpi3_event_notification_reply *)def_reply;
287
288         mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
289         mpi3mr_print_event_data(mrioc, event_reply);
290         mpi3mr_os_handle_events(mrioc, event_reply);
291 }
292
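/**
 * mpi3mr_get_drv_cmd - map host tag to internal driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag from the reply descriptor
 * @def_reply: Default reply frame, used to dispatch event notifications
 *             when the host tag is MPI3MR_HOSTTAG_INVALID
 *
 * Return: Pointer to the matching driver command tracker, or NULL.
 */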
293 static struct mpi3mr_drv_cmd *
294 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
295         struct mpi3_default_reply *def_reply)
296 {
297         u16 idx;
298
299         switch (host_tag) {
300         case MPI3MR_HOSTTAG_INITCMDS:
301                 return &mrioc->init_cmds;
302         case MPI3MR_HOSTTAG_BSG_CMDS:
303                 return &mrioc->bsg_cmds;
304         case MPI3MR_HOSTTAG_BLK_TMS:
305                 return &mrioc->host_tm_cmds;
306         case MPI3MR_HOSTTAG_PEL_ABORT:
307                 return &mrioc->pel_abort_cmd;
308         case MPI3MR_HOSTTAG_PEL_WAIT:
309                 return &mrioc->pel_cmds;
310         case MPI3MR_HOSTTAG_INVALID:
311                 if (def_reply && def_reply->function ==
312                     MPI3_FUNCTION_EVENT_NOTIFICATION)
313                         mpi3mr_handle_events(mrioc, def_reply);
314                 return NULL;
315         default:
316                 break;
317         }
318         if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
319             host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
320                 idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
321                 return &mrioc->dev_rmhs_cmds[idx];
322         }
323
324         if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
325             host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
326                 idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
327                 return &mrioc->evtack_cmds[idx];
328         }
329
330         return NULL;
331 }
332
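/**
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Reply descriptor to process
 * @reply_dma: Filled with the reply frame DMA address for address replies,
 *             zero otherwise
 *
 * Decode the descriptor type, extract host tag, IOC status and log info,
 * copy the reply frame into the matching driver command tracker and wake
 * or call back its owner; sense buffers are reposted to the controller.
 */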
333 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
334         struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
335 {
336         u16 reply_desc_type, host_tag = 0;
337         u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
338         u32 ioc_loginfo = 0;
339         struct mpi3_status_reply_descriptor *status_desc;
340         struct mpi3_address_reply_descriptor *addr_desc;
341         struct mpi3_success_reply_descriptor *success_desc;
342         struct mpi3_default_reply *def_reply = NULL;
343         struct mpi3mr_drv_cmd *cmdptr = NULL;
344         struct mpi3_scsi_io_reply *scsi_reply;
345         u8 *sense_buf = NULL;
346
347         *reply_dma = 0;
348         reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
349             MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
350         switch (reply_desc_type) {
351         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
352                 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
353                 host_tag = le16_to_cpu(status_desc->host_tag);
354                 ioc_status = le16_to_cpu(status_desc->ioc_status);
355                 if (ioc_status &
356                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
357                         ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
358                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
359                 break;
360         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
361                 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
362                 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
363                 def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
364                 if (!def_reply)
365                         goto out;
366                 host_tag = le16_to_cpu(def_reply->host_tag);
367                 ioc_status = le16_to_cpu(def_reply->ioc_status);
368                 if (ioc_status &
369                     MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
370                         ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
371                 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
372                 if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
373                         scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
374                         sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
375                             le64_to_cpu(scsi_reply->sense_data_buffer_address));
376                 }
377                 break;
378         case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
379                 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
380                 host_tag = le16_to_cpu(success_desc->host_tag);
381                 break;
382         default:
383                 break;
384         }
385
386         cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
387         if (cmdptr) {
388                 if (cmdptr->state & MPI3MR_CMD_PENDING) {
389                         cmdptr->state |= MPI3MR_CMD_COMPLETE;
390                         cmdptr->ioc_loginfo = ioc_loginfo;
391                         cmdptr->ioc_status = ioc_status;
392                         cmdptr->state &= ~MPI3MR_CMD_PENDING;
393                         if (def_reply) {
394                                 cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
395                                 memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
396                                     mrioc->reply_sz);
397                         }
398                         if (cmdptr->is_waiting) {
399                                 complete(&cmdptr->done);
400                                 cmdptr->is_waiting = 0;
401                         } else if (cmdptr->callback)
402                                 cmdptr->callback(mrioc, cmdptr);
403                 }
404         }
405 out:
406         if (sense_buf)
407                 mpi3mr_repost_sense_buf(mrioc,
408                     le64_to_cpu(scsi_reply->sense_data_buffer_address));
409 }
410
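/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drain the admin reply queue: entries are consumed as long as their
 * phase bit matches the expected phase, each descriptor is processed and
 * its reply buffer reposted, and the consumer index register is updated
 * once the queue is empty. The expected phase toggles on every wrap of
 * the consumer index.
 *
 * Return: Number of admin reply descriptors processed.
 */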
411 static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
412 {
413         u32 exp_phase = mrioc->admin_reply_ephase;
414         u32 admin_reply_ci = mrioc->admin_reply_ci;
415         u32 num_admin_replies = 0;
416         u64 reply_dma = 0;
417         struct mpi3_default_reply_descriptor *reply_desc;
418
419         reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
420             admin_reply_ci;
421
422         if ((le16_to_cpu(reply_desc->reply_flags) &
423             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
424                 return 0;
425
426         do {
427                 mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
428                 mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
429                 if (reply_dma)
430                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
431                 num_admin_replies++;
432                 if (++admin_reply_ci == mrioc->num_admin_replies) {
433                         admin_reply_ci = 0;
434                         exp_phase ^= 1;
435                 }
436                 reply_desc =
437                     (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
438                     admin_reply_ci;
439                 if ((le16_to_cpu(reply_desc->reply_flags) &
440                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
441                         break;
442         } while (1);
443
444         writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
445         mrioc->admin_reply_ci = admin_reply_ci;
446         mrioc->admin_reply_ephase = exp_phase;
447
448         return num_admin_replies;
449 }
450
451 /**
452  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
453  *      queue's consumer index from operational reply descriptor queue.
454  * @op_reply_q: op_reply_qinfo object
455  * @reply_ci: operational reply descriptor's queue consumer index
456  *
457  * Return: reply descriptor frame address
458  */
459 static inline struct mpi3_default_reply_descriptor *
460 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
461 {
462         void *segment_base_addr;
463         struct segments *segments = op_reply_q->q_segments;
464         struct mpi3_default_reply_descriptor *reply_desc = NULL;
465
466         segment_base_addr =
467             segments[reply_ci / op_reply_q->segment_qd].segment;
468         reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
469             (reply_ci % op_reply_q->segment_qd);
470         return reply_desc;
471 }
472
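/*
 * Note: as with the admin reply queue, an operational reply descriptor is
 * valid only when its phase bit matches the host's expected phase
 * (ephase); the host toggles ephase each time the consumer index wraps to
 * zero. The in_use atomic ensures only one context drains a given queue
 * at a time.
 */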
473 /**
474  * mpi3mr_process_op_reply_q - Operational reply queue handler
475  * @mrioc: Adapter instance reference
476  * @op_reply_q: Operational reply queue info
477  *
478  * Checks the specific operational reply queue, drains the
479  * reply queue entries until the queue is empty, and processes the
480  * individual reply descriptors.
481  *
482  * Return: 0 if queue is already processed, or number of reply
483  *          descriptors processed.
484  */
485 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
486         struct op_reply_qinfo *op_reply_q)
487 {
488         struct op_req_qinfo *op_req_q;
489         u32 exp_phase;
490         u32 reply_ci;
491         u32 num_op_reply = 0;
492         u64 reply_dma = 0;
493         struct mpi3_default_reply_descriptor *reply_desc;
494         u16 req_q_idx = 0, reply_qidx;
495
496         reply_qidx = op_reply_q->qid - 1;
497
498         if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
499                 return 0;
500
501         exp_phase = op_reply_q->ephase;
502         reply_ci = op_reply_q->ci;
503
504         reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
505         if ((le16_to_cpu(reply_desc->reply_flags) &
506             MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
507                 atomic_dec(&op_reply_q->in_use);
508                 return 0;
509         }
510
511         do {
512                 req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
513                 op_req_q = &mrioc->req_qinfo[req_q_idx];
514
515                 WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
516                 mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
517                     reply_qidx);
518                 atomic_dec(&op_reply_q->pend_ios);
519                 if (reply_dma)
520                         mpi3mr_repost_reply_buf(mrioc, reply_dma);
521                 num_op_reply++;
522
523                 if (++reply_ci == op_reply_q->num_replies) {
524                         reply_ci = 0;
525                         exp_phase ^= 1;
526                 }
527
528                 reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
529
530                 if ((le16_to_cpu(reply_desc->reply_flags) &
531                     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
532                         break;
533                 /*
534                  * Exit completion loop to avoid CPU lockup
535                  * Ensure remaining completion happens from threaded ISR.
536                  */
537                 if (num_op_reply > mrioc->max_host_ios) {
538                         op_reply_q->enable_irq_poll = true;
539                         break;
540                 }
541
542         } while (1);
543
544         writel(reply_ci,
545             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
546         op_reply_q->ci = reply_ci;
547         op_reply_q->ephase = exp_phase;
548
549         atomic_dec(&op_reply_q->in_use);
550         return num_op_reply;
551 }
552
553 /**
554  * mpi3mr_blk_mq_poll - Operational reply queue handler
555  * @shost: SCSI Host reference
556  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
557  *
558  * Checks the specific operational reply queue, drains the
559  * reply queue entries until the queue is empty, and processes the
560  * individual reply descriptors.
561  *
562  * Return: 0 if queue is already processed, or number of reply
563  *          descriptors processed.
564  */
565 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
566 {
567         int num_entries = 0;
568         struct mpi3mr_ioc *mrioc;
569
570         mrioc = (struct mpi3mr_ioc *)shost->hostdata;
571
572         if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
573                 return 0;
574
575         num_entries = mpi3mr_process_op_reply_q(mrioc,
576                         &mrioc->op_reply_qinfo[queue_num]);
577
578         return num_entries;
579 }
580
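/**
 * mpi3mr_isr_primary - primary (hard) interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info (per MSI-x vector)
 *
 * Process the admin reply queue on vector 0 and the operational reply
 * queue mapped to this vector, if any.
 *
 * Return: IRQ_HANDLED if any replies were processed, IRQ_NONE otherwise.
 */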
581 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
582 {
583         struct mpi3mr_intr_info *intr_info = privdata;
584         struct mpi3mr_ioc *mrioc;
585         u16 midx;
586         u32 num_admin_replies = 0, num_op_reply = 0;
587
588         if (!intr_info)
589                 return IRQ_NONE;
590
591         mrioc = intr_info->mrioc;
592
593         if (!mrioc->intr_enabled)
594                 return IRQ_NONE;
595
596         midx = intr_info->msix_index;
597
598         if (!midx)
599                 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
600         if (intr_info->op_reply_q)
601                 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
602                     intr_info->op_reply_q);
603
604         if (num_admin_replies || num_op_reply)
605                 return IRQ_HANDLED;
606         else
607                 return IRQ_NONE;
608 }
609
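/**
 * mpi3mr_isr - threaded-IRQ primary handler
 * @irq: IRQ number
 * @privdata: Interrupt info (per MSI-x vector)
 *
 * Run the primary completion processing and, when IRQ polling is enabled
 * for the operational reply queue and I/Os are still pending, disable the
 * vector and return IRQ_WAKE_THREAD so that the remaining completions are
 * handled by mpi3mr_isr_poll() in thread context.
 *
 * Return: IRQ_NONE, IRQ_HANDLED or IRQ_WAKE_THREAD.
 */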
610 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
611 {
612         struct mpi3mr_intr_info *intr_info = privdata;
613         struct mpi3mr_ioc *mrioc;
614         u16 midx;
615         int ret;
616
617         if (!intr_info)
618                 return IRQ_NONE;
619
620         mrioc = intr_info->mrioc;
621         midx = intr_info->msix_index;
622         /* Call primary ISR routine */
623         ret = mpi3mr_isr_primary(irq, privdata);
624
625         /*
626          * If more IOs are expected, schedule IRQ polling thread.
627          * Otherwise exit from ISR.
628          */
629         if (!intr_info->op_reply_q)
630                 return ret;
631
632         if (!intr_info->op_reply_q->enable_irq_poll ||
633             !atomic_read(&intr_info->op_reply_q->pend_ios))
634                 return ret;
635
636         disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
637
638         return IRQ_WAKE_THREAD;
639 }
640
641 /**
642  * mpi3mr_isr_poll - Reply queue polling routine
643  * @irq: IRQ
644  * @privdata: Interrupt info
645  *
646  * Poll for pending I/O completions in a loop until no pending I/Os
647  * remain or until controller-queue-depth worth of I/Os are processed.
648  *
649  * Return: IRQ_NONE or IRQ_HANDLED
650  */
651 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
652 {
653         struct mpi3mr_intr_info *intr_info = privdata;
654         struct mpi3mr_ioc *mrioc;
655         u16 midx;
656         u32 num_op_reply = 0;
657
658         if (!intr_info || !intr_info->op_reply_q)
659                 return IRQ_NONE;
660
661         mrioc = intr_info->mrioc;
662         midx = intr_info->msix_index;
663
664         /* Poll for pending IOs completions */
665         do {
666                 if (!mrioc->intr_enabled)
667                         break;
668
669                 if (!midx)
670                         mpi3mr_process_admin_reply_q(mrioc);
671                 if (intr_info->op_reply_q)
672                         num_op_reply +=
673                             mpi3mr_process_op_reply_q(mrioc,
674                                 intr_info->op_reply_q);
675
676                 usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
677
678         } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
679             (num_op_reply < mrioc->max_host_ios));
680
681         intr_info->op_reply_q->enable_irq_poll = false;
682         enable_irq(pci_irq_vector(mrioc->pdev, midx));
683
684         return IRQ_HANDLED;
685 }
686
687 /**
688  * mpi3mr_request_irq - Request IRQ and register ISR
689  * @mrioc: Adapter instance reference
690  * @index: IRQ vector index
691  *
692  * Request a threaded ISR with primary and secondary (polling) handlers.
693  *
694  * Return: 0 on success and non-zero on failure.
695  */
696 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
697 {
698         struct pci_dev *pdev = mrioc->pdev;
699         struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
700         int retval = 0;
701
702         intr_info->mrioc = mrioc;
703         intr_info->msix_index = index;
704         intr_info->op_reply_q = NULL;
705
706         snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
707             mrioc->driver_name, mrioc->id, index);
708
709         retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
710             mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
711         if (retval) {
712                 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
713                     intr_info->name, pci_irq_vector(pdev, index));
714                 return retval;
715         }
716
717         return retval;
718 }
719
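/**
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-x vectors being requested
 *
 * Keep the requested polled queue count only if enough vectors remain for
 * the admin and default queues; otherwise disable polled queues by
 * resetting the requested count to zero.
 */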
720 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
721 {
722         if (!mrioc->requested_poll_qcount)
723                 return;
724
725         /* Reserved for Admin and Default Queue */
726         if (max_vectors > 2 &&
727                 (mrioc->requested_poll_qcount < max_vectors - 2)) {
728                 ioc_info(mrioc,
729                     "enabled polled queues (%d) msix (%d)\n",
730                     mrioc->requested_poll_qcount, max_vectors);
731         } else {
732                 ioc_info(mrioc,
733                     "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
734                     mrioc->requested_poll_qcount, max_vectors);
735                 mrioc->requested_poll_qcount = 0;
736         }
737 }
738
739 /**
740  * mpi3mr_setup_isr - Setup ISR for the controller
741  * @mrioc: Adapter instance reference
742  * @setup_one: Request one IRQ or more
743  *
744  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
745  *
746  * Return: 0 on success and non-zero on failure.
747  */
748 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
749 {
750         unsigned int irq_flags = PCI_IRQ_MSIX;
751         int max_vectors, min_vec;
752         int retval;
753         int i;
754         struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
755
756         if (mrioc->is_intr_info_set)
757                 return 0;
758
759         mpi3mr_cleanup_isr(mrioc);
760
761         if (setup_one || reset_devices) {
762                 max_vectors = 1;
763                 retval = pci_alloc_irq_vectors(mrioc->pdev,
764                     1, max_vectors, irq_flags);
765                 if (retval < 0) {
766                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
767                             retval);
768                         goto out_failed;
769                 }
770         } else {
771                 max_vectors =
772                     min_t(int, mrioc->cpu_count + 1 +
773                         mrioc->requested_poll_qcount, mrioc->msix_count);
774
775                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
776
777                 ioc_info(mrioc,
778                     "MSI-X vectors supported: %d, no of cores: %d,",
779                     mrioc->msix_count, mrioc->cpu_count);
780                 ioc_info(mrioc,
781                     "MSI-x vectors requested: %d poll_queues %d\n",
782                     max_vectors, mrioc->requested_poll_qcount);
783
784                 desc.post_vectors = mrioc->requested_poll_qcount;
785                 min_vec = desc.pre_vectors + desc.post_vectors;
786                 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
787
788                 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
789                         min_vec, max_vectors, irq_flags, &desc);
790
791                 if (retval < 0) {
792                         ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
793                             retval);
794                         goto out_failed;
795                 }
796
797
798                 /*
799                  * If only one MSI-x is allocated, then MSI-x 0 will be shared
800                  * between Admin queue and operational queue
801                  */
802                 if (retval == min_vec)
803                         mrioc->op_reply_q_offset = 0;
804                 else if (retval != (max_vectors)) {
805                         ioc_info(mrioc,
806                             "allocated vectors (%d) are less than configured (%d)\n",
807                             retval, max_vectors);
808                 }
809
810                 max_vectors = retval;
811                 mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
812
813                 mpi3mr_calc_poll_queues(mrioc, max_vectors);
814
815         }
816
817         mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
818             GFP_KERNEL);
819         if (!mrioc->intr_info) {
820                 retval = -ENOMEM;
821                 pci_free_irq_vectors(mrioc->pdev);
822                 goto out_failed;
823         }
824         for (i = 0; i < max_vectors; i++) {
825                 retval = mpi3mr_request_irq(mrioc, i);
826                 if (retval) {
827                         mrioc->intr_info_count = i;
828                         goto out_failed;
829                 }
830         }
831         if (reset_devices || !setup_one)
832                 mrioc->is_intr_info_set = true;
833         mrioc->intr_info_count = max_vectors;
834         mpi3mr_ioc_enable_intr(mrioc);
835         return 0;
836
837 out_failed:
838         mpi3mr_cleanup_isr(mrioc);
839
840         return retval;
841 }
842
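/* IOC state to name mapper structure */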
843 static const struct {
844         enum mpi3mr_iocstate value;
845         char *name;
846 } mrioc_states[] = {
847         { MRIOC_STATE_READY, "ready" },
848         { MRIOC_STATE_FAULT, "fault" },
849         { MRIOC_STATE_RESET, "reset" },
850         { MRIOC_STATE_BECOMING_READY, "becoming ready" },
851         { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
852         { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
853 };
854
855 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
856 {
857         int i;
858         char *name = NULL;
859
860         for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
861                 if (mrioc_states[i].value == mrioc_state) {
862                         name = mrioc_states[i].name;
863                         break;
864                 }
865         }
866         return name;
867 }
868
869 /* Reset reason to name mapper structure */
870 static const struct {
871         enum mpi3mr_reset_reason value;
872         char *name;
873 } mpi3mr_reset_reason_codes[] = {
874         { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
875         { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
876         { MPI3MR_RESET_FROM_APP, "application invocation" },
877         { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
878         { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
879         { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
880         { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
881         { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
882         { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
883         { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
884         { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
885         { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
886         { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
887         {
888                 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
889                 "create reply queue timeout"
890         },
891         {
892                 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
893                 "create request queue timeout"
894         },
895         { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
896         { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
897         { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
898         { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
899         {
900                 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
901                 "component image activation timeout"
902         },
903         {
904                 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
905                 "get package version timeout"
906         },
907         { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
908         { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
909         { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
910 };
911
912 /**
913  * mpi3mr_reset_rc_name - get reset reason code name
914  * @reason_code: reset reason code value
915  *
916  * Map reset reason to a NULL terminated ASCII string
917  *
918  * Return: name corresponding to reset reason value or NULL.
919  */
920 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
921 {
922         int i;
923         char *name = NULL;
924
925         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
926                 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
927                         name = mpi3mr_reset_reason_codes[i].name;
928                         break;
929                 }
930         }
931         return name;
932 }
933
934 /* Reset type to name mapper structure */
935 static const struct {
936         u16 reset_type;
937         char *name;
938 } mpi3mr_reset_types[] = {
939         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
940         { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
941 };
942
943 /**
944  * mpi3mr_reset_type_name - get reset type name
945  * @reset_type: reset type value
946  *
947  * Map reset type to a NULL terminated ASCII string
948  *
949  * Return: name corresponding to reset type value or NULL.
950  */
951 static const char *mpi3mr_reset_type_name(u16 reset_type)
952 {
953         int i;
954         char *name = NULL;
955
956         for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
957                 if (mpi3mr_reset_types[i].reset_type == reset_type) {
958                         name = mpi3mr_reset_types[i].name;
959                         break;
960                 }
961         }
962         return name;
963 }
964
965 /**
966  * mpi3mr_print_fault_info - Display fault information
967  * @mrioc: Adapter instance reference
968  *
969  * Display the controller fault information if there is a
970  * controller fault.
971  *
972  * Return: Nothing.
973  */
974 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
975 {
976         u32 ioc_status, code, code1, code2, code3;
977
978         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
979
980         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
981                 code = readl(&mrioc->sysif_regs->fault);
982                 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
983                 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
984                 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
985
986                 ioc_info(mrioc,
987                     "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
988                     code, code1, code2, code3);
989         }
990 }
991
992 /**
993  * mpi3mr_get_iocstate - Get IOC State
994  * @mrioc: Adapter instance reference
995  *
996  * Return a proper IOC state enum based on the IOC status and
997  * IOC configuration and the unrecoverable state of the controller.
998  *
999  * Return: Current IOC state.
1000  */
1001 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1002 {
1003         u32 ioc_status, ioc_config;
1004         u8 ready, enabled;
1005
1006         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1007         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1008
1009         if (mrioc->unrecoverable)
1010                 return MRIOC_STATE_UNRECOVERABLE;
1011         if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1012                 return MRIOC_STATE_FAULT;
1013
1014         ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1015         enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1016
1017         if (ready && enabled)
1018                 return MRIOC_STATE_READY;
1019         if ((!ready) && (!enabled))
1020                 return MRIOC_STATE_RESET;
1021         if ((!ready) && (enabled))
1022                 return MRIOC_STATE_BECOMING_READY;
1023
1024         return MRIOC_STATE_RESET_REQUESTED;
1025 }
1026
1027 /**
1028  * mpi3mr_clear_reset_history - clear reset history
1029  * @mrioc: Adapter instance reference
1030  *
1031  * Write the reset history bit in IOC status to clear the bit,
1032  * if it is already set.
1033  *
1034  * Return: Nothing.
1035  */
1036 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1037 {
1038         u32 ioc_status;
1039
1040         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1041         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1042                 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1043 }
1044
1045 /**
1046  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1047  * @mrioc: Adapter instance reference
1048  * @reset_reason: Reset reason code
1049  *
1050  * Issue a Message Unit Reset to the controller and wait for it to
1051  * complete.
1052  *
1053  * Return: 0 on success, -1 on failure.
1054  */
1055 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1056         u32 reset_reason)
1057 {
1058         u32 ioc_config, timeout, ioc_status;
1059         int retval = -1;
1060
1061         ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1062         if (mrioc->unrecoverable) {
1063                 ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1064                 return retval;
1065         }
1066         mpi3mr_clear_reset_history(mrioc);
1067         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1068         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1069         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1070         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1071
1072         timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1073         do {
1074                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1075                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1076                         mpi3mr_clear_reset_history(mrioc);
1077                         break;
1078                 }
1079                 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1080                         mpi3mr_print_fault_info(mrioc);
1081                         break;
1082                 }
1083                 msleep(100);
1084         } while (--timeout);
1085
1086         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1087         if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1088               (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1089               (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1090                 retval = 0;
1091
1092         ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
1093             (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1094         return retval;
1095 }
1096
1097 /**
1098  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1099  * during reset/resume
1100  * @mrioc: Adapter instance reference
1101  *
1102  * Return: zero if the new IOCFacts parameter values are compatible with
1103  * the older values, -EPERM otherwise.
1104  */
1105 static int
1106 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1107 {
1108         u16 dev_handle_bitmap_sz;
1109         void *removepend_bitmap;
1110
1111         if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1112                 ioc_err(mrioc,
1113                     "cannot increase reply size from %d to %d\n",
1114                     mrioc->reply_sz, mrioc->facts.reply_sz);
1115                 return -EPERM;
1116         }
1117
1118         if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1119                 ioc_err(mrioc,
1120                     "cannot reduce number of operational reply queues from %d to %d\n",
1121                     mrioc->num_op_reply_q,
1122                     mrioc->facts.max_op_reply_q);
1123                 return -EPERM;
1124         }
1125
1126         if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1127                 ioc_err(mrioc,
1128                     "cannot reduce number of operational request queues from %d to %d\n",
1129                     mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1130                 return -EPERM;
1131         }
1132
1133         dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
1134         if (mrioc->facts.max_devhandle % 8)
1135                 dev_handle_bitmap_sz++;
1136         if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
1137                 removepend_bitmap = krealloc(mrioc->removepend_bitmap,
1138                     dev_handle_bitmap_sz, GFP_KERNEL);
1139                 if (!removepend_bitmap) {
1140                         ioc_err(mrioc,
1141                             "failed to increase removepend_bitmap sz from: %d to %d\n",
1142                             mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
1143                         return -EPERM;
1144                 }
1145                 memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
1146                     dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
1147                 mrioc->removepend_bitmap = removepend_bitmap;
1148                 ioc_info(mrioc,
1149                     "increased dev_handle_bitmap_sz from %d to %d\n",
1150                     mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
1151                 mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
1152         }
1153
1154         return 0;
1155 }
1156
1157 /**
1158  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1159  * @mrioc: Adapter instance reference
1160  *
1161  * Set Enable IOC bit in IOC configuration register and wait for
1162  * the controller to become ready.
1163  *
1164  * Return: 0 on success, appropriate error on failure.
1165  */
1166 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1167 {
1168         u32 ioc_config, ioc_status, timeout;
1169         int retval = 0;
1170         enum mpi3mr_iocstate ioc_state;
1171         u64 base_info;
1172
1173         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1174         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1175         base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1176         ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1177             ioc_status, ioc_config, base_info);
1178
1179         /* The timeout value is in units of 2 seconds; convert it to seconds */
1180         mrioc->ready_timeout =
1181             ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1182             MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1183
1184         ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1185
1186         ioc_state = mpi3mr_get_iocstate(mrioc);
1187         ioc_info(mrioc, "controller is in %s state during detection\n",
1188             mpi3mr_iocstate_name(ioc_state));
1189
1190         if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1191             ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1192                 timeout = mrioc->ready_timeout * 10;
1193                 do {
1194                         msleep(100);
1195                 } while (--timeout);
1196
1197                 ioc_state = mpi3mr_get_iocstate(mrioc);
1198                 ioc_info(mrioc,
1199                     "controller is in %s state after waiting for reset\n",
1200                     mpi3mr_iocstate_name(ioc_state));
1201         }
1202
1203         if (ioc_state == MRIOC_STATE_READY) {
1204                 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1205                 retval = mpi3mr_issue_and_process_mur(mrioc,
1206                     MPI3MR_RESET_FROM_BRINGUP);
1207                 ioc_state = mpi3mr_get_iocstate(mrioc);
1208                 if (retval)
1209                         ioc_err(mrioc,
1210                             "message unit reset failed with error %d current state %s\n",
1211                             retval, mpi3mr_iocstate_name(ioc_state));
1212         }
1213         if (ioc_state != MRIOC_STATE_RESET) {
1214                 mpi3mr_print_fault_info(mrioc);
1215                 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1216                 retval = mpi3mr_issue_reset(mrioc,
1217                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1218                     MPI3MR_RESET_FROM_BRINGUP);
1219                 if (retval) {
1220                         ioc_err(mrioc,
1221                             "soft reset failed with error %d\n", retval);
1222                         goto out_failed;
1223                 }
1224         }
1225         ioc_state = mpi3mr_get_iocstate(mrioc);
1226         if (ioc_state != MRIOC_STATE_RESET) {
1227                 ioc_err(mrioc,
1228                     "cannot bring controller to reset state, current state: %s\n",
1229                     mpi3mr_iocstate_name(ioc_state));
1230                 goto out_failed;
1231         }
1232         mpi3mr_clear_reset_history(mrioc);
1233         retval = mpi3mr_setup_admin_qpair(mrioc);
1234         if (retval) {
1235                 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1236                     retval);
1237                 goto out_failed;
1238         }
1239
1240         ioc_info(mrioc, "bringing controller to ready state\n");
1241         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1242         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1243         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1244
1245         timeout = mrioc->ready_timeout * 10;
1246         do {
1247                 ioc_state = mpi3mr_get_iocstate(mrioc);
1248                 if (ioc_state == MRIOC_STATE_READY) {
1249                         ioc_info(mrioc,
1250                             "successfully transitioned to %s state\n",
1251                             mpi3mr_iocstate_name(ioc_state));
1252                         return 0;
1253                 }
1254                 msleep(100);
1255         } while (--timeout);
1256
1257 out_failed:
1258         ioc_state = mpi3mr_get_iocstate(mrioc);
1259         ioc_err(mrioc,
1260             "failed to bring to ready state, current state: %s\n",
1261             mpi3mr_iocstate_name(ioc_state));
1262         return retval;
1263 }
1264
1265 /**
1266  * mpi3mr_soft_reset_success - Check whether soft reset succeeded
1267  * @ioc_status: IOC status register value
1268  * @ioc_config: IOC config register value
1269  *
1270  * Check whether the soft reset is successful or not based on
1271  * IOC status and IOC config register values.
1272  *
1273  * Return: True when the soft reset is successful, false otherwise.
1274  */
1275 static inline bool
1276 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1277 {
1278         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1279             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1280                 return true;
1281         return false;
1282 }
1283
1284 /**
1285  * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
1286  * @mrioc: Adapter reference
1287  * @ioc_status: IOC status register value
1288  *
1289  * Check whether the controller hit diag reset fault code.
1290  *
1291  * Return: True when there is diag fault, false otherwise.
1292  */
1293 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1294         u32 ioc_status)
1295 {
1296         u32 fault;
1297
1298         if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1299                 return false;
1300         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1301         if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1302                 mpi3mr_print_fault_info(mrioc);
1303                 return true;
1304         }
1305         return false;
1306 }
1307
1308 /**
1309  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1310  * @mrioc: Adapter reference
1311  *
1312  * Set diag save bit in IOC configuration register to enable
1313  * snapdump.
1314  *
1315  * Return: Nothing.
1316  */
1317 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1318 {
1319         u32 ioc_config;
1320
1321         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1322         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1323         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1324 }
1325
1326 /**
1327  * mpi3mr_issue_reset - Issue reset to the controller
1328  * @mrioc: Adapter reference
1329  * @reset_type: Reset type
1330  * @reset_reason: Reset reason code
1331  *
1332  * Unlock the host diagnostic registers, write the specified reset
1333  * type to them, and wait for reset acknowledgment from the
1334  * controller. The unlock (magic write sequence) is retried a
1335  * predefined number of times if it does not take effect.
1336  *
1337  * Return: 0 on success, non-zero on failure.
1338  */
1339 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1340         u32 reset_reason)
1341 {
1342         int retval = -1;
1343         u8 unlock_retry_count = 0;
1344         u32 host_diagnostic, ioc_status, ioc_config;
1345         u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1346
1347         if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1348             (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1349                 return retval;
1350         if (mrioc->unrecoverable)
1351                 return retval;
1352         if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1353                 retval = 0;
1354                 return retval;
1355         }
1356
1357         ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1358             mpi3mr_reset_type_name(reset_type),
1359             mpi3mr_reset_rc_name(reset_reason), reset_reason);
1360
1361         mpi3mr_clear_reset_history(mrioc);
1362         do {
1363                 ioc_info(mrioc,
1364                     "Write magic sequence to unlock host diag register (retry=%d)\n",
1365                     ++unlock_retry_count);
1366                 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1367                         ioc_err(mrioc,
1368                             "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1369                             mpi3mr_reset_type_name(reset_type),
1370                             host_diagnostic);
1371                         mrioc->unrecoverable = 1;
1372                         return retval;
1373                 }
1374
1375                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1376                     &mrioc->sysif_regs->write_sequence);
1377                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1378                     &mrioc->sysif_regs->write_sequence);
1379                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1380                     &mrioc->sysif_regs->write_sequence);
1381                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1382                     &mrioc->sysif_regs->write_sequence);
1383                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1384                     &mrioc->sysif_regs->write_sequence);
1385                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1386                     &mrioc->sysif_regs->write_sequence);
1387                 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1388                     &mrioc->sysif_regs->write_sequence);
1389                 usleep_range(1000, 1100);
1390                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1391                 ioc_info(mrioc,
1392                     "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1393                     unlock_retry_count, host_diagnostic);
1394         } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1395
1396         writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1397         writel(host_diagnostic | reset_type,
1398             &mrioc->sysif_regs->host_diagnostic);
1399         switch (reset_type) {
1400         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1401                 do {
1402                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1403                         ioc_config =
1404                             readl(&mrioc->sysif_regs->ioc_configuration);
1405                         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1406                             && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1407                             ) {
1408                                 mpi3mr_clear_reset_history(mrioc);
1409                                 retval = 0;
1410                                 break;
1411                         }
1412                         msleep(100);
1413                 } while (--timeout);
1414                 mpi3mr_print_fault_info(mrioc);
1415                 break;
1416         case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1417                 do {
1418                         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1419                         if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1420                                 retval = 0;
1421                                 break;
1422                         }
1423                         msleep(100);
1424                 } while (--timeout);
1425                 break;
1426         default:
1427                 break;
1428         }
1429
1430         writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1431             &mrioc->sysif_regs->write_sequence);
1432
1433         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1434         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1435         ioc_info(mrioc,
1436             "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
1437             (!retval) ? "successful" : "failed", ioc_status,
1438             ioc_config);
1439         if (retval)
1440                 mrioc->unrecoverable = 1;
1441         return retval;
1442 }
1443
1444 /**
1445  * mpi3mr_admin_request_post - Post request to admin queue
1446  * @mrioc: Adapter reference
1447  * @admin_req: MPI3 request
1448  * @admin_req_sz: Request size
1449  * @ignore_reset: Ignore an in-progress reset
1450  *
1451  * Post the MPI3 request into the admin request queue and
1452  * inform the controller; if the queue is full, return an
1453  * appropriate error.
1454  *
1455  * Return: 0 on success, non-zero on failure.
1456  */
1457 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1458         u16 admin_req_sz, u8 ignore_reset)
1459 {
1460         u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1461         int retval = 0;
1462         unsigned long flags;
1463         u8 *areq_entry;
1464
1465         if (mrioc->unrecoverable) {
1466                 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1467                 return -EFAULT;
1468         }
1469
1470         spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1471         areq_pi = mrioc->admin_req_pi;
1472         areq_ci = mrioc->admin_req_ci;
1473         max_entries = mrioc->num_admin_req;
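             /*
              * The admin request queue is a circular buffer: it is full when
              * advancing the producer index would make it equal the consumer
              * index, so one slot always stays unused.
              */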
1474         if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1475             (areq_pi == (max_entries - 1)))) {
1476                 ioc_err(mrioc, "AdminReqQ full condition detected\n");
1477                 retval = -EAGAIN;
1478                 goto out;
1479         }
1480         if (!ignore_reset && mrioc->reset_in_progress) {
1481                 ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1482                 retval = -EAGAIN;
1483                 goto out;
1484         }
1485         areq_entry = (u8 *)mrioc->admin_req_base +
1486             (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1487         memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1488         memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1489
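             /*
              * Advance and wrap the producer index, then notify the
              * controller by writing the new PI to the admin request queue
              * doorbell register.
              */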
1490         if (++areq_pi == max_entries)
1491                 areq_pi = 0;
1492         mrioc->admin_req_pi = areq_pi;
1493
1494         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1495
1496 out:
1497         spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1498
1499         return retval;
1500 }
1501
1502 /**
1503  * mpi3mr_free_op_req_q_segments - free request memory segments
1504  * @mrioc: Adapter instance reference
1505  * @q_idx: operational request queue index
1506  *
1507  * Free memory segments allocated for operational request queue
1508  *
1509  * Return: Nothing.
1510  */
1511 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1512 {
1513         u16 j;
1514         int size;
1515         struct segments *segments;
1516
1517         segments = mrioc->req_qinfo[q_idx].q_segments;
1518         if (!segments)
1519                 return;
1520
1521         if (mrioc->enable_segqueue) {
1522                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1523                 if (mrioc->req_qinfo[q_idx].q_segment_list) {
1524                         dma_free_coherent(&mrioc->pdev->dev,
1525                             MPI3MR_MAX_SEG_LIST_SIZE,
1526                             mrioc->req_qinfo[q_idx].q_segment_list,
1527                             mrioc->req_qinfo[q_idx].q_segment_list_dma);
1528                         mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1529                 }
1530         } else
1531                 size = mrioc->req_qinfo[q_idx].segment_qd *
1532                     mrioc->facts.op_req_sz;
1533
1534         for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1535                 if (!segments[j].segment)
1536                         continue;
1537                 dma_free_coherent(&mrioc->pdev->dev,
1538                     size, segments[j].segment, segments[j].segment_dma);
1539                 segments[j].segment = NULL;
1540         }
1541         kfree(mrioc->req_qinfo[q_idx].q_segments);
1542         mrioc->req_qinfo[q_idx].q_segments = NULL;
1543         mrioc->req_qinfo[q_idx].qid = 0;
1544 }
1545
1546 /**
1547  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1548  * @mrioc: Adapter instance reference
1549  * @q_idx: operational reply queue index
1550  *
1551  * Free memory segments allocated for operational reply queue
1552  *
1553  * Return: Nothing.
1554  */
1555 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1556 {
1557         u16 j;
1558         int size;
1559         struct segments *segments;
1560
1561         segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1562         if (!segments)
1563                 return;
1564
1565         if (mrioc->enable_segqueue) {
1566                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1567                 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1568                         dma_free_coherent(&mrioc->pdev->dev,
1569                             MPI3MR_MAX_SEG_LIST_SIZE,
1570                             mrioc->op_reply_qinfo[q_idx].q_segment_list,
1571                             mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1572                         mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1573                 }
1574         } else
1575                 size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1576                     mrioc->op_reply_desc_sz;
1577
1578         for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1579                 if (!segments[j].segment)
1580                         continue;
1581                 dma_free_coherent(&mrioc->pdev->dev,
1582                     size, segments[j].segment, segments[j].segment_dma);
1583                 segments[j].segment = NULL;
1584         }
1585
1586         kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1587         mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1588         mrioc->op_reply_qinfo[q_idx].qid = 0;
1589 }
1590
1591 /**
1592  * mpi3mr_delete_op_reply_q - delete operational reply queue
1593  * @mrioc: Adapter instance reference
1594  * @qidx: operational reply queue index
1595  *
1596  * Delete the operational reply queue by issuing an MPI request
1597  * through the admin queue.
1598  *
1599  * Return:  0 on success, non-zero on failure.
1600  */
1601 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1602 {
1603         struct mpi3_delete_reply_queue_request delq_req;
1604         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1605         int retval = 0;
1606         u16 reply_qid = 0, midx;
1607
1608         reply_qid = op_reply_q->qid;
1609
1610         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1611
1612         if (!reply_qid) {
1613                 retval = -1;
1614                 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
1615                 goto out;
1616         }
1617
1618         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1619             mrioc->active_poll_qcount--;
1620
1621         memset(&delq_req, 0, sizeof(delq_req));
1622         mutex_lock(&mrioc->init_cmds.mutex);
1623         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1624                 retval = -1;
1625                 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1626                 mutex_unlock(&mrioc->init_cmds.mutex);
1627                 goto out;
1628         }
1629         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1630         mrioc->init_cmds.is_waiting = 1;
1631         mrioc->init_cmds.callback = NULL;
1632         delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1633         delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1634         delq_req.queue_id = cpu_to_le16(reply_qid);
1635
1636         init_completion(&mrioc->init_cmds.done);
1637         retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1638             1);
1639         if (retval) {
1640                 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1641                 goto out_unlock;
1642         }
1643         wait_for_completion_timeout(&mrioc->init_cmds.done,
1644             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1645         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1646                 ioc_err(mrioc, "delete reply queue timed out\n");
1647                 mpi3mr_check_rh_fault_ioc(mrioc,
1648                     MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1649                 retval = -1;
1650                 goto out_unlock;
1651         }
1652         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1653             != MPI3_IOCSTATUS_SUCCESS) {
1654                 ioc_err(mrioc,
1655                     "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1656                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1657                     mrioc->init_cmds.ioc_loginfo);
1658                 retval = -1;
1659                 goto out_unlock;
1660         }
1661         mrioc->intr_info[midx].op_reply_q = NULL;
1662
1663         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1664 out_unlock:
1665         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1666         mutex_unlock(&mrioc->init_cmds.mutex);
1667 out:
1668
1669         return retval;
1670 }
1671
1672 /**
1673  * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
1674  * @mrioc: Adapter instance reference
1675  * @qidx: operational reply queue index
1676  *
1677  * Allocate segmented memory pools for operational reply
1678  * queue.
1679  *
1680  * Return: 0 on success, non-zero on failure.
1681  */
1682 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1683 {
1684         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1685         int i, size;
1686         u64 *q_segment_list_entry = NULL;
1687         struct segments *segments;
1688
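             /*
              * Segmented queues split the reply pool into fixed-size DMA
              * segments and pass the controller a list of segment addresses;
              * otherwise a single contiguous buffer covering all reply
              * descriptors is used.
              */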
1689         if (mrioc->enable_segqueue) {
1690                 op_reply_q->segment_qd =
1691                     MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1692
1693                 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1694
1695                 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1696                     MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1697                     GFP_KERNEL);
1698                 if (!op_reply_q->q_segment_list)
1699                         return -ENOMEM;
1700                 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1701         } else {
1702                 op_reply_q->segment_qd = op_reply_q->num_replies;
1703                 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1704         }
1705
1706         op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1707             op_reply_q->segment_qd);
1708
1709         op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1710             sizeof(struct segments), GFP_KERNEL);
1711         if (!op_reply_q->q_segments)
1712                 return -ENOMEM;
1713
1714         segments = op_reply_q->q_segments;
1715         for (i = 0; i < op_reply_q->num_segments; i++) {
1716                 segments[i].segment =
1717                     dma_alloc_coherent(&mrioc->pdev->dev,
1718                     size, &segments[i].segment_dma, GFP_KERNEL);
1719                 if (!segments[i].segment)
1720                         return -ENOMEM;
1721                 if (mrioc->enable_segqueue)
1722                         q_segment_list_entry[i] =
1723                             (unsigned long)segments[i].segment_dma;
1724         }
1725
1726         return 0;
1727 }
1728
1729 /**
1730  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1731  * @mrioc: Adapter instance reference
1732  * @qidx: request queue index
1733  *
1734  * Allocate segmented memory pools for operational request
1735  * queue.
1736  *
1737  * Return: 0 on success, non-zero on failure.
1738  */
1739 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1740 {
1741         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1742         int i, size;
1743         u64 *q_segment_list_entry = NULL;
1744         struct segments *segments;
1745
1746         if (mrioc->enable_segqueue) {
1747                 op_req_q->segment_qd =
1748                     MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1749
1750                 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1751
1752                 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1753                     MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1754                     GFP_KERNEL);
1755                 if (!op_req_q->q_segment_list)
1756                         return -ENOMEM;
1757                 q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1758
1759         } else {
1760                 op_req_q->segment_qd = op_req_q->num_requests;
1761                 size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1762         }
1763
1764         op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1765             op_req_q->segment_qd);
1766
1767         op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1768             sizeof(struct segments), GFP_KERNEL);
1769         if (!op_req_q->q_segments)
1770                 return -ENOMEM;
1771
1772         segments = op_req_q->q_segments;
1773         for (i = 0; i < op_req_q->num_segments; i++) {
1774                 segments[i].segment =
1775                     dma_alloc_coherent(&mrioc->pdev->dev,
1776                     size, &segments[i].segment_dma, GFP_KERNEL);
1777                 if (!segments[i].segment)
1778                         return -ENOMEM;
1779                 if (mrioc->enable_segqueue)
1780                         q_segment_list_entry[i] =
1781                             (unsigned long)segments[i].segment_dma;
1782         }
1783
1784         return 0;
1785 }
1786
1787 /**
1788  * mpi3mr_create_op_reply_q - create operational reply queue
1789  * @mrioc: Adapter instance reference
1790  * @qidx: operational reply queue index
1791  *
1792  * Create the operational reply queue by issuing an MPI request
1793  * through the admin queue.
1794  *
1795  * Return:  0 on success, non-zero on failure.
1796  */
1797 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1798 {
1799         struct mpi3_create_reply_queue_request create_req;
1800         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1801         int retval = 0;
1802         u16 reply_qid = 0, midx;
1803
1804         reply_qid = op_reply_q->qid;
1805
1806         midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1807
1808         if (reply_qid) {
1809                 retval = -1;
1810                 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1811                     reply_qid);
1812
1813                 return retval;
1814         }
1815
1816         reply_qid = qidx + 1;
1817         op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1818         if (!mrioc->pdev->revision)
1819                 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
1820         op_reply_q->ci = 0;
1821         op_reply_q->ephase = 1;
1822         atomic_set(&op_reply_q->pend_ios, 0);
1823         atomic_set(&op_reply_q->in_use, 0);
1824         op_reply_q->enable_irq_poll = false;
1825
1826         if (!op_reply_q->q_segments) {
1827                 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1828                 if (retval) {
1829                         mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1830                         goto out;
1831                 }
1832         }
1833
1834         memset(&create_req, 0, sizeof(create_req));
1835         mutex_lock(&mrioc->init_cmds.mutex);
1836         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1837                 retval = -1;
1838                 ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1839                 goto out_unlock;
1840         }
1841         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1842         mrioc->init_cmds.is_waiting = 1;
1843         mrioc->init_cmds.callback = NULL;
1844         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1845         create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1846         create_req.queue_id = cpu_to_le16(reply_qid);
1847
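             /*
              * The last requested_poll_qcount MSI-X indexes are reserved for
              * poll queues; reply queues mapping to them are created without
              * interrupt enable, the rest as interrupt-driven default queues.
              */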
1848         if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
1849                 op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
1850         else
1851                 op_reply_q->qtype = MPI3MR_POLL_QUEUE;
1852
1853         if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
1854                 create_req.flags =
1855                         MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1856                 create_req.msix_index =
1857                         cpu_to_le16(mrioc->intr_info[midx].msix_index);
1858         } else {
1859                 create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
1860                 ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
1861                         reply_qid, midx);
1862                 if (!mrioc->active_poll_qcount)
1863                         disable_irq_nosync(pci_irq_vector(mrioc->pdev,
1864                             mrioc->intr_info_count - 1));
1865         }
1866
1867         if (mrioc->enable_segqueue) {
1868                 create_req.flags |=
1869                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1870                 create_req.base_address = cpu_to_le64(
1871                     op_reply_q->q_segment_list_dma);
1872         } else
1873                 create_req.base_address = cpu_to_le64(
1874                     op_reply_q->q_segments[0].segment_dma);
1875
1876         create_req.size = cpu_to_le16(op_reply_q->num_replies);
1877
1878         init_completion(&mrioc->init_cmds.done);
1879         retval = mpi3mr_admin_request_post(mrioc, &create_req,
1880             sizeof(create_req), 1);
1881         if (retval) {
1882                 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1883                 goto out_unlock;
1884         }
1885         wait_for_completion_timeout(&mrioc->init_cmds.done,
1886             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1887         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1888                 ioc_err(mrioc, "create reply queue timed out\n");
1889                 mpi3mr_check_rh_fault_ioc(mrioc,
1890                     MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1891                 retval = -1;
1892                 goto out_unlock;
1893         }
1894         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1895             != MPI3_IOCSTATUS_SUCCESS) {
1896                 ioc_err(mrioc,
1897                     "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1898                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1899                     mrioc->init_cmds.ioc_loginfo);
1900                 retval = -1;
1901                 goto out_unlock;
1902         }
1903         op_reply_q->qid = reply_qid;
1904         if (midx < mrioc->intr_info_count)
1905                 mrioc->intr_info[midx].op_reply_q = op_reply_q;
1906
1907         (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
1908             mrioc->active_poll_qcount++;
1909
1910 out_unlock:
1911         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1912         mutex_unlock(&mrioc->init_cmds.mutex);
1913 out:
1914
1915         return retval;
1916 }
1917
1918 /**
1919  * mpi3mr_create_op_req_q - create operational request queue
1920  * @mrioc: Adapter instance reference
1921  * @idx: operational request queue index
1922  * @reply_qid: Reply queue ID
1923  *
1924  * Create the operational request queue by issuing an MPI request
1925  * through the admin queue.
1926  *
1927  * Return:  0 on success, non-zero on failure.
1928  */
1929 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
1930         u16 reply_qid)
1931 {
1932         struct mpi3_create_request_queue_request create_req;
1933         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
1934         int retval = 0;
1935         u16 req_qid = 0;
1936
1937         req_qid = op_req_q->qid;
1938
1939         if (req_qid) {
1940                 retval = -1;
1941                 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
1942                     req_qid);
1943
1944                 return retval;
1945         }
1946         req_qid = idx + 1;
1947
1948         op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
1949         op_req_q->ci = 0;
1950         op_req_q->pi = 0;
1951         op_req_q->reply_qid = reply_qid;
1952         spin_lock_init(&op_req_q->q_lock);
1953
1954         if (!op_req_q->q_segments) {
1955                 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
1956                 if (retval) {
1957                         mpi3mr_free_op_req_q_segments(mrioc, idx);
1958                         goto out;
1959                 }
1960         }
1961
1962         memset(&create_req, 0, sizeof(create_req));
1963         mutex_lock(&mrioc->init_cmds.mutex);
1964         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1965                 retval = -1;
1966                 ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
1967                 goto out_unlock;
1968         }
1969         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1970         mrioc->init_cmds.is_waiting = 1;
1971         mrioc->init_cmds.callback = NULL;
1972         create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1973         create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
1974         create_req.queue_id = cpu_to_le16(req_qid);
1975         if (mrioc->enable_segqueue) {
1976                 create_req.flags =
1977                     MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1978                 create_req.base_address = cpu_to_le64(
1979                     op_req_q->q_segment_list_dma);
1980         } else
1981                 create_req.base_address = cpu_to_le64(
1982                     op_req_q->q_segments[0].segment_dma);
1983         create_req.reply_queue_id = cpu_to_le16(reply_qid);
1984         create_req.size = cpu_to_le16(op_req_q->num_requests);
1985
1986         init_completion(&mrioc->init_cmds.done);
1987         retval = mpi3mr_admin_request_post(mrioc, &create_req,
1988             sizeof(create_req), 1);
1989         if (retval) {
1990                 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
1991                 goto out_unlock;
1992         }
1993         wait_for_completion_timeout(&mrioc->init_cmds.done,
1994             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1995         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1996                 ioc_err(mrioc, "create request queue timed out\n");
1997                 mpi3mr_check_rh_fault_ioc(mrioc,
1998                     MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
1999                 retval = -1;
2000                 goto out_unlock;
2001         }
2002         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2003             != MPI3_IOCSTATUS_SUCCESS) {
2004                 ioc_err(mrioc,
2005                     "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2006                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2007                     mrioc->init_cmds.ioc_loginfo);
2008                 retval = -1;
2009                 goto out_unlock;
2010         }
2011         op_req_q->qid = req_qid;
2012
2013 out_unlock:
2014         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2015         mutex_unlock(&mrioc->init_cmds.mutex);
2016 out:
2017
2018         return retval;
2019 }
2020
2021 /**
2022  * mpi3mr_create_op_queues - create operational queue pairs
2023  * @mrioc: Adapter instance reference
2024  *
2025  * Allocate memory for operational queue metadata and call the
2026  * create request and reply queue functions.
2027  *
2028  * Return: 0 on success, non-zero on failures.
2029  */
2030 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2031 {
2032         int retval = 0;
2033         u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2034
2035         num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2036             mrioc->facts.max_op_req_q);
2037
2038         msix_count_op_q =
2039             mrioc->intr_info_count - mrioc->op_reply_q_offset;
2040         if (!mrioc->num_queues)
2041                 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2042         /*
2043          * During reset set the num_queues to the number of queues
2044          * that was set before the reset.
2045          */
2046         num_queues = mrioc->num_op_reply_q ?
2047             mrioc->num_op_reply_q : mrioc->num_queues;
2048         ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2049             num_queues);
2050
2051         if (!mrioc->req_qinfo) {
2052                 mrioc->req_qinfo = kcalloc(num_queues,
2053                     sizeof(struct op_req_qinfo), GFP_KERNEL);
2054                 if (!mrioc->req_qinfo) {
2055                         retval = -1;
2056                         goto out_failed;
2057                 }
2058
2059                 mrioc->op_reply_qinfo = kcalloc(num_queues,
2060                     sizeof(struct op_reply_qinfo), GFP_KERNEL);
2061                 if (!mrioc->op_reply_qinfo) {
2062                         retval = -1;
2063                         goto out_failed;
2064                 }
2065         }
2066
2067         if (mrioc->enable_segqueue)
2068                 ioc_info(mrioc,
2069                     "allocating operational queues through segmented queues\n");
2070
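             /*
              * Create reply/request queues in pairs; stop at the first
              * failure and continue with however many pairs were created.
              */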
2071         for (i = 0; i < num_queues; i++) {
2072                 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2073                         ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2074                         break;
2075                 }
2076                 if (mpi3mr_create_op_req_q(mrioc, i,
2077                     mrioc->op_reply_qinfo[i].qid)) {
2078                         ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2079                         mpi3mr_delete_op_reply_q(mrioc, i);
2080                         break;
2081                 }
2082         }
2083
2084         if (i == 0) {
2085                 /* Not even one queue was created successfully */
2086                 retval = -1;
2087                 goto out_failed;
2088         }
2089         mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2090         ioc_info(mrioc,
2091             "successfully created %d operational queue pairs (default/polled) = (%d/%d)\n",
2092             mrioc->num_op_reply_q, mrioc->default_qcount,
2093             mrioc->active_poll_qcount);
2094
2095         return retval;
2096 out_failed:
2097         kfree(mrioc->req_qinfo);
2098         mrioc->req_qinfo = NULL;
2099
2100         kfree(mrioc->op_reply_qinfo);
2101         mrioc->op_reply_qinfo = NULL;
2102
2103         return retval;
2104 }
2105
2106 /**
2107  * mpi3mr_op_request_post - Post request to operational queue
2108  * @mrioc: Adapter reference
2109  * @op_req_q: Operational request queue info
2110  * @req: MPI3 request
2111  *
2112  * Post the MPI3 request into the operational request queue and
2113  * inform the controller; if the queue is full, return an
2114  * appropriate error.
2115  *
2116  * Return: 0 on success, non-zero on failure.
2117  */
2118 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2119         struct op_req_qinfo *op_req_q, u8 *req)
2120 {
2121         u16 pi = 0, max_entries, reply_qidx = 0, midx;
2122         int retval = 0;
2123         unsigned long flags;
2124         u8 *req_entry;
2125         void *segment_base_addr;
2126         u16 req_sz = mrioc->facts.op_req_sz;
2127         struct segments *segments = op_req_q->q_segments;
2128
2129         reply_qidx = op_req_q->reply_qid - 1;
2130
2131         if (mrioc->unrecoverable)
2132                 return -EFAULT;
2133
2134         spin_lock_irqsave(&op_req_q->q_lock, flags);
2135         pi = op_req_q->pi;
2136         max_entries = op_req_q->num_requests;
2137
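             /*
              * If the request queue is full, try to make room by processing
              * the paired reply queue once before giving up with -EAGAIN.
              */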
2138         if (mpi3mr_check_req_qfull(op_req_q)) {
2139                 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2140                     reply_qidx, mrioc->op_reply_q_offset);
2141                 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2142
2143                 if (mpi3mr_check_req_qfull(op_req_q)) {
2144                         retval = -EAGAIN;
2145                         goto out;
2146                 }
2147         }
2148
2149         if (mrioc->reset_in_progress) {
2150                 ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2151                 retval = -EAGAIN;
2152                 goto out;
2153         }
2154
2155         segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2156         req_entry = (u8 *)segment_base_addr +
2157             ((pi % op_req_q->segment_qd) * req_sz);
2158
2159         memset(req_entry, 0, req_sz);
2160         memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2161
2162         if (++pi == max_entries)
2163                 pi = 0;
2164         op_req_q->pi = pi;
2165
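             /*
              * Once pending I/Os on the paired reply queue cross the trigger
              * count, switch that queue to IRQ-poll mode to cut interrupt
              * overhead under heavy load.
              */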
2166         if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2167             > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2168                 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2169
2170         writel(op_req_q->pi,
2171             &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2172
2173 out:
2174         spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2175         return retval;
2176 }
2177
2178 /**
2179  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2180  * controller
2181  * @mrioc: Adapter instance reference
2182  * @reason_code: reason code for the fault.
2183  *
2184  * This routine will save a snapdump and fault the controller with
2185  * the given reason code if it is not already in the fault state or
2186  * being asynchronously reset. This is used to handle
2187  * initialization-time faults/resets/timeouts, as in those cases an
2188  * immediate soft reset invocation is not required.
2189  *
2190  * Return:  None.
2191  */
2192 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2193 {
2194         u32 ioc_status, host_diagnostic, timeout;
2195
2196         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2197         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2198             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
2199                 mpi3mr_print_fault_info(mrioc);
2200                 return;
2201         }
2202         mpi3mr_set_diagsave(mrioc);
2203         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2204             reason_code);
2205         timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
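             /* Poll every 100ms until the diag save completes or the timeout expires. */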
2206         do {
2207                 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2208                 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2209                         break;
2210                 msleep(100);
2211         } while (--timeout);
2212 }
2213
2214 /**
2215  * mpi3mr_sync_timestamp - Issue time stamp sync request
2216  * @mrioc: Adapter reference
2217  *
2218  * Issue an IO unit control MPI request to synchronize the firmware
2219  * timestamp with the host time.
2220  *
2221  * Return: 0 on success, non-zero on failure.
2222  */
2223 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2224 {
2225         ktime_t current_time;
2226         struct mpi3_iounit_control_request iou_ctrl;
2227         int retval = 0;
2228
2229         memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2230         mutex_lock(&mrioc->init_cmds.mutex);
2231         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2232                 retval = -1;
2233                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2234                 mutex_unlock(&mrioc->init_cmds.mutex);
2235                 goto out;
2236         }
2237         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2238         mrioc->init_cmds.is_waiting = 1;
2239         mrioc->init_cmds.callback = NULL;
2240         iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2241         iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2242         iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
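             /* Pass the host wall-clock time in milliseconds as the new timestamp. */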
2243         current_time = ktime_get_real();
2244         iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2245
2246         init_completion(&mrioc->init_cmds.done);
2247         retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2248             sizeof(iou_ctrl), 0);
2249         if (retval) {
2250                 ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2251                 goto out_unlock;
2252         }
2253
2254         wait_for_completion_timeout(&mrioc->init_cmds.done,
2255             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2256         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2257                 ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2258                 mrioc->init_cmds.is_waiting = 0;
2259                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2260                         mpi3mr_soft_reset_handler(mrioc,
2261                             MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
2262                 retval = -1;
2263                 goto out_unlock;
2264         }
2265         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2266             != MPI3_IOCSTATUS_SUCCESS) {
2267                 ioc_err(mrioc,
2268                     "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2269                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2270                     mrioc->init_cmds.ioc_loginfo);
2271                 retval = -1;
2272                 goto out_unlock;
2273         }
2274
2275 out_unlock:
2276         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2277         mutex_unlock(&mrioc->init_cmds.mutex);
2278
2279 out:
2280         return retval;
2281 }
2282
2283 /**
2284  * mpi3mr_print_pkg_ver - display controller fw package version
2285  * @mrioc: Adapter reference
2286  *
2287  * Retrieve firmware package version from the component image
2288  * header of the controller flash and display it.
2289  *
2290  * Return: 0 on success and non-zero on failure.
2291  */
2292 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2293 {
2294         struct mpi3_ci_upload_request ci_upload;
2295         int retval = -1;
2296         void *data = NULL;
2297         dma_addr_t data_dma;
2298         struct mpi3_ci_manifest_mpi *manifest;
2299         u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2300         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2301
2302         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2303             GFP_KERNEL);
2304         if (!data)
2305                 return -ENOMEM;
2306
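             /*
              * Upload only the component image manifest (which sits right
              * after the image header in primary flash) to read the firmware
              * package version from it.
              */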
2307         memset(&ci_upload, 0, sizeof(ci_upload));
2308         mutex_lock(&mrioc->init_cmds.mutex);
2309         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2310                 ioc_err(mrioc, "sending get package version failed due to command in use\n");
2311                 mutex_unlock(&mrioc->init_cmds.mutex);
2312                 goto out;
2313         }
2314         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2315         mrioc->init_cmds.is_waiting = 1;
2316         mrioc->init_cmds.callback = NULL;
2317         ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2318         ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2319         ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2320         ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2321         ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2322         ci_upload.segment_size = cpu_to_le32(data_len);
2323
2324         mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2325             data_dma);
2326         init_completion(&mrioc->init_cmds.done);
2327         retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2328             sizeof(ci_upload), 1);
2329         if (retval) {
2330                 ioc_err(mrioc, "posting get package version failed\n");
2331                 goto out_unlock;
2332         }
2333         wait_for_completion_timeout(&mrioc->init_cmds.done,
2334             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2335         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2336                 ioc_err(mrioc, "get package version timed out\n");
2337                 mpi3mr_check_rh_fault_ioc(mrioc,
2338                     MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2339                 retval = -1;
2340                 goto out_unlock;
2341         }
2342         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2343             == MPI3_IOCSTATUS_SUCCESS) {
2344                 manifest = (struct mpi3_ci_manifest_mpi *) data;
2345                 if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2346                         ioc_info(mrioc,
2347                             "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2348                             manifest->package_version.gen_major,
2349                             manifest->package_version.gen_minor,
2350                             manifest->package_version.phase_major,
2351                             manifest->package_version.phase_minor,
2352                             manifest->package_version.customer_id,
2353                             manifest->package_version.build_num);
2354                 }
2355         }
2356         retval = 0;
2357 out_unlock:
2358         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2359         mutex_unlock(&mrioc->init_cmds.mutex);
2360
2361 out:
2362         if (data)
2363                 dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2364                     data_dma);
2365         return retval;
2366 }
2367
2368 /**
2369  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2370  * @work: work struct
2371  *
2372  * Watchdog work executed periodically (at a 1 second interval) to
2373  * monitor for firmware faults and to issue a periodic timestamp sync
2374  * to the firmware.
2375  *
2376  * Return: Nothing.
2377  */
2378 static void mpi3mr_watchdog_work(struct work_struct *work)
2379 {
2380         struct mpi3mr_ioc *mrioc =
2381             container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2382         unsigned long flags;
2383         enum mpi3mr_iocstate ioc_state;
2384         u32 fault, host_diagnostic, ioc_status;
2385         u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2386
2387         if (mrioc->reset_in_progress || mrioc->unrecoverable)
2388                 return;
2389
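             /*
              * Resync the firmware timestamp once every
              * MPI3MR_TSUPDATE_INTERVAL passes of the watchdog.
              */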
2390         if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2391                 mrioc->ts_update_counter = 0;
2392                 mpi3mr_sync_timestamp(mrioc);
2393         }
2394
2395         if ((mrioc->prepare_for_reset) &&
2396             ((mrioc->prepare_for_reset_timeout_counter++) >=
2397              MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2398                 mpi3mr_soft_reset_handler(mrioc,
2399                     MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2400                 return;
2401         }
2402
2403         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2404         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2405                 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2406                 return;
2407         }
2408
2409         /* Check for the fault state every second and issue a soft reset */
2410         ioc_state = mpi3mr_get_iocstate(mrioc);
2411         if (ioc_state != MRIOC_STATE_FAULT)
2412                 goto schedule_work;
2413
2414         fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2415         host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2416         if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2417                 if (!mrioc->diagsave_timeout) {
2418                         mpi3mr_print_fault_info(mrioc);
2419                         ioc_warn(mrioc, "diag save in progress\n");
2420                 }
2421                 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2422                         goto schedule_work;
2423         }
2424
2425         mpi3mr_print_fault_info(mrioc);
2426         mrioc->diagsave_timeout = 0;
2427
2428         switch (fault) {
2429         case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2430                 ioc_info(mrioc,
2431                     "controller requires system power cycle, marking controller as unrecoverable\n");
2432                 mrioc->unrecoverable = 1;
2433                 return;
2434         case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2435                 return;
2436         case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2437                 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2438                 break;
2439         default:
2440                 break;
2441         }
2442         mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2443         return;
2444
2445 schedule_work:
2446         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2447         if (mrioc->watchdog_work_q)
2448                 queue_delayed_work(mrioc->watchdog_work_q,
2449                     &mrioc->watchdog_work,
2450                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2451         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2452         return;
2453 }
2454
2455 /**
2456  * mpi3mr_start_watchdog - Start watchdog
2457  * @mrioc: Adapter instance reference
2458  *
2459  * Create and start the watchdog thread to monitor controller
2460  * faults.
2461  *
2462  * Return: Nothing.
2463  */
2464 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2465 {
2466         if (mrioc->watchdog_work_q)
2467                 return;
2468
2469         INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2470         snprintf(mrioc->watchdog_work_q_name,
2471             sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2472             mrioc->id);
2473         mrioc->watchdog_work_q =
2474             create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2475         if (!mrioc->watchdog_work_q) {
2476                 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2477                 return;
2478         }
2479
2480         if (mrioc->watchdog_work_q)
2481                 queue_delayed_work(mrioc->watchdog_work_q,
2482                     &mrioc->watchdog_work,
2483                     msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2484 }
2485
2486 /**
2487  * mpi3mr_stop_watchdog - Stop watchdog
2488  * @mrioc: Adapter instance reference
2489  *
2490  * Stop the watchdog thread created to monitor controller
2491  * faults.
2492  *
2493  * Return: Nothing.
2494  */
2495 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2496 {
2497         unsigned long flags;
2498         struct workqueue_struct *wq;
2499
2500         spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2501         wq = mrioc->watchdog_work_q;
2502         mrioc->watchdog_work_q = NULL;
2503         spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2504         if (wq) {
2505                 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2506                         flush_workqueue(wq);
2507                 destroy_workqueue(wq);
2508         }
2509 }
2510
2511 /**
2512  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2513  * @mrioc: Adapter instance reference
2514  *
2515  * Allocate memory for admin queue pair if required and register
2516  * the admin queue with the controller.
2517  *
2518  * Return: 0 on success, non-zero on failures.
2519  */
2520 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2521 {
2522         int retval = 0;
2523         u32 num_admin_entries = 0;
2524
2525         mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2526         mrioc->num_admin_req = mrioc->admin_req_q_sz /
2527             MPI3MR_ADMIN_REQ_FRAME_SZ;
2528         mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2529         mrioc->admin_req_base = NULL;
2530
2531         mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2532         mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2533             MPI3MR_ADMIN_REPLY_FRAME_SZ;
2534         mrioc->admin_reply_ci = 0;
2535         mrioc->admin_reply_ephase = 1;
2536         mrioc->admin_reply_base = NULL;
2537
2538         if (!mrioc->admin_req_base) {
2539                 mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2540                     mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2541
2542                 if (!mrioc->admin_req_base) {
2543                         retval = -1;
2544                         goto out_failed;
2545                 }
2546
2547                 mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2548                     mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2549                     GFP_KERNEL);
2550
2551                 if (!mrioc->admin_reply_base) {
2552                         retval = -1;
2553                         goto out_failed;
2554                 }
2555         }
2556
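             /*
              * Program the admin queue depths (replies in the upper 16 bits,
              * requests in the lower 16 bits), the queue base addresses and
              * the initial PI/CI values into the system interface registers.
              */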
2557         num_admin_entries = (mrioc->num_admin_replies << 16) |
2558             (mrioc->num_admin_req);
2559         writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2560         mpi3mr_writeq(mrioc->admin_req_dma,
2561             &mrioc->sysif_regs->admin_request_queue_address);
2562         mpi3mr_writeq(mrioc->admin_reply_dma,
2563             &mrioc->sysif_regs->admin_reply_queue_address);
2564         writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2565         writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2566         return retval;
2567
2568 out_failed:
2569
2570         if (mrioc->admin_reply_base) {
2571                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2572                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
2573                 mrioc->admin_reply_base = NULL;
2574         }
2575         if (mrioc->admin_req_base) {
2576                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2577                     mrioc->admin_req_base, mrioc->admin_req_dma);
2578                 mrioc->admin_req_base = NULL;
2579         }
2580         return retval;
2581 }
2582
2583 /**
2584  * mpi3mr_issue_iocfacts - Send IOC Facts
2585  * @mrioc: Adapter instance reference
2586  * @facts_data: Cached IOC facts data
2587  *
2588  * Issue the IOC Facts MPI request through the admin queue and wait
2589  * for it to complete or time out.
2590  *
2591  * Return: 0 on success, non-zero on failures.
2592  */
2593 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2594         struct mpi3_ioc_facts_data *facts_data)
2595 {
2596         struct mpi3_ioc_facts_request iocfacts_req;
2597         void *data = NULL;
2598         dma_addr_t data_dma;
2599         u32 data_len = sizeof(*facts_data);
2600         int retval = 0;
2601         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2602
2603         data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2604             GFP_KERNEL);
2605
2606         if (!data) {
2607                 retval = -1;
2608                 goto out;
2609         }
2610
2611         memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2612         mutex_lock(&mrioc->init_cmds.mutex);
2613         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2614                 retval = -1;
2615                 ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2616                 mutex_unlock(&mrioc->init_cmds.mutex);
2617                 goto out;
2618         }
2619         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2620         mrioc->init_cmds.is_waiting = 1;
2621         mrioc->init_cmds.callback = NULL;
2622         iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2623         iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2624
2625         mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2626             data_dma);
2627
2628         init_completion(&mrioc->init_cmds.done);
2629         retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2630             sizeof(iocfacts_req), 1);
2631         if (retval) {
2632                 ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2633                 goto out_unlock;
2634         }
2635         wait_for_completion_timeout(&mrioc->init_cmds.done,
2636             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2637         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2638                 ioc_err(mrioc, "ioc_facts timed out\n");
2639                 mpi3mr_check_rh_fault_ioc(mrioc,
2640                     MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2641                 retval = -1;
2642                 goto out_unlock;
2643         }
2644         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2645             != MPI3_IOCSTATUS_SUCCESS) {
2646                 ioc_err(mrioc,
2647                     "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2648                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2649                     mrioc->init_cmds.ioc_loginfo);
2650                 retval = -1;
2651                 goto out_unlock;
2652         }
2653         memcpy(facts_data, (u8 *)data, data_len);
2654         mpi3mr_process_factsdata(mrioc, facts_data);
2655 out_unlock:
2656         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2657         mutex_unlock(&mrioc->init_cmds.mutex);
2658
2659 out:
2660         if (data)
2661                 dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2662
2663         return retval;
2664 }
2665
2666 /**
2667  * mpi3mr_check_reset_dma_mask - Check and set DMA mask
2668  * @mrioc: Adapter instance reference
2669  *
2670  * Check whether the new DMA mask requested through IOCFacts by
2671  * firmware needs to be set; if so, set it.
2672  *
2673  * Return: 0 on success, non-zero on failure.
2674  */
2675 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2676 {
2677         struct pci_dev *pdev = mrioc->pdev;
2678         int r;
2679         u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2680
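             /*
              * facts.dma_mask holds the DMA address width (in bits) reported
              * by firmware; only shrink the host DMA mask when firmware asks
              * for a narrower width than is currently in use.
              */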
2681         if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2682                 return 0;
2683
2684         ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2685             mrioc->dma_mask, facts_dma_mask);
2686
2687         r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2688         if (r) {
2689                 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2690                     facts_dma_mask, r);
2691                 return r;
2692         }
2693         mrioc->dma_mask = facts_dma_mask;
2694         return r;
2695 }
2696
2697 /**
2698  * mpi3mr_process_factsdata - Process IOC facts data
2699  * @mrioc: Adapter instance reference
2700  * @facts_data: Cached IOC facts data
2701  *
2702  * Convert the IOC facts data into CPU endianness and cache it in
2703  * the driver.
2704  *
2705  * Return: Nothing.
2706  */
2707 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2708         struct mpi3_ioc_facts_data *facts_data)
2709 {
2710         u32 ioc_config, req_sz, facts_flags;
2711
2712         if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2713             (sizeof(*facts_data) / 4)) {
2714                 ioc_warn(mrioc,
2715                     "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2716                     sizeof(*facts_data),
2717                     le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2718         }
2719
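             /*
              * The operational request entry size is encoded as a power-of-two
              * exponent in the IOC configuration register; convert it to bytes
              * and cross-check it against the size reported in IOC facts.
              */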
2720         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2721         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2722             MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2723         if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2724                 ioc_err(mrioc,
2725                     "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2726                     req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2727         }
2728
2729         memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2730
2731         facts_flags = le32_to_cpu(facts_data->flags);
2732         mrioc->facts.op_req_sz = req_sz;
2733         mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2734             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2735             MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2736
2737         mrioc->facts.ioc_num = facts_data->ioc_number;
2738         mrioc->facts.who_init = facts_data->who_init;
2739         mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2740         mrioc->facts.personality = (facts_flags &
2741             MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2742         mrioc->facts.dma_mask = (facts_flags &
2743             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2744             MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2745         mrioc->facts.protocol_flags = facts_data->protocol_flags;
2746         mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2747         mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
2748         mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2749         mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2750         mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2751         mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2752         mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2753         mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2754         mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2755         mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2756         mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2757         mrioc->facts.max_pcie_switches =
2758             le16_to_cpu(facts_data->max_pcie_switches);
2759         mrioc->facts.max_sasexpanders =
2760             le16_to_cpu(facts_data->max_sas_expanders);
2761         mrioc->facts.max_sasinitiators =
2762             le16_to_cpu(facts_data->max_sas_initiators);
2763         mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2764         mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2765         mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2766         mrioc->facts.max_op_req_q =
2767             le16_to_cpu(facts_data->max_operational_request_queues);
2768         mrioc->facts.max_op_reply_q =
2769             le16_to_cpu(facts_data->max_operational_reply_queues);
2770         mrioc->facts.ioc_capabilities =
2771             le32_to_cpu(facts_data->ioc_capabilities);
2772         mrioc->facts.fw_ver.build_num =
2773             le16_to_cpu(facts_data->fw_version.build_num);
2774         mrioc->facts.fw_ver.cust_id =
2775             le16_to_cpu(facts_data->fw_version.customer_id);
2776         mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2777         mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2778         mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2779         mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2780         mrioc->msix_count = min_t(int, mrioc->msix_count,
2781             mrioc->facts.max_msix_vectors);
2782         mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2783         mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2784         mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2785         mrioc->facts.shutdown_timeout =
2786             le16_to_cpu(facts_data->shutdown_timeout);
2787
2788         mrioc->facts.max_dev_per_tg =
2789             facts_data->max_devices_per_throttle_group;
2790         mrioc->facts.io_throttle_data_length =
2791             le16_to_cpu(facts_data->io_throttle_data_length);
2792         mrioc->facts.max_io_throttle_group =
2793             le16_to_cpu(facts_data->max_io_throttle_group);
2794         mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
2795         mrioc->facts.io_throttle_high =
2796             le16_to_cpu(facts_data->io_throttle_high);
2797
2798         /* Store in 512b block count */
2799         if (mrioc->facts.io_throttle_data_length)
2800                 mrioc->io_throttle_data_length =
2801                     (mrioc->facts.io_throttle_data_length * 2 * 4);
2802         else
2803                 /* set the length to 1MB + 1K to disable throttle */
2804                 mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
2805
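             /*
              * io_throttle_high/low are reported in MiB (see the ioc_info print
              * below); convert them to 512-byte block counts (1 MiB = 2 * 1024 blocks).
              */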
2806         mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
2807         mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
2808
2809         ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
2810             mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
2811             mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
2812         ioc_info(mrioc,
2813             "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
2814             mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
2815             mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
2816         ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
2817             mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
2818             mrioc->facts.sge_mod_shift);
2819         ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
2820             mrioc->facts.dma_mask, (facts_flags &
2821             MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
2822         ioc_info(mrioc,
2823             "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
2824             mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
2825         ioc_info(mrioc,
2826            "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
2827            mrioc->facts.io_throttle_data_length * 4,
2828            mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
2829 }
2830
2831 /**
2832  * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
2833  * @mrioc: Adapter instance reference
2834  *
2835  * Allocate and initialize the reply free buffers, sense
2836  * buffers, reply free queue and sense buffer queue.
2837  *
2838  * Return: 0 on success, non-zero on failures.
2839  */
2840 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
2841 {
2842         int retval = 0;
2843         u32 sz, i;
2844
2845         if (mrioc->init_cmds.reply)
2846                 return retval;
2847
2848         mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2849         if (!mrioc->init_cmds.reply)
2850                 goto out_failed;
2851
2852         mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2853         if (!mrioc->bsg_cmds.reply)
2854                 goto out_failed;
2855
2856         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2857                 mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
2858                     GFP_KERNEL);
2859                 if (!mrioc->dev_rmhs_cmds[i].reply)
2860                         goto out_failed;
2861         }
2862
2863         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
2864                 mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
2865                     GFP_KERNEL);
2866                 if (!mrioc->evtack_cmds[i].reply)
2867                         goto out_failed;
2868         }
2869
2870         mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2871         if (!mrioc->host_tm_cmds.reply)
2872                 goto out_failed;
2873
2874         mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2875         if (!mrioc->pel_cmds.reply)
2876                 goto out_failed;
2877
2878         mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
2879         if (!mrioc->pel_abort_cmd.reply)
2880                 goto out_failed;
2881
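             /* one bit per device handle, rounded up to a whole number of bytes */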
2882         mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
2883         if (mrioc->facts.max_devhandle % 8)
2884                 mrioc->dev_handle_bitmap_sz++;
2885         mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
2886             GFP_KERNEL);
2887         if (!mrioc->removepend_bitmap)
2888                 goto out_failed;
2889
2890         mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
2891         if (MPI3MR_NUM_DEVRMCMD % 8)
2892                 mrioc->devrem_bitmap_sz++;
2893         mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
2894             GFP_KERNEL);
2895         if (!mrioc->devrem_bitmap)
2896                 goto out_failed;
2897
2898         mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
2899         if (MPI3MR_NUM_EVTACKCMD % 8)
2900                 mrioc->evtack_cmds_bitmap_sz++;
2901         mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
2902             GFP_KERNEL);
2903         if (!mrioc->evtack_cmds_bitmap)
2904                 goto out_failed;
2905
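             /*
              * The reply free queue and sense buffer free queue each hold one
              * more entry than the number of buffers (the extra slot presumably
              * lets a full ring be told apart from an empty one).
              */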
2906         mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
2907         mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
2908         mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
2909         mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
2910
2911         /* reply buffer pool, 16 byte align */
2912         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
2913         mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
2914             &mrioc->pdev->dev, sz, 16, 0);
2915         if (!mrioc->reply_buf_pool) {
2916                 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
2917                 goto out_failed;
2918         }
2919
2920         mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
2921             &mrioc->reply_buf_dma);
2922         if (!mrioc->reply_buf)
2923                 goto out_failed;
2924
2925         mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
2926
2927         /* reply free queue, 8 byte align */
2928         sz = mrioc->reply_free_qsz * 8;
2929         mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
2930             &mrioc->pdev->dev, sz, 8, 0);
2931         if (!mrioc->reply_free_q_pool) {
2932                 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
2933                 goto out_failed;
2934         }
2935         mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
2936             GFP_KERNEL, &mrioc->reply_free_q_dma);
2937         if (!mrioc->reply_free_q)
2938                 goto out_failed;
2939
2940         /* sense buffer pool,  4 byte align */
2941         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2942         mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
2943             &mrioc->pdev->dev, sz, 4, 0);
2944         if (!mrioc->sense_buf_pool) {
2945                 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
2946                 goto out_failed;
2947         }
2948         mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
2949             &mrioc->sense_buf_dma);
2950         if (!mrioc->sense_buf)
2951                 goto out_failed;
2952
2953         /* sense buffer queue, 8 byte align */
2954         sz = mrioc->sense_buf_q_sz * 8;
2955         mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
2956             &mrioc->pdev->dev, sz, 8, 0);
2957         if (!mrioc->sense_buf_q_pool) {
2958                 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
2959                 goto out_failed;
2960         }
2961         mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
2962             GFP_KERNEL, &mrioc->sense_buf_q_dma);
2963         if (!mrioc->sense_buf_q)
2964                 goto out_failed;
2965
2966         return retval;
2967
2968 out_failed:
2969         retval = -1;
2970         return retval;
2971 }
2972
2973 /**
2974  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
2975  * buffers
2976  * @mrioc: Adapter instance reference
2977  *
2978  * Helper function to initialize reply and sense buffers along
2979  * with some debug prints.
2980  *
2981  * Return:  None.
2982  */
2983 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
2984 {
2985         u32 sz, i;
2986         dma_addr_t phy_addr;
2987
2988         sz = mrioc->num_reply_bufs * mrioc->reply_sz;
2989         ioc_info(mrioc,
2990             "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2991             mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
2992             (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
2993         sz = mrioc->reply_free_qsz * 8;
2994         ioc_info(mrioc,
2995             "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2996             mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
2997             (unsigned long long)mrioc->reply_free_q_dma);
2998         sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2999         ioc_info(mrioc,
3000             "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3001             mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3002             (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3003         sz = mrioc->sense_buf_q_sz * 8;
3004         ioc_info(mrioc,
3005             "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3006             mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3007             (unsigned long long)mrioc->sense_buf_q_dma);
3008
3009         /* initialize Reply buffer Queue */
3010         for (i = 0, phy_addr = mrioc->reply_buf_dma;
3011             i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3012                 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3013         mrioc->reply_free_q[i] = cpu_to_le64(0);
3014
3015         /* initialize Sense Buffer Queue */
3016         for (i = 0, phy_addr = mrioc->sense_buf_dma;
3017             i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3018                 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3019         mrioc->sense_buf_q[i] = cpu_to_le64(0);
3020 }
3021
3022 /**
3023  * mpi3mr_issue_iocinit - Send IOC Init
3024  * @mrioc: Adapter instance reference
3025  *
3026  * Issue IOC Init MPI request through admin queue and wait for
3027  * the completion of it or time out.
3028  *
3029  * Return: 0 on success, non-zero on failures.
3030  */
3031 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3032 {
3033         struct mpi3_ioc_init_request iocinit_req;
3034         struct mpi3_driver_info_layout *drv_info;
3035         dma_addr_t data_dma;
3036         u32 data_len = sizeof(*drv_info);
3037         int retval = 0;
3038         ktime_t current_time;
3039
3040         drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3041             GFP_KERNEL);
3042         if (!drv_info) {
3043                 retval = -1;
3044                 goto out;
3045         }
3046         mpimr_initialize_reply_sbuf_queues(mrioc);
3047
3048         drv_info->information_length = cpu_to_le32(data_len);
3049         strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3050         strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3051         strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3052         strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3053         strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3054         strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3055             sizeof(drv_info->driver_release_date));
3056         drv_info->driver_capabilities = 0;
3057         memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3058             sizeof(mrioc->driver_info));
3059
3060         memset(&iocinit_req, 0, sizeof(iocinit_req));
3061         mutex_lock(&mrioc->init_cmds.mutex);
3062         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3063                 retval = -1;
3064                 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3065                 mutex_unlock(&mrioc->init_cmds.mutex);
3066                 goto out;
3067         }
3068         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3069         mrioc->init_cmds.is_waiting = 1;
3070         mrioc->init_cmds.callback = NULL;
3071         iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3072         iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3073         iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3074         iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3075         iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3076         iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3077         iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3078         iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3079         iocinit_req.reply_free_queue_address =
3080             cpu_to_le64(mrioc->reply_free_q_dma);
3081         iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3082         iocinit_req.sense_buffer_free_queue_depth =
3083             cpu_to_le16(mrioc->sense_buf_q_sz);
3084         iocinit_req.sense_buffer_free_queue_address =
3085             cpu_to_le64(mrioc->sense_buf_q_dma);
3086         iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3087
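             /* report the current wall-clock time, in milliseconds, to the firmware */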
3088         current_time = ktime_get_real();
3089         iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3090
3091         init_completion(&mrioc->init_cmds.done);
3092         retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3093             sizeof(iocinit_req), 1);
3094         if (retval) {
3095                 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3096                 goto out_unlock;
3097         }
3098         wait_for_completion_timeout(&mrioc->init_cmds.done,
3099             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3100         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3101                 mpi3mr_check_rh_fault_ioc(mrioc,
3102                     MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3103                 ioc_err(mrioc, "ioc_init timed out\n");
3104                 retval = -1;
3105                 goto out_unlock;
3106         }
3107         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3108             != MPI3_IOCSTATUS_SUCCESS) {
3109                 ioc_err(mrioc,
3110                     "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3111                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3112                     mrioc->init_cmds.ioc_loginfo);
3113                 retval = -1;
3114                 goto out_unlock;
3115         }
3116
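             /*
              * IOC init succeeded: hand every reply free queue and sense buffer
              * queue entry to the firmware by writing the host index registers.
              */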
3117         mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3118         writel(mrioc->reply_free_queue_host_index,
3119             &mrioc->sysif_regs->reply_free_host_index);
3120
3121         mrioc->sbq_host_index = mrioc->num_sense_bufs;
3122         writel(mrioc->sbq_host_index,
3123             &mrioc->sysif_regs->sense_buffer_free_host_index);
3124 out_unlock:
3125         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3126         mutex_unlock(&mrioc->init_cmds.mutex);
3127
3128 out:
3129         if (drv_info)
3130                 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3131                     data_dma);
3132
3133         return retval;
3134 }
3135
3136 /**
3137  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3138  * @mrioc: Adapter instance reference
3139  * @event: MPI event ID
3140  *
3141  * Unmask the specified event by clearing the corresponding bit in
3142  * the event_mask bitmap.
3143  *
3144  * Return: Nothing.
3145  */
3146 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3147 {
3148         u32 desired_event;
3149         u8 word;
3150
3151         if (event >= 128)
3152                 return;
3153
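             /* event_masks[] is an array of 32-bit words; find the word and bit for this event */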
3154         desired_event = (1 << (event % 32));
3155         word = event / 32;
3156
3157         mrioc->event_masks[word] &= ~desired_event;
3158 }
3159
3160 /**
3161  * mpi3mr_issue_event_notification - Send event notification
3162  * @mrioc: Adapter instance reference
3163  *
3164  * Issue event notification MPI request through admin queue and
3165  * wait for the completion of it or time out.
3166  *
3167  * Return: 0 on success, non-zero on failures.
3168  */
3169 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3170 {
3171         struct mpi3_event_notification_request evtnotify_req;
3172         int retval = 0;
3173         u8 i;
3174
3175         memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3176         mutex_lock(&mrioc->init_cmds.mutex);
3177         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3178                 retval = -1;
3179                 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3180                 mutex_unlock(&mrioc->init_cmds.mutex);
3181                 goto out;
3182         }
3183         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3184         mrioc->init_cmds.is_waiting = 1;
3185         mrioc->init_cmds.callback = NULL;
3186         evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3187         evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3188         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3189                 evtnotify_req.event_masks[i] =
3190                     cpu_to_le32(mrioc->event_masks[i]);
3191         init_completion(&mrioc->init_cmds.done);
3192         retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3193             sizeof(evtnotify_req), 1);
3194         if (retval) {
3195                 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3196                 goto out_unlock;
3197         }
3198         wait_for_completion_timeout(&mrioc->init_cmds.done,
3199             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3200         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3201                 ioc_err(mrioc, "event notification timed out\n");
3202                 mpi3mr_check_rh_fault_ioc(mrioc,
3203                     MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3204                 retval = -1;
3205                 goto out_unlock;
3206         }
3207         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3208             != MPI3_IOCSTATUS_SUCCESS) {
3209                 ioc_err(mrioc,
3210                     "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3211                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3212                     mrioc->init_cmds.ioc_loginfo);
3213                 retval = -1;
3214                 goto out_unlock;
3215         }
3216
3217 out_unlock:
3218         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3219         mutex_unlock(&mrioc->init_cmds.mutex);
3220 out:
3221         return retval;
3222 }
3223
3224 /**
3225  * mpi3mr_process_event_ack - Process event acknowledgment
3226  * @mrioc: Adapter instance reference
3227  * @event: MPI3 event ID
3228  * @event_ctx: event context
3229  *
3230  * Send event acknowledgment through admin queue and wait for
3231  * it to complete.
3232  *
3233  * Return: 0 on success, non-zero on failures.
3234  */
3235 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3236         u32 event_ctx)
3237 {
3238         struct mpi3_event_ack_request evtack_req;
3239         int retval = 0;
3240
3241         memset(&evtack_req, 0, sizeof(evtack_req));
3242         mutex_lock(&mrioc->init_cmds.mutex);
3243         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3244                 retval = -1;
3245                 ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3246                 mutex_unlock(&mrioc->init_cmds.mutex);
3247                 goto out;
3248         }
3249         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3250         mrioc->init_cmds.is_waiting = 1;
3251         mrioc->init_cmds.callback = NULL;
3252         evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3253         evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3254         evtack_req.event = event;
3255         evtack_req.event_context = cpu_to_le32(event_ctx);
3256
3257         init_completion(&mrioc->init_cmds.done);
3258         retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3259             sizeof(evtack_req), 1);
3260         if (retval) {
3261                 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3262                 goto out_unlock;
3263         }
3264         wait_for_completion_timeout(&mrioc->init_cmds.done,
3265             (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3266         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3267                 ioc_err(mrioc, "Send EvtAck: command timed out\n");
3268                 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3269                         mpi3mr_soft_reset_handler(mrioc,
3270                             MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
3271                 retval = -1;
3272                 goto out_unlock;
3273         }
3274         if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3275             != MPI3_IOCSTATUS_SUCCESS) {
3276                 ioc_err(mrioc,
3277                     "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3278                     (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3279                     mrioc->init_cmds.ioc_loginfo);
3280                 retval = -1;
3281                 goto out_unlock;
3282         }
3283
3284 out_unlock:
3285         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3286         mutex_unlock(&mrioc->init_cmds.mutex);
3287 out:
3288         return retval;
3289 }
3290
3291 /**
3292  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3293  * @mrioc: Adapter instance reference
3294  *
3295  * Allocate chain buffers and set a bitmap to indicate free
3296  * chain buffers. Chain buffers are used to pass the SGE
3297  * information along with MPI3 SCSI IO requests for host I/O.
3298  *
3299  * Return: 0 on success, non-zero on failure
3300  */
3301 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3302 {
3303         int retval = 0;
3304         u32 sz, i;
3305         u16 num_chains;
3306
3307         if (mrioc->chain_sgl_list)
3308                 return retval;
3309
3310         num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3311
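             /* allocate additional chain frames when any DIX protection type is enabled */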
3312         if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3313             | SHOST_DIX_TYPE1_PROTECTION
3314             | SHOST_DIX_TYPE2_PROTECTION
3315             | SHOST_DIX_TYPE3_PROTECTION))
3316                 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3317
3318         mrioc->chain_buf_count = num_chains;
3319         sz = sizeof(struct chain_element) * num_chains;
3320         mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3321         if (!mrioc->chain_sgl_list)
3322                 goto out_failed;
3323
3324         sz = MPI3MR_PAGE_SIZE_4K;
3325         mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3326             &mrioc->pdev->dev, sz, 16, 0);
3327         if (!mrioc->chain_buf_pool) {
3328                 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3329                 goto out_failed;
3330         }
3331
3332         for (i = 0; i < num_chains; i++) {
3333                 mrioc->chain_sgl_list[i].addr =
3334                     dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3335                     &mrioc->chain_sgl_list[i].dma_addr);
3336
3337                 if (!mrioc->chain_sgl_list[i].addr)
3338                         goto out_failed;
3339         }
3340         mrioc->chain_bitmap_sz = num_chains / 8;
3341         if (num_chains % 8)
3342                 mrioc->chain_bitmap_sz++;
3343         mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
3344         if (!mrioc->chain_bitmap)
3345                 goto out_failed;
3346         return retval;
3347 out_failed:
3348         retval = -1;
3349         return retval;
3350 }
3351
3352 /**
3353  * mpi3mr_port_enable_complete - Mark port enable complete
3354  * @mrioc: Adapter instance reference
3355  * @drv_cmd: Internal command tracker
3356  *
3357  * Call back for asynchronous port enable request sets the
3358  * driver command to indicate port enable request is complete.
3359  *
3360  * Return: Nothing
3361  */
3362 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3363         struct mpi3mr_drv_cmd *drv_cmd)
3364 {
3365         drv_cmd->state = MPI3MR_CMD_NOTUSED;
3366         drv_cmd->callback = NULL;
3367         mrioc->scan_failed = drv_cmd->ioc_status;
3368         mrioc->scan_started = 0;
3369 }
3370
3371 /**
3372  * mpi3mr_issue_port_enable - Issue Port Enable
3373  * @mrioc: Adapter instance reference
3374  * @async: Flag to wait for completion or not
3375  *
3376  * Issue Port Enable MPI request through admin queue and if the
3377  * async flag is not set wait for the completion of the port
3378  * enable or time out.
3379  *
3380  * Return: 0 on success, non-zero on failures.
3381  */
3382 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3383 {
3384         struct mpi3_port_enable_request pe_req;
3385         int retval = 0;
3386         u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3387
3388         memset(&pe_req, 0, sizeof(pe_req));
3389         mutex_lock(&mrioc->init_cmds.mutex);
3390         if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3391                 retval = -1;
3392                 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3393                 mutex_unlock(&mrioc->init_cmds.mutex);
3394                 goto out;
3395         }
3396         mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3397         if (async) {
3398                 mrioc->init_cmds.is_waiting = 0;
3399                 mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3400         } else {
3401                 mrioc->init_cmds.is_waiting = 1;
3402                 mrioc->init_cmds.callback = NULL;
3403                 init_completion(&mrioc->init_cmds.done);
3404         }
3405         pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3406         pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3407
3408         retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3409         if (retval) {
3410                 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3411                 goto out_unlock;
3412         }
3413         if (async) {
3414                 mutex_unlock(&mrioc->init_cmds.mutex);
3415                 goto out;
3416         }
3417
3418         wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3419         if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3420                 ioc_err(mrioc, "port enable timed out\n");
3421                 retval = -1;
3422                 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3423                 goto out_unlock;
3424         }
3425         mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3426
3427 out_unlock:
3428         mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3429         mutex_unlock(&mrioc->init_cmds.mutex);
3430 out:
3431         return retval;
3432 }
3433
3434 /* Protocol type to name mapper structure */
3435 static const struct {
3436         u8 protocol;
3437         char *name;
3438 } mpi3mr_protocols[] = {
3439         { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3440         { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3441         { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3442 };
3443
3444 /* Capability to name mapper structure */
3445 static const struct {
3446         u32 capability;
3447         char *name;
3448 } mpi3mr_capabilities[] = {
3449         { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
3450 };
3451
3452 /**
3453  * mpi3mr_print_ioc_info - Display controller information
3454  * @mrioc: Adapter instance reference
3455  *
3456  * Display controller personality, capabilities, supported
3457  * protocols, etc.
3458  *
3459  * Return: Nothing
3460  */
3461 static void
3462 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3463 {
3464         int i = 0, bytes_written = 0;
3465         char personality[16];
3466         char protocol[50] = {0};
3467         char capabilities[100] = {0};
3468         struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3469
3470         switch (mrioc->facts.personality) {
3471         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3472                 strncpy(personality, "Enhanced HBA", sizeof(personality));
3473                 break;
3474         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3475                 strncpy(personality, "RAID", sizeof(personality));
3476                 break;
3477         default:
3478                 strncpy(personality, "Unknown", sizeof(personality));
3479                 break;
3480         }
3481
3482         ioc_info(mrioc, "Running in %s Personality", personality);
3483
3484         ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3485             fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3486             fwver->ph_minor, fwver->cust_id, fwver->build_num);
3487
3488         for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3489                 if (mrioc->facts.protocol_flags &
3490                     mpi3mr_protocols[i].protocol) {
3491                         bytes_written += scnprintf(protocol + bytes_written,
3492                                     sizeof(protocol) - bytes_written, "%s%s",
3493                                     bytes_written ? "," : "",
3494                                     mpi3mr_protocols[i].name);
3495                 }
3496         }
3497
3498         bytes_written = 0;
3499         for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3500                 if (mrioc->facts.ioc_capabilities &
3501                     mpi3mr_capabilities[i].capability) {
3502                         bytes_written += scnprintf(capabilities + bytes_written,
3503                                     sizeof(capabilities) - bytes_written, "%s%s",
3504                                     bytes_written ? "," : "",
3505                                     mpi3mr_capabilities[i].name);
3506                 }
3507         }
3508
3509         ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3510                  protocol, capabilities);
3511 }
3512
3513 /**
3514  * mpi3mr_cleanup_resources - Free PCI resources
3515  * @mrioc: Adapter instance reference
3516  *
3517  * Unmap PCI device memory and disable PCI device.
3518  *
3519  * Return: Nothing.
3520  */
3521 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3522 {
3523         struct pci_dev *pdev = mrioc->pdev;
3524
3525         mpi3mr_cleanup_isr(mrioc);
3526
3527         if (mrioc->sysif_regs) {
3528                 iounmap((void __iomem *)mrioc->sysif_regs);
3529                 mrioc->sysif_regs = NULL;
3530         }
3531
3532         if (pci_is_enabled(pdev)) {
3533                 if (mrioc->bars)
3534                         pci_release_selected_regions(pdev, mrioc->bars);
3535                 pci_disable_device(pdev);
3536         }
3537 }
3538
3539 /**
3540  * mpi3mr_setup_resources - Enable PCI resources
3541  * @mrioc: Adapter instance reference
3542  *
3543  * Enable PCI device memory, MSI-x registers and set DMA mask.
3544  *
3545  * Return: 0 on success and non-zero on failure.
3546  */
3547 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3548 {
3549         struct pci_dev *pdev = mrioc->pdev;
3550         u32 memap_sz = 0;
3551         int i, retval = 0, capb = 0;
3552         u16 message_control;
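             /*
              * Default to a 64-bit DMA mask only when the platform requires more
              * than 32 bits and dma_addr_t is wide enough; otherwise use 32 bits.
              */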
3553         u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3554             (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
3555             (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3556
3557         if (pci_enable_device_mem(pdev)) {
3558                 ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3559                 retval = -ENODEV;
3560                 goto out_failed;
3561         }
3562
3563         capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3564         if (!capb) {
3565                 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3566                 retval = -ENODEV;
3567                 goto out_failed;
3568         }
3569         mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3570
3571         if (pci_request_selected_regions(pdev, mrioc->bars,
3572             mrioc->driver_name)) {
3573                 ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3574                 retval = -ENODEV;
3575                 goto out_failed;
3576         }
3577
3578         for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3579                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3580                         mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3581                         memap_sz = pci_resource_len(pdev, i);
3582                         mrioc->sysif_regs =
3583                             ioremap(mrioc->sysif_regs_phys, memap_sz);
3584                         break;
3585                 }
3586         }
3587
3588         pci_set_master(pdev);
3589
3590         retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3591         if (retval) {
3592                 if (dma_mask != DMA_BIT_MASK(32)) {
3593                         ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3594                         dma_mask = DMA_BIT_MASK(32);
3595                         retval = dma_set_mask_and_coherent(&pdev->dev,
3596                             dma_mask);
3597                 }
3598                 if (retval) {
3599                         mrioc->dma_mask = 0;
3600                         ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3601                         goto out_failed;
3602                 }
3603         }
3604         mrioc->dma_mask = dma_mask;
3605
3606         if (!mrioc->sysif_regs) {
3607                 ioc_err(mrioc,
3608                     "Unable to map adapter memory or resource not found\n");
3609                 retval = -EINVAL;
3610                 goto out_failed;
3611         }
3612
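             /* the MSI-X table size field encodes the number of vectors as N - 1 */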
3613         pci_read_config_word(pdev, capb + 2, &message_control);
3614         mrioc->msix_count = (message_control & 0x3FF) + 1;
3615
3616         pci_save_state(pdev);
3617
3618         pci_set_drvdata(pdev, mrioc->shost);
3619
3620         mpi3mr_ioc_disable_intr(mrioc);
3621
3622         ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3623             (unsigned long long)mrioc->sysif_regs_phys,
3624             mrioc->sysif_regs, memap_sz);
3625         ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3626             mrioc->msix_count);
3627
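             /* cap poll queues so that at least two MSI-X vectors remain for interrupt-driven queues */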
3628         if (!reset_devices && poll_queues > 0)
3629                 mrioc->requested_poll_qcount = min_t(int, poll_queues,
3630                                 mrioc->msix_count - 2);
3631         return retval;
3632
3633 out_failed:
3634         mpi3mr_cleanup_resources(mrioc);
3635         return retval;
3636 }
3637
3638 /**
3639  * mpi3mr_enable_events - Enable required events
3640  * @mrioc: Adapter instance reference
3641  *
3642  * This routine unmasks the events required by the driver by
3643  * sending the appropriate event mask bitmap through an event
3644  * notification request.
3645  *
3646  * Return: 0 on success and non-zero on failure.
3647  */
3648 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3649 {
3650         int retval = 0;
3651         u32  i;
3652
3653         for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3654                 mrioc->event_masks[i] = -1;
3655
3656         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3657         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3658         mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3659         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3660         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3661         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3662         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3663         mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3664         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3665         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3666         mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
3667         mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3668         mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3669
3670         retval = mpi3mr_issue_event_notification(mrioc);
3671         if (retval)
3672                 ioc_err(mrioc, "failed to issue event notification %d\n",
3673                     retval);
3674         return retval;
3675 }
3676
3677 /**
3678  * mpi3mr_init_ioc - Initialize the controller
3679  * @mrioc: Adapter instance reference
3680  *
3681  * This is the controller initialization routine, executed either
3682  * after soft reset or from pci probe callback.
3683  * Setup the required resources, memory map the controller
3684  * registers, create admin and operational reply queue pairs,
3685  * allocate required memory for reply pool, sense buffer pool,
3686  * issue IOC init request to the firmware, unmask the events and
3687  * issue port enable to discover SAS/SATA/NVMe devices and RAID
3688  * volumes.
3689  *
3690  * Return: 0 on success and non-zero on failure.
3691  */
3692 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
3693 {
3694         int retval = 0;
3695         u8 retry = 0;
3696         struct mpi3_ioc_facts_data facts_data;
3697         u32 sz;
3698
3699 retry_init:
3700         retval = mpi3mr_bring_ioc_ready(mrioc);
3701         if (retval) {
3702                 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3703                     retval);
3704                 goto out_failed_noretry;
3705         }
3706
3707         retval = mpi3mr_setup_isr(mrioc, 1);
3708         if (retval) {
3709                 ioc_err(mrioc, "Failed to setup ISR error %d\n",
3710                     retval);
3711                 goto out_failed_noretry;
3712         }
3713
3714         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3715         if (retval) {
3716                 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3717                     retval);
3718                 goto out_failed;
3719         }
3720
3721         mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
3722
3723         mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
3724         atomic_set(&mrioc->pend_large_data_sz, 0);
3725
3726         if (reset_devices)
3727                 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
3728                     MPI3MR_HOST_IOS_KDUMP);
3729
3730         mrioc->reply_sz = mrioc->facts.reply_sz;
3731
3732         retval = mpi3mr_check_reset_dma_mask(mrioc);
3733         if (retval) {
3734                 ioc_err(mrioc, "Resetting dma mask failed %d\n",
3735                     retval);
3736                 goto out_failed_noretry;
3737         }
3738
3739         mpi3mr_print_ioc_info(mrioc);
3740
3741         retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3742         if (retval) {
3743                 ioc_err(mrioc,
3744                     "%s: failed to allocate reply sense buffers %d\n",
3745                     __func__, retval);
3746                 goto out_failed_noretry;
3747         }
3748
3749         retval = mpi3mr_alloc_chain_bufs(mrioc);
3750         if (retval) {
3751                 ioc_err(mrioc, "Failed to allocate chain buffers %d\n",
3752                     retval);
3753                 goto out_failed_noretry;
3754         }
3755
3756         retval = mpi3mr_issue_iocinit(mrioc);
3757         if (retval) {
3758                 ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3759                     retval);
3760                 goto out_failed;
3761         }
3762
3763         retval = mpi3mr_print_pkg_ver(mrioc);
3764         if (retval) {
3765                 ioc_err(mrioc, "failed to get package version\n");
3766                 goto out_failed;
3767         }
3768
3769         retval = mpi3mr_setup_isr(mrioc, 0);
3770         if (retval) {
3771                 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3772                     retval);
3773                 goto out_failed_noretry;
3774         }
3775
3776         retval = mpi3mr_create_op_queues(mrioc);
3777         if (retval) {
3778                 ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3779                     retval);
3780                 goto out_failed;
3781         }
3782
3783         if (!mrioc->pel_seqnum_virt) {
3784                 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
3785                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
3786                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
3787                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
3788                     GFP_KERNEL);
3789                 if (!mrioc->pel_seqnum_virt) {
3790                         retval = -ENOMEM;
3791                         goto out_failed_noretry;
3792                 }
3793         }
3794
3795         if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
3796                 dprint_init(mrioc, "allocating memory for throttle groups\n");
3797                 sz = sizeof(struct mpi3mr_throttle_group_info);
3798                 mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
3799                     kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
3800                 if (!mrioc->throttle_groups) {
                             retval = -ENOMEM;
3801                         goto out_failed_noretry;
                     }
3802         }
3803
3804         retval = mpi3mr_enable_events(mrioc);
3805         if (retval) {
3806                 ioc_err(mrioc, "failed to enable events %d\n",
3807                     retval);
3808                 goto out_failed;
3809         }
3810
3811         ioc_info(mrioc, "controller initialization completed successfully\n");
3812         return retval;
3813 out_failed:
3814         if (retry < 2) {
3815                 retry++;
3816                 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
3817                     retry);
3818                 mpi3mr_memset_buffers(mrioc);
3819                 goto retry_init;
3820         }
3821 out_failed_noretry:
3822         ioc_err(mrioc, "controller initialization failed\n");
3823         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3824             MPI3MR_RESET_FROM_CTLR_CLEANUP);
3825         mrioc->unrecoverable = 1;
3826         return retval;
3827 }
3828
3829 /**
3830  * mpi3mr_reinit_ioc - Re-Initialize the controller
3831  * @mrioc: Adapter instance reference
3832  * @is_resume: Called from resume or reset path
3833  *
3834  * This is the controller re-initialization routine, executed from
3835  * the soft reset handler or resume callback. Creates
3836  * operational reply queue pairs, allocate required memory for
3837  * reply pool, sense buffer pool, issue IOC init request to the
3838  * firmware, unmask the events and issue port enable to discover
3839  * SAS/SATA/NVMe devices and RAID volumes.
3840  *
3841  * Return: 0 on success and non-zero on failure.
3842  */
3843 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
3844 {
3845         int retval = 0;
3846         u8 retry = 0;
3847         struct mpi3_ioc_facts_data facts_data;
3848
3849 retry_init:
3850         dprint_reset(mrioc, "bringing up the controller to ready state\n");
3851         retval = mpi3mr_bring_ioc_ready(mrioc);
3852         if (retval) {
3853                 ioc_err(mrioc, "failed to bring to ready state\n");
3854                 goto out_failed_noretry;
3855         }
3856
3857         if (is_resume) {
3858                 dprint_reset(mrioc, "setting up single ISR\n");
3859                 retval = mpi3mr_setup_isr(mrioc, 1);
3860                 if (retval) {
3861                         ioc_err(mrioc, "failed to setup ISR\n");
3862                         goto out_failed_noretry;
3863                 }
3864         } else
3865                 mpi3mr_ioc_enable_intr(mrioc);
3866
3867         dprint_reset(mrioc, "getting ioc_facts\n");
3868         retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3869         if (retval) {
3870                 ioc_err(mrioc, "failed to get ioc_facts\n");
3871                 goto out_failed;
3872         }
3873
3874         dprint_reset(mrioc, "validating ioc_facts\n");
3875         retval = mpi3mr_revalidate_factsdata(mrioc);
3876         if (retval) {
3877                 ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
3878                 goto out_failed_noretry;
3879         }
3880
3881         mpi3mr_print_ioc_info(mrioc);
3882
3883         dprint_reset(mrioc, "sending ioc_init\n");
3884         retval = mpi3mr_issue_iocinit(mrioc);
3885         if (retval) {
3886                 ioc_err(mrioc, "failed to send ioc_init\n");
3887                 goto out_failed;
3888         }
3889
3890         dprint_reset(mrioc, "getting package version\n");
3891         retval = mpi3mr_print_pkg_ver(mrioc);
3892         if (retval) {
3893                 ioc_err(mrioc, "failed to get package version\n");
3894                 goto out_failed;
3895         }
3896
3897         if (is_resume) {
3898                 dprint_reset(mrioc, "setting up multiple ISR\n");
3899                 retval = mpi3mr_setup_isr(mrioc, 0);
3900                 if (retval) {
3901                         ioc_err(mrioc, "failed to re-setup ISR\n");
3902                         goto out_failed_noretry;
3903                 }
3904         }
3905
3906         dprint_reset(mrioc, "creating operational queue pairs\n");
3907         retval = mpi3mr_create_op_queues(mrioc);
3908         if (retval) {
3909                 ioc_err(mrioc, "failed to create operational queue pairs\n");
3910                 goto out_failed;
3911         }
3912
3913         if (!mrioc->pel_seqnum_virt) {
3914                 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
3915                 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
3916                 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
3917                     mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
3918                     GFP_KERNEL);
3919                 if (!mrioc->pel_seqnum_virt) {
3920                         retval = -ENOMEM;
3921                         goto out_failed_noretry;
3922                 }
3923         }
3924
3925         if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
3926                 ioc_err(mrioc,
3927                     "cannot create minimum number of operational queues expected:%d created:%d\n",
3928                     mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
3929                 retval = -1;
                     goto out_failed_noretry;
3930         }
3931
3932         dprint_reset(mrioc, "enabling events\n");
3933         retval = mpi3mr_enable_events(mrioc);
3934         if (retval) {
3935                 ioc_err(mrioc, "failed to enable events\n");
3936                 goto out_failed;
3937         }
3938
3939         ioc_info(mrioc, "sending port enable\n");
3940         retval = mpi3mr_issue_port_enable(mrioc, 0);
3941         if (retval) {
3942                 ioc_err(mrioc, "failed to issue port enable\n");
3943                 goto out_failed;
3944         }
3945
3946         ioc_info(mrioc, "controller %s completed successfully\n",
3947             (is_resume)?"resume":"re-initialization");
3948         return retval;
3949 out_failed:
3950         if (retry < 2) {
3951                 retry++;
3952                 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
3953                     (is_resume)?"resume":"re-initialization", retry);
3954                 mpi3mr_memset_buffers(mrioc);
3955                 goto retry_init;
3956         }
3957 out_failed_noretry:
3958         ioc_err(mrioc, "controller %s failed\n",
3959             (is_resume)?"resume":"re-initialization");
3960         mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3961             MPI3MR_RESET_FROM_CTLR_CLEANUP);
3962         mrioc->unrecoverable = 1;
3963         return retval;
3964 }
3965
3966 /**
3967  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
3968  *                                      segments
3969  * @mrioc: Adapter instance reference
3970  * @qidx: Operational reply queue index
3971  *
3972  * Return: Nothing.
3973  */
3974 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3975 {
3976         struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
3977         struct segments *segments;
3978         int i, size;
3979
3980         if (!op_reply_q->q_segments)
3981                 return;
3982
3983         size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
3984         segments = op_reply_q->q_segments;
3985         for (i = 0; i < op_reply_q->num_segments; i++)
3986                 memset(segments[i].segment, 0, size);
3987 }
3988
3989 /**
3990  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
3991  *                                      segments
3992  * @mrioc: Adapter instance reference
3993  * @qidx: Operational request queue index
3994  *
3995  * Return: Nothing.
3996  */
3997 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3998 {
3999         struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4000         struct segments *segments;
4001         int i, size;
4002
4003         if (!op_req_q->q_segments)
4004                 return;
4005
4006         size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4007         segments = op_req_q->q_segments;
4008         for (i = 0; i < op_req_q->num_segments; i++)
4009                 memset(segments[i].segment, 0, size);
4010 }
4011
4012 /**
4013  * mpi3mr_memset_buffers - memset memory for a controller
4014  * @mrioc: Adapter instance reference
4015  *
4016  * Clear all the memory allocated for a controller, typically
4017  * called post reset to reuse the memory allocated during the
4018  * controller init.
4019  *
4020  * Return: Nothing.
4021  */
4022 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4023 {
4024         u16 i;
4025         struct mpi3mr_throttle_group_info *tg;
4026
4027         mrioc->change_count = 0;
4028         mrioc->active_poll_qcount = 0;
4029         mrioc->default_qcount = 0;
4030         if (mrioc->admin_req_base)
4031                 memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4032         if (mrioc->admin_reply_base)
4033                 memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4034
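             /*
              * init_cmds.reply is used as an indicator that the internal command
              * reply buffers and bitmaps below were allocated, so they are all
              * cleared together.
              */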
4035         if (mrioc->init_cmds.reply) {
4036                 memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4037                 memset(mrioc->bsg_cmds.reply, 0,
4038                     sizeof(*mrioc->bsg_cmds.reply));
4039                 memset(mrioc->host_tm_cmds.reply, 0,
4040                     sizeof(*mrioc->host_tm_cmds.reply));
4041                 memset(mrioc->pel_cmds.reply, 0,
4042                     sizeof(*mrioc->pel_cmds.reply));
4043                 memset(mrioc->pel_abort_cmd.reply, 0,
4044                     sizeof(*mrioc->pel_abort_cmd.reply));
4045                 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4046                         memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4047                             sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4048                 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4049                         memset(mrioc->evtack_cmds[i].reply, 0,
4050                             sizeof(*mrioc->evtack_cmds[i].reply));
4051                 memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4052                 memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4053                 memset(mrioc->evtack_cmds_bitmap, 0,
4054                     mrioc->evtack_cmds_bitmap_sz);
4055         }
4056
4057         for (i = 0; i < mrioc->num_queues; i++) {
4058                 mrioc->op_reply_qinfo[i].qid = 0;
4059                 mrioc->op_reply_qinfo[i].ci = 0;
4060                 mrioc->op_reply_qinfo[i].num_replies = 0;
4061                 mrioc->op_reply_qinfo[i].ephase = 0;
4062                 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4063                 atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4064                 mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4065
4066                 mrioc->req_qinfo[i].ci = 0;
4067                 mrioc->req_qinfo[i].pi = 0;
4068                 mrioc->req_qinfo[i].num_requests = 0;
4069                 mrioc->req_qinfo[i].qid = 0;
4070                 mrioc->req_qinfo[i].reply_qid = 0;
4071                 spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4072                 mpi3mr_memset_op_req_q_buffers(mrioc, i);
4073         }
4074
4075         atomic_set(&mrioc->pend_large_data_sz, 0);
4076         if (mrioc->throttle_groups) {
4077                 tg = mrioc->throttle_groups;
4078                 for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4079                         tg->id = 0;
4080                         tg->fw_qd = 0;
4081                         tg->modified_qd = 0;
4082                         tg->io_divert = 0;
4083                         tg->need_qd_reduction = 0;
4084                         tg->high = 0;
4085                         tg->low = 0;
4086                         tg->qd_reduction = 0;
4087                         atomic_set(&tg->pend_large_data_sz, 0);
4088                 }
4089         }
4090 }
4091
4092 /**
4093  * mpi3mr_free_mem - Free memory allocated for a controller
4094  * @mrioc: Adapter instance reference
4095  *
4096  * Free all the memory allocated for a controller.
4097  *
4098  * Return: Nothing.
4099  */
4100 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4101 {
4102         u16 i;
4103         struct mpi3mr_intr_info *intr_info;
4104
4105         if (mrioc->sense_buf_pool) {
4106                 if (mrioc->sense_buf)
4107                         dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4108                             mrioc->sense_buf_dma);
4109                 dma_pool_destroy(mrioc->sense_buf_pool);
4110                 mrioc->sense_buf = NULL;
4111                 mrioc->sense_buf_pool = NULL;
4112         }
4113         if (mrioc->sense_buf_q_pool) {
4114                 if (mrioc->sense_buf_q)
4115                         dma_pool_free(mrioc->sense_buf_q_pool,
4116                             mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4117                 dma_pool_destroy(mrioc->sense_buf_q_pool);
4118                 mrioc->sense_buf_q = NULL;
4119                 mrioc->sense_buf_q_pool = NULL;
4120         }
4121
4122         if (mrioc->reply_buf_pool) {
4123                 if (mrioc->reply_buf)
4124                         dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4125                             mrioc->reply_buf_dma);
4126                 dma_pool_destroy(mrioc->reply_buf_pool);
4127                 mrioc->reply_buf = NULL;
4128                 mrioc->reply_buf_pool = NULL;
4129         }
4130         if (mrioc->reply_free_q_pool) {
4131                 if (mrioc->reply_free_q)
4132                         dma_pool_free(mrioc->reply_free_q_pool,
4133                             mrioc->reply_free_q, mrioc->reply_free_q_dma);
4134                 dma_pool_destroy(mrioc->reply_free_q_pool);
4135                 mrioc->reply_free_q = NULL;
4136                 mrioc->reply_free_q_pool = NULL;
4137         }
4138
4139         for (i = 0; i < mrioc->num_op_req_q; i++)
4140                 mpi3mr_free_op_req_q_segments(mrioc, i);
4141
4142         for (i = 0; i < mrioc->num_op_reply_q; i++)
4143                 mpi3mr_free_op_reply_q_segments(mrioc, i);
4144
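        /*
         * Drop the operational reply queue references cached in the
         * interrupt vector info so stale pointers are not used after the
         * queue info is freed below.
         */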
4145         for (i = 0; i < mrioc->intr_info_count; i++) {
4146                 intr_info = mrioc->intr_info + i;
4147                 intr_info->op_reply_q = NULL;
4148         }
4149
4150         kfree(mrioc->req_qinfo);
4151         mrioc->req_qinfo = NULL;
4152         mrioc->num_op_req_q = 0;
4153
4154         kfree(mrioc->op_reply_qinfo);
4155         mrioc->op_reply_qinfo = NULL;
4156         mrioc->num_op_reply_q = 0;
4157
4158         kfree(mrioc->init_cmds.reply);
4159         mrioc->init_cmds.reply = NULL;
4160
4161         kfree(mrioc->bsg_cmds.reply);
4162         mrioc->bsg_cmds.reply = NULL;
4163
4164         kfree(mrioc->host_tm_cmds.reply);
4165         mrioc->host_tm_cmds.reply = NULL;
4166
4167         kfree(mrioc->pel_cmds.reply);
4168         mrioc->pel_cmds.reply = NULL;
4169
4170         kfree(mrioc->pel_abort_cmd.reply);
4171         mrioc->pel_abort_cmd.reply = NULL;
4172
4173         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4174                 kfree(mrioc->evtack_cmds[i].reply);
4175                 mrioc->evtack_cmds[i].reply = NULL;
4176         }
4177
4178         kfree(mrioc->removepend_bitmap);
4179         mrioc->removepend_bitmap = NULL;
4180
4181         kfree(mrioc->devrem_bitmap);
4182         mrioc->devrem_bitmap = NULL;
4183
4184         kfree(mrioc->evtack_cmds_bitmap);
4185         mrioc->evtack_cmds_bitmap = NULL;
4186
4187         kfree(mrioc->chain_bitmap);
4188         mrioc->chain_bitmap = NULL;
4189
4190         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4191                 kfree(mrioc->dev_rmhs_cmds[i].reply);
4192                 mrioc->dev_rmhs_cmds[i].reply = NULL;
4193         }
4194
4195         if (mrioc->chain_buf_pool) {
4196                 for (i = 0; i < mrioc->chain_buf_count; i++) {
4197                         if (mrioc->chain_sgl_list[i].addr) {
4198                                 dma_pool_free(mrioc->chain_buf_pool,
4199                                     mrioc->chain_sgl_list[i].addr,
4200                                     mrioc->chain_sgl_list[i].dma_addr);
4201                                 mrioc->chain_sgl_list[i].addr = NULL;
4202                         }
4203                 }
4204                 dma_pool_destroy(mrioc->chain_buf_pool);
4205                 mrioc->chain_buf_pool = NULL;
4206         }
4207
4208         kfree(mrioc->chain_sgl_list);
4209         mrioc->chain_sgl_list = NULL;
4210
4211         if (mrioc->admin_reply_base) {
4212                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4213                     mrioc->admin_reply_base, mrioc->admin_reply_dma);
4214                 mrioc->admin_reply_base = NULL;
4215         }
4216         if (mrioc->admin_req_base) {
4217                 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4218                     mrioc->admin_req_base, mrioc->admin_req_dma);
4219                 mrioc->admin_req_base = NULL;
4220         }
4221
4222         if (mrioc->pel_seqnum_virt) {
4223                 dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4224                     mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4225                 mrioc->pel_seqnum_virt = NULL;
4226         }
4227
4228         kfree(mrioc->logdata_buf);
4229         mrioc->logdata_buf = NULL;
4230
4231 }
4232
4233 /**
4234  * mpi3mr_issue_ioc_shutdown - shutdown controller
4235  * @mrioc: Adapter instance reference
4236  *
4237  * Send a shutdown notification to the controller and wait up to
4238  * the shutdown_timeout for it to complete.
4239  *
4240  * Return: Nothing.
4241  */
4242 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4243 {
4244         u32 ioc_config, ioc_status;
4245         u8 retval = 1;
4246         u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4247
4248         ioc_info(mrioc, "Issuing shutdown Notification\n");
4249         if (mrioc->unrecoverable) {
4250                 ioc_warn(mrioc,
4251                     "IOC is unrecoverable, shutdown is not issued\n");
4252                 return;
4253         }
4254         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4255         if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4256             == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4257                 ioc_info(mrioc, "shutdown already in progress\n");
4258                 return;
4259         }
4260
4261         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4262         ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4263         ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4264
4265         writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4266
4267         if (mrioc->facts.shutdown_timeout)
4268                 timeout = mrioc->facts.shutdown_timeout * 10;
4269
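        /* poll ioc_status in 100ms steps until shutdown completes or the timeout expires */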
4270         do {
4271                 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4272                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4273                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4274                         retval = 0;
4275                         break;
4276                 }
4277                 msleep(100);
4278         } while (--timeout);
4279
4280         ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4281         ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4282
4283         if (retval) {
4284                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4285                     == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4286                         ioc_warn(mrioc,
4287                             "shutdown still in progress after timeout\n");
4288         }
4289
4290         ioc_info(mrioc,
4291             "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
4292             (!retval) ? "successful" : "failed", ioc_status,
4293             ioc_config);
4294 }
4295
4296 /**
4297  * mpi3mr_cleanup_ioc - Cleanup controller
4298  * @mrioc: Adapter instance reference
4299  *
4300  * Controller cleanup handler: message unit reset or soft reset
4301  * and shutdown notification are issued to the controller.
4302  *
4303  * Return: Nothing.
4304  */
4305 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4306 {
4307         enum mpi3mr_iocstate ioc_state;
4308
4309         dprint_exit(mrioc, "cleaning up the controller\n");
4310         mpi3mr_ioc_disable_intr(mrioc);
4311
4312         ioc_state = mpi3mr_get_iocstate(mrioc);
4313
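        /*
         * Issue a message unit reset only if the controller is ready and
         * no other reset is in progress; fall back to a soft reset if the
         * MUR fails, then notify the controller of the shutdown.
         */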
4314         if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
4315             (ioc_state == MRIOC_STATE_READY)) {
4316                 if (mpi3mr_issue_and_process_mur(mrioc,
4317                     MPI3MR_RESET_FROM_CTLR_CLEANUP))
4318                         mpi3mr_issue_reset(mrioc,
4319                             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4320                             MPI3MR_RESET_FROM_MUR_FAILURE);
4321                 mpi3mr_issue_ioc_shutdown(mrioc);
4322         }
4323         dprint_exit(mrioc, "controller cleanup completed\n");
4324 }
4325
4326 /**
4327  * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4328  * @mrioc: Adapter instance reference
4329  * @cmdptr: Internal command tracker
4330  *
4331  * Complete an internal driver command with state indicating it
4332  * is completed due to reset.
4333  *
4334  * Return: Nothing.
4335  */
4336 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4337         struct mpi3mr_drv_cmd *cmdptr)
4338 {
4339         if (cmdptr->state & MPI3MR_CMD_PENDING) {
4340                 cmdptr->state |= MPI3MR_CMD_RESET;
4341                 cmdptr->state &= ~MPI3MR_CMD_PENDING;
4342                 if (cmdptr->is_waiting) {
4343                         complete(&cmdptr->done);
4344                         cmdptr->is_waiting = 0;
4345                 } else if (cmdptr->callback)
4346                         cmdptr->callback(mrioc, cmdptr);
4347         }
4348 }
4349
4350 /**
4351  * mpi3mr_flush_drv_cmds - Flush internal driver commands
4352  * @mrioc: Adapter instance reference
4353  *
4354  * Flush all internal driver commands post reset.
4355  *
4356  * Return: Nothing.
4357  */
4358 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4359 {
4360         struct mpi3mr_drv_cmd *cmdptr;
4361         u8 i;
4362
4363         cmdptr = &mrioc->init_cmds;
4364         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4365         cmdptr = &mrioc->bsg_cmds;
4366         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4367         cmdptr = &mrioc->host_tm_cmds;
4368         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4369
4370         for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4371                 cmdptr = &mrioc->dev_rmhs_cmds[i];
4372                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4373         }
4374
4375         for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4376                 cmdptr = &mrioc->evtack_cmds[i];
4377                 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4378         }
4379
4380         cmdptr = &mrioc->pel_cmds;
4381         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4382
4383         cmdptr = &mrioc->pel_abort_cmd;
4384         mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4385
4386 }
4387
4388 /**
4389  * mpi3mr_pel_wait_post - Issue PEL Wait
4390  * @mrioc: Adapter instance reference
4391  * @drv_cmd: Internal command tracker
4392  *
4393  * Issue PEL Wait MPI request through admin queue and return.
4394  *
4395  * Return: Nothing.
4396  */
4397 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
4398         struct mpi3mr_drv_cmd *drv_cmd)
4399 {
4400         struct mpi3_pel_req_action_wait pel_wait;
4401
4402         mrioc->pel_abort_requested = false;
4403
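        /*
         * The PEL wait is posted asynchronously; mpi3mr_pel_wait_complete()
         * runs when the firmware completes it (new entry or abort).
         */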
4404         memset(&pel_wait, 0, sizeof(pel_wait));
4405         drv_cmd->state = MPI3MR_CMD_PENDING;
4406         drv_cmd->is_waiting = 0;
4407         drv_cmd->callback = mpi3mr_pel_wait_complete;
4408         drv_cmd->ioc_status = 0;
4409         drv_cmd->ioc_loginfo = 0;
4410         pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4411         pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4412         pel_wait.action = MPI3_PEL_ACTION_WAIT;
4413         pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
4414         pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
4415         pel_wait.class = cpu_to_le16(mrioc->pel_class);
4416         pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
4417         dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
4418             mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
4419
4420         if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
4421                 dprint_bsg_err(mrioc,
4422                             "Issuing PELWait: Admin post failed\n");
4423                 drv_cmd->state = MPI3MR_CMD_NOTUSED;
4424                 drv_cmd->callback = NULL;
4425                 drv_cmd->retry_count = 0;
4426                 mrioc->pel_enabled = false;
4427         }
4428 }
4429
4430 /**
4431  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
4432  * @mrioc: Adapter instance reference
4433  * @drv_cmd: Internal command tracker
4434  *
4435  * Issue PEL get sequence number MPI request through admin queue
4436  * and return.
4437  *
4438  * Return: 0 on success, non-zero on failure.
4439  */
4440 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
4441         struct mpi3mr_drv_cmd *drv_cmd)
4442 {
4443         struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
4444         u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
4445         int retval = 0;
4446
4447         memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
4448         mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
4449         mrioc->pel_cmds.is_waiting = 0;
4450         mrioc->pel_cmds.ioc_status = 0;
4451         mrioc->pel_cmds.ioc_loginfo = 0;
4452         mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
4453         pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4454         pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4455         pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
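        /* the firmware DMAs the sequence numbers into the pel_seqnum buffer via this SGE */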
4456         mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
4457             mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
4458
4459         retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
4460                         sizeof(pel_getseq_req), 0);
4461         if (retval) {
4462                 if (drv_cmd) {
4463                         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4464                         drv_cmd->callback = NULL;
4465                         drv_cmd->retry_count = 0;
4466                 }
4467                 mrioc->pel_enabled = false;
4468         }
4469
4470         return retval;
4471 }
4472
4473 /**
4474  * mpi3mr_pel_wait_complete - PELWait Completion callback
4475  * @mrioc: Adapter instance reference
4476  * @drv_cmd: Internal command tracker
4477  *
4478  * This is a callback handler for the PEL wait request. The
4479  * firmware completes a PEL wait request when it is aborted or a
4480  * new PEL entry is available. This sends an AEN to the application,
4481  * and if the PEL wait completion is not due to a PEL abort, a
4482  * request for the new PEL sequence number is issued.
4483  *
4484  * Return: Nothing.
4485  */
4486 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
4487         struct mpi3mr_drv_cmd *drv_cmd)
4488 {
4489         struct mpi3_pel_reply *pel_reply = NULL;
4490         u16 ioc_status, pe_log_status;
4491         bool do_retry = false;
4492
4493         if (drv_cmd->state & MPI3MR_CMD_RESET)
4494                 goto cleanup_drv_cmd;
4495
4496         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4497         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4498                 ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
4499                         __func__, ioc_status, drv_cmd->ioc_loginfo);
4500                 dprint_bsg_err(mrioc,
4501                     "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4502                     ioc_status, drv_cmd->ioc_loginfo);
4503                 do_retry = true;
4504         }
4505
4506         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4507                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4508
4509         if (!pel_reply) {
4510                 dprint_bsg_err(mrioc,
4511                     "pel_wait: failed due to no reply\n");
4512                 goto out_failed;
4513         }
4514
4515         pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
4516         if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
4517             (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
4518                 ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
4519                         __func__, pe_log_status);
4520                 dprint_bsg_err(mrioc,
4521                     "pel_wait: failed due to pel_log_status(0x%04x)\n",
4522                     pe_log_status);
4523                 do_retry = true;
4524         }
4525
4526         if (do_retry) {
4527                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4528                         drv_cmd->retry_count++;
4529                         dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
4530                             drv_cmd->retry_count);
4531                         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4532                         return;
4533                 }
4534                 dprint_bsg_err(mrioc,
4535                     "pel_wait: failed after all retries(%d)\n",
4536                     drv_cmd->retry_count);
4537                 goto out_failed;
4538         }
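        /*
         * A new PEL entry is available: bump the event counter for the
         * application and, unless a PEL abort was requested, fetch the
         * latest sequence number before re-arming the wait.
         */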
4539         atomic64_inc(&event_counter);
4540         if (!mrioc->pel_abort_requested) {
4541                 mrioc->pel_cmds.retry_count = 0;
4542                 mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
4543         }
4544
4545         return;
4546 out_failed:
4547         mrioc->pel_enabled = false;
4548 cleanup_drv_cmd:
4549         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4550         drv_cmd->callback = NULL;
4551         drv_cmd->retry_count = 0;
4552 }
4553
4554 /**
4555  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
4556  * @mrioc: Adapter instance reference
4557  * @drv_cmd: Internal command tracker
4558  *
4559  * This is a callback handler for the PEL get sequence number
4560  * request; a new PEL wait request is issued to the firmware
4561  * from here.
4562  *
4563  * Return: Nothing.
4564  */
4565 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
4566         struct mpi3mr_drv_cmd *drv_cmd)
4567 {
4568         struct mpi3_pel_reply *pel_reply = NULL;
4569         struct mpi3_pel_seq *pel_seqnum_virt;
4570         u16 ioc_status;
4571         bool do_retry = false;
4572
4573         pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
4574
4575         if (drv_cmd->state & MPI3MR_CMD_RESET)
4576                 goto cleanup_drv_cmd;
4577
4578         ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
4579         if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
4580                 dprint_bsg_err(mrioc,
4581                     "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
4582                     ioc_status, drv_cmd->ioc_loginfo);
4583                 do_retry = true;
4584         }
4585
4586         if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
4587                 pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
4588         if (!pel_reply) {
4589                 dprint_bsg_err(mrioc,
4590                     "pel_get_seqnum: failed due to no reply\n");
4591                 goto out_failed;
4592         }
4593
4594         if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
4595                 dprint_bsg_err(mrioc,
4596                     "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
4597                     le16_to_cpu(pel_reply->pe_log_status));
4598                 do_retry = true;
4599         }
4600
4601         if (do_retry) {
4602                 if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
4603                         drv_cmd->retry_count++;
4604                         dprint_bsg_err(mrioc,
4605                             "pel_get_seqnum: retrying(%d)\n",
4606                             drv_cmd->retry_count);
4607                         mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
4608                         return;
4609                 }
4610
4611                 dprint_bsg_err(mrioc,
4612                     "pel_get_seqnum: failed after all retries(%d)\n",
4613                     drv_cmd->retry_count);
4614                 goto out_failed;
4615         }
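        /* resume the PEL wait starting at the entry after the newest one reported */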
4616         mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
4617         drv_cmd->retry_count = 0;
4618         mpi3mr_pel_wait_post(mrioc, drv_cmd);
4619
4620         return;
4621 out_failed:
4622         mrioc->pel_enabled = false;
4623 cleanup_drv_cmd:
4624         drv_cmd->state = MPI3MR_CMD_NOTUSED;
4625         drv_cmd->callback = NULL;
4626         drv_cmd->retry_count = 0;
4627 }
4628
4629 /**
4630  * mpi3mr_soft_reset_handler - Reset the controller
4631  * @mrioc: Adapter instance reference
4632  * @reset_reason: Reset reason code
4633  * @snapdump: Flag to generate snapdump in firmware or not
4634  *
4635  * This is a handler for recovering the controller by issuing a
4636  * soft reset or a diag fault reset. This is a blocking function:
4637  * while one reset is executing, any other requested resets are
4638  * blocked. All BSG requests will be blocked during the reset. If
4639  * the controller reset is successful then the controller will be
4640  * reinitialized, otherwise the controller will be marked as not
4641  * recoverable.
4642  *
4643  * If the snapdump bit is set, the controller is issued a diag
4644  * fault reset so that the firmware can create a snapdump. After
4645  * that the firmware raises an F000 fault and the driver issues a
4646  * soft reset to recover from it.
4647  *
4648  * Return: 0 on success, non-zero on failure.
4649  */
4650 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4651         u32 reset_reason, u8 snapdump)
4652 {
4653         int retval = 0, i;
4654         unsigned long flags;
4655         u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4656
4657         /* Block the reset handler while a diag save is in progress */
4658         dprint_reset(mrioc,
4659             "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
4660             mrioc->diagsave_timeout);
4661         while (mrioc->diagsave_timeout)
4662                 ssleep(1);
4663         /*
4664          * Block new resets until the currently executing one is finished and
4665          * return the status of the existing reset for all blocked resets
4666          */
4667         dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
4668         if (!mutex_trylock(&mrioc->reset_mutex)) {
4669                 ioc_info(mrioc,
4670                     "controller reset triggered by %s is blocked due to another reset in progress\n",
4671                     mpi3mr_reset_rc_name(reset_reason));
4672                 do {
4673                         ssleep(1);
4674                 } while (mrioc->reset_in_progress == 1);
4675                 ioc_info(mrioc,
4676                     "returning previous reset result(%d) for the reset triggered by %s\n",
4677                     mrioc->prev_reset_result,
4678                     mpi3mr_reset_rc_name(reset_reason));
4679                 return mrioc->prev_reset_result;
4680         }
4681         ioc_info(mrioc, "controller reset is triggered by %s\n",
4682             mpi3mr_reset_rc_name(reset_reason));
4683
4684         mrioc->reset_in_progress = 1;
4685         mrioc->stop_bsgs = 1;
4686         mrioc->prev_reset_result = -1;
4687
4688         if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
4689             (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
4690             (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
4691                 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4692                         mrioc->event_masks[i] = -1;
4693
4694                 dprint_reset(mrioc, "soft_reset_handler: masking events\n");
4695                 mpi3mr_issue_event_notification(mrioc);
4696         }
4697
4698         mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
4699
4700         mpi3mr_ioc_disable_intr(mrioc);
4701
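        /*
         * For a snapdump, first issue a diag fault reset so the firmware
         * can save its state, then poll until the diag save completes
         * before issuing the actual soft reset below.
         */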
4702         if (snapdump) {
4703                 mpi3mr_set_diagsave(mrioc);
4704                 retval = mpi3mr_issue_reset(mrioc,
4705                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4706                 if (!retval) {
4707                         do {
4708                                 host_diagnostic =
4709                                     readl(&mrioc->sysif_regs->host_diagnostic);
4710                                 if (!(host_diagnostic &
4711                                     MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
4712                                         break;
4713                                 msleep(100);
4714                         } while (--timeout);
4715                 }
4716         }
4717
4718         retval = mpi3mr_issue_reset(mrioc,
4719             MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
4720         if (retval) {
4721                 ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
4722                 goto out;
4723         }
4724         if (mrioc->num_io_throttle_group !=
4725             mrioc->facts.max_io_throttle_group) {
4726                 ioc_err(mrioc,
4727                     "max io throttle group doesn't match old(%d), new(%d)\n",
4728                     mrioc->num_io_throttle_group,
4729                     mrioc->facts.max_io_throttle_group);
4730                 retval = -EPERM;
4731                 goto out;
4732         }
4733
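        /*
         * The controller is now reset: flush pending driver commands and
         * host I/O, clear the device removal/event-ack bitmaps and cached
         * device handles, then reinitialize the controller from scratch.
         */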
4734         mpi3mr_flush_delayed_cmd_lists(mrioc);
4735         mpi3mr_flush_drv_cmds(mrioc);
4736         memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4737         memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4738         memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
4739         mpi3mr_flush_host_io(mrioc);
4740         mpi3mr_cleanup_fwevt_list(mrioc);
4741         mpi3mr_invalidate_devhandles(mrioc);
4742         if (mrioc->prepare_for_reset) {
4743                 mrioc->prepare_for_reset = 0;
4744                 mrioc->prepare_for_reset_timeout_counter = 0;
4745         }
4746         mpi3mr_memset_buffers(mrioc);
4747         retval = mpi3mr_reinit_ioc(mrioc, 0);
4748         if (retval) {
4749                 pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
4750                     mrioc->name, reset_reason);
4751                 goto out;
4752         }
4753         ssleep(10);
4754
4755 out:
4756         if (!retval) {
4757                 mrioc->diagsave_timeout = 0;
4758                 mrioc->reset_in_progress = 0;
4759                 mrioc->pel_abort_requested = 0;
4760                 if (mrioc->pel_enabled) {
4761                         mrioc->pel_cmds.retry_count = 0;
4762                         mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
4763                 }
4764
4765                 mpi3mr_rfresh_tgtdevs(mrioc);
4766                 mrioc->ts_update_counter = 0;
4767                 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
4768                 if (mrioc->watchdog_work_q)
4769                         queue_delayed_work(mrioc->watchdog_work_q,
4770                             &mrioc->watchdog_work,
4771                             msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
4772                 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
4773                 mrioc->stop_bsgs = 0;
4774                 if (mrioc->pel_enabled)
4775                         atomic64_inc(&event_counter);
4776         } else {
4777                 mpi3mr_issue_reset(mrioc,
4778                     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4779                 mrioc->unrecoverable = 1;
4780                 mrioc->reset_in_progress = 0;
4781                 retval = -1;
4782         }
4783         mrioc->prev_reset_result = retval;
4784         mutex_unlock(&mrioc->reset_mutex);
4785         ioc_info(mrioc, "controller reset is %s\n",
4786             ((retval == 0) ? "successful" : "failed"));
4787         return retval;
4788 }