drivers/scsi/qla4xxx/ql4_isr.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic iSCSI HBA Driver
4  * Copyright (c)  2003-2013 QLogic Corporation
5  */
6
7 #include "ql4_def.h"
8 #include "ql4_glbl.h"
9 #include "ql4_dbg.h"
10 #include "ql4_inline.h"
11
12 /**
13  * qla4xxx_copy_sense - copy sense data into cmd sense buffer
14  * @ha: Pointer to host adapter structure.
15  * @sts_entry: Pointer to status entry structure.
16  * @srb: Pointer to srb structure.
17  **/
18 static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
19                                struct status_entry *sts_entry,
20                                struct srb *srb)
21 {
22         struct scsi_cmnd *cmd = srb->cmd;
23         uint16_t sense_len;
24
25         memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
26         sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
27         if (sense_len == 0) {
28                 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
29                                   " sense len 0\n", ha->host_no,
30                                   cmd->device->channel, cmd->device->id,
31                                   cmd->device->lun, __func__));
32                 ha->status_srb = NULL;
33                 return;
34         }
35         /* Save total available sense length,
36          * not to exceed cmd's sense buffer size */
37         sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
38         srb->req_sense_ptr = cmd->sense_buffer;
39         srb->req_sense_len = sense_len;
40
41         /* Copy sense from sts_entry pkt */
42         sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
43         memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
44
45         DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
46                 "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
47                 cmd->device->channel, cmd->device->id,
48                 cmd->device->lun, __func__,
49                 sts_entry->senseData[2] & 0x0f,
50                 sts_entry->senseData[7],
51                 sts_entry->senseData[12],
52                 sts_entry->senseData[13]));
53
54         DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
55         srb->flags |= SRB_GOT_SENSE;
56
57         /* Update srb, in case a sts_cont pkt follows */
58         srb->req_sense_ptr += sense_len;
59         srb->req_sense_len -= sense_len;
60         if (srb->req_sense_len != 0)
61                 ha->status_srb = srb;
62         else
63                 ha->status_srb = NULL;
64 }
65
66 /**
67  * qla4xxx_status_cont_entry - Process a Status Continuation entry.
68  * @ha: SCSI driver HA context
69  * @sts_cont: Entry pointer
70  *
71  * Extended sense data.
72  */
73 static void
74 qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
75                           struct status_cont_entry *sts_cont)
76 {
77         struct srb *srb = ha->status_srb;
78         struct scsi_cmnd *cmd;
79         uint16_t sense_len;
80
81         if (srb == NULL)
82                 return;
83
84         cmd = srb->cmd;
85         if (cmd == NULL) {
86                 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
87                         "back to OS srb=%p srb->state:%d\n", ha->host_no,
88                         __func__, srb, srb->state));
89                 ha->status_srb = NULL;
90                 return;
91         }
92
93         /* Copy sense data. */
94         sense_len = min_t(uint16_t, srb->req_sense_len,
95                           IOCB_MAX_EXT_SENSEDATA_LEN);
96         memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
97         DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
98
99         srb->req_sense_ptr += sense_len;
100         srb->req_sense_len -= sense_len;
101
102         /* Place command on done queue. */
103         if (srb->req_sense_len == 0) {
104                 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
105                 ha->status_srb = NULL;
106         }
107 }
108
109 /**
110  * qla4xxx_status_entry - processes status IOCBs
111  * @ha: Pointer to host adapter structure.
112  * @sts_entry: Pointer to status entry structure.
113  **/
114 static void qla4xxx_status_entry(struct scsi_qla_host *ha,
115                                  struct status_entry *sts_entry)
116 {
117         uint8_t scsi_status;
118         struct scsi_cmnd *cmd;
119         struct srb *srb;
120         struct ddb_entry *ddb_entry;
121         uint32_t residual;
122
123         srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
124         if (!srb) {
125                 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
126                            "handle=0x%0x, srb=%p\n", __func__,
127                            sts_entry->handle, srb);
128                 if (is_qla80XX(ha))
129                         set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
130                 else
131                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
132                 return;
133         }
134
135         cmd = srb->cmd;
136         if (cmd == NULL) {
137                 DEBUG2(printk("scsi%ld: %s: Command already returned back to "
138                               "OS pkt->handle=%d srb=%p srb->state:%d\n",
139                               ha->host_no, __func__, sts_entry->handle,
140                               srb, srb->state));
141                 ql4_printk(KERN_WARNING, ha, "Command is NULL:"
142                     " already returned to OS (srb=%p)\n", srb);
143                 return;
144         }
145
146         ddb_entry = srb->ddb;
147         if (ddb_entry == NULL) {
148                 cmd->result = DID_NO_CONNECT << 16;
149                 goto status_entry_exit;
150         }
151
152         residual = le32_to_cpu(sts_entry->residualByteCnt);
153
154         /* Translate ISP error to a Linux SCSI error. */
155         scsi_status = sts_entry->scsiStatus;
156         switch (sts_entry->completionStatus) {
157         case SCS_COMPLETE:
158
159                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
160                         cmd->result = DID_ERROR << 16;
161                         break;
162                 }
163
164                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
165                         scsi_set_resid(cmd, residual);
166                         if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
167                                 cmd->underflow)) {
168
169                                 cmd->result = DID_ERROR << 16;
170
171                                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
172                                         "Mid-layer Data underrun, "
173                                         "xferlen = 0x%x, "
174                                         "residual = 0x%x\n", ha->host_no,
175                                         cmd->device->channel,
176                                         cmd->device->id,
177                                         cmd->device->lun, __func__,
178                                         scsi_bufflen(cmd), residual));
179                                 break;
180                         }
181                 }
182
183                 cmd->result = DID_OK << 16 | scsi_status;
184
185                 if (scsi_status != SAM_STAT_CHECK_CONDITION)
186                         break;
187
188                 /* Copy Sense Data into sense buffer. */
189                 qla4xxx_copy_sense(ha, sts_entry, srb);
190                 break;
191
192         case SCS_INCOMPLETE:
193                 /* Always set the status to DID_ERROR, since
194                  * all conditions result in that status anyway */
195                 cmd->result = DID_ERROR << 16;
196                 break;
197
198         case SCS_RESET_OCCURRED:
199                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
200                               ha->host_no, cmd->device->channel,
201                               cmd->device->id, cmd->device->lun, __func__));
202
203                 cmd->result = DID_RESET << 16;
204                 break;
205
206         case SCS_ABORTED:
207                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
208                               ha->host_no, cmd->device->channel,
209                               cmd->device->id, cmd->device->lun, __func__));
210
211                 cmd->result = DID_RESET << 16;
212                 break;
213
214         case SCS_TIMEOUT:
215                 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
216                               ha->host_no, cmd->device->channel,
217                               cmd->device->id, cmd->device->lun));
218
219                 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
220
221                 /*
222                  * Mark device missing so that we won't continue to send
223                  * I/O to this device.  We should get a ddb state change
224                  * AEN soon.
225                  */
226                 if (iscsi_is_session_online(ddb_entry->sess))
227                         qla4xxx_mark_device_missing(ddb_entry->sess);
228                 break;
229
230         case SCS_DATA_UNDERRUN:
231         case SCS_DATA_OVERRUN:
232                 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
233                      (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
234                         DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Data overrun\n",
235                                       ha->host_no,
236                                       cmd->device->channel, cmd->device->id,
237                                       cmd->device->lun, __func__));
238
239                         cmd->result = DID_ERROR << 16;
240                         break;
241                 }
242
243                 scsi_set_resid(cmd, residual);
244
245                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
246
247                         /* Both the firmware and target reported UNDERRUN:
248                          *
249                          * MID-LAYER UNDERFLOW case:
250                          * Some kernels do not properly detect midlayer
251                          * underflow, so we manually check it and return
252                          * ERROR if the minimum required data was not
253                          * received.
254                          *
255                          * ALL OTHER cases:
256                          * Fall thru to check scsi_status
257                          */
258                         if (!scsi_status && (scsi_bufflen(cmd) - residual) <
259                             cmd->underflow) {
260                                 DEBUG2(ql4_printk(KERN_INFO, ha,
261                                                   "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
262                                                    ha->host_no,
263                                                    cmd->device->channel,
264                                                    cmd->device->id,
265                                                    cmd->device->lun, __func__,
266                                                    scsi_bufflen(cmd),
267                                                    residual));
268
269                                 cmd->result = DID_ERROR << 16;
270                                 break;
271                         }
272
273                 } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
274                            scsi_status != SAM_STAT_BUSY) {
275
276                         /*
277                          * The firmware reports UNDERRUN, but the target does
278                          * not report it:
279                          *
280                          *   scsi_status     |    host_byte       device_byte
281                          *                   |     (19:16)          (7:0)
282                          *   =============   |    =========       ===========
283                          *   TASK_SET_FULL   |    DID_OK          scsi_status
284                          *   BUSY            |    DID_OK          scsi_status
285                          *   ALL OTHERS      |    DID_ERROR       scsi_status
286                          *
287                          *   Note: If scsi_status is task set full or busy,
288                          *   then this else if would fall thru to check the
289                          *   scsi_status and return DID_OK.
290                          */
291
292                         DEBUG2(ql4_printk(KERN_INFO, ha,
293                                           "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
294                                           ha->host_no,
295                                           cmd->device->channel,
296                                           cmd->device->id,
297                                           cmd->device->lun, __func__,
298                                           residual,
299                                           scsi_bufflen(cmd)));
300
301                         cmd->result = DID_ERROR << 16 | scsi_status;
302                         goto check_scsi_status;
303                 }
304
305                 cmd->result = DID_OK << 16 | scsi_status;
306
307 check_scsi_status:
308                 if (scsi_status == SAM_STAT_CHECK_CONDITION)
309                         qla4xxx_copy_sense(ha, sts_entry, srb);
310
311                 break;
312
313         case SCS_DEVICE_LOGGED_OUT:
314         case SCS_DEVICE_UNAVAILABLE:
315                 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
316                     "state: 0x%x\n", ha->host_no,
317                     cmd->device->channel, cmd->device->id,
318                     cmd->device->lun, sts_entry->completionStatus));
319                 /*
320                  * Mark device missing so that we won't continue to
321                  * send I/O to this device.  We should get a ddb
322                  * state change AEN soon.
323                  */
324                 if (iscsi_is_session_online(ddb_entry->sess))
325                         qla4xxx_mark_device_missing(ddb_entry->sess);
326
327                 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
328                 break;
329
330         case SCS_QUEUE_FULL:
331                 /*
332                  * SCSI Mid-Layer handles device queue full
333                  */
334                 cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
335                 DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
336                               "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
337                               " iResp=%02x\n", ha->host_no, cmd->device->id,
338                               cmd->device->lun, __func__,
339                               sts_entry->completionStatus,
340                               sts_entry->scsiStatus, sts_entry->state_flags,
341                               sts_entry->iscsiFlags,
342                               sts_entry->iscsiResponse));
343                 break;
344
345         default:
346                 cmd->result = DID_ERROR << 16;
347                 break;
348         }
349
350 status_entry_exit:
351
352         /* complete the request, if not waiting for status_continuation pkt */
353         srb->cc_stat = sts_entry->completionStatus;
354         if (ha->status_srb == NULL)
355                 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
356 }
357
358 /**
359  * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
360  * @ha: Pointer to host adapter structure.
361  * @sts_entry: Pointer to status entry structure.
362  **/
363 static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
364                                           struct passthru_status *sts_entry)
365 {
366         struct iscsi_task *task;
367         struct ddb_entry *ddb_entry;
368         struct ql4_task_data *task_data;
369         struct iscsi_cls_conn *cls_conn;
370         struct iscsi_conn *conn;
371         itt_t itt;
372         uint32_t fw_ddb_index;
373
374         itt = sts_entry->handle;
375         fw_ddb_index = le32_to_cpu(sts_entry->target);
376
377         ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
378
379         if (ddb_entry == NULL) {
380                 ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
381                            __func__, sts_entry->target);
382                 return;
383         }
384
385         cls_conn = ddb_entry->conn;
386         conn = cls_conn->dd_data;
387         spin_lock(&conn->session->back_lock);
388         task = iscsi_itt_to_task(conn, itt);
389         spin_unlock(&conn->session->back_lock);
390
391         if (task == NULL) {
392                 ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
393                 return;
394         }
395
396         task_data = task->dd_data;
397         memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
398         ha->iocb_cnt -= task_data->iocb_req_cnt;
399         queue_work(ha->task_wq, &task_data->task_work);
400 }
401
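/**
 * qla4xxx_del_mrb_from_active_array - remove an MRB from the active array
 * @ha: Pointer to host adapter structure.
 * @index: Handle of the mailbox request block.
 *
 * Validates the handle, clears the slot in the active MRB array and adjusts
 * the outstanding IOCB count.  Returns the MRB, or NULL if the handle is
 * invalid or the slot is empty.
 **/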
402 static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
403                                                      uint32_t index)
404 {
405         struct mrb *mrb = NULL;
406
407         /* validate handle and remove from active array */
408         if (index >= MAX_MRB)
409                 return mrb;
410
411         mrb = ha->active_mrb_array[index];
412         ha->active_mrb_array[index] = NULL;
413         if (!mrb)
414                 return mrb;
415
416         /* update counters */
417         ha->iocb_cnt -= mrb->iocb_cnt;
418
419         return mrb;
420 }
421
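/**
 * qla4xxx_mbox_status_entry - processes mailbox status IOCBs
 * @ha: Pointer to host adapter structure.
 * @mbox_sts_entry: Pointer to mailbox status IOCB.
 *
 * Completes the mailbox request identified by the IOCB handle.  Only
 * MBOX_CMD_PING is handled here; its result is reported to the transport
 * via qla4xxx_post_ping_evt_work().  The MRB is freed in all cases.
 **/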
422 static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
423                                       struct mbox_status_iocb *mbox_sts_entry)
424 {
425         struct mrb *mrb;
426         uint32_t status;
427         uint32_t data_size;
428
429         mrb = qla4xxx_del_mrb_from_active_array(ha,
430                                         le32_to_cpu(mbox_sts_entry->handle));
431
432         if (mrb == NULL) {
433                 ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
434                            mbox_sts_entry->handle);
435                 return;
436         }
437
438         switch (mrb->mbox_cmd) {
439         case MBOX_CMD_PING:
440                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
441                                   "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
442                                   __func__, mrb->mbox_cmd,
443                                   mbox_sts_entry->out_mbox[0],
444                                   mbox_sts_entry->out_mbox[6]));
445
446                 if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
447                         status = ISCSI_PING_SUCCESS;
448                 else
449                         status = mbox_sts_entry->out_mbox[6];
450
451                 data_size = sizeof(mbox_sts_entry->out_mbox);
452
453                 qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
454                                         (uint8_t *) mbox_sts_entry->out_mbox);
455                 break;
456
457         default:
458                 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
459                                   "0x%x\n", __func__, mrb->mbox_cmd));
460         }
461
462         kfree(mrb);
463         return;
464 }
465
466 /**
467  * qla4xxx_process_response_queue - process response queue completions
468  * @ha: Pointer to host adapter structure.
469  *
470  * This routine processes response queue completions in interrupt context.
471  * hardware_lock locked upon entry.
472  **/
473 void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
474 {
475         struct srb *srb = NULL;
476         struct status_entry *sts_entry;
477
478         /* Process all responses from response queue */
479         while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
480                 sts_entry = (struct status_entry *) ha->response_ptr;
481
482                 /* Advance pointers for next entry */
483                 if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
484                         ha->response_out = 0;
485                         ha->response_ptr = ha->response_ring;
486                 } else {
487                         ha->response_out++;
488                         ha->response_ptr++;
489                 }
490
491                 /* process entry */
492                 switch (sts_entry->hdr.entryType) {
493                 case ET_STATUS:
494                         /* Common status */
495                         qla4xxx_status_entry(ha, sts_entry);
496                         break;
497
498                 case ET_PASSTHRU_STATUS:
499                         if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
500                                 qla4xxx_passthru_status_entry(ha,
501                                         (struct passthru_status *)sts_entry);
502                         else
503                                 ql4_printk(KERN_ERR, ha,
504                                            "%s: Invalid status received\n",
505                                            __func__);
506
507                         break;
508
509                 case ET_STATUS_CONTINUATION:
510                         qla4xxx_status_cont_entry(ha,
511                                 (struct status_cont_entry *) sts_entry);
512                         break;
513
514                 case ET_COMMAND:
515                         /* ISP device queue is full. Command not
516                          * accepted by ISP.  Queue command for
517                          * later */
518
519                         srb = qla4xxx_del_from_active_array(ha,
520                                                     le32_to_cpu(sts_entry->
521                                                                 handle));
522                         if (srb == NULL)
523                                 goto exit_prq_invalid_handle;
524
525                         DEBUG2(printk("scsi%ld: %s: FW device queue full, "
526                                       "srb %p\n", ha->host_no, __func__, srb));
527
528                         /* Retry normally by sending it back with
529                          * DID_BUS_BUSY */
530                         srb->cmd->result = DID_BUS_BUSY << 16;
531                         kref_put(&srb->srb_ref, qla4xxx_srb_compl);
532                         break;
533
534                 case ET_CONTINUE:
535                         /* Just throw away the continuation entries */
536                         DEBUG2(printk("scsi%ld: %s: Continuation entry - "
537                                       "ignoring\n", ha->host_no, __func__));
538                         break;
539
540                 case ET_MBOX_STATUS:
541                         DEBUG2(ql4_printk(KERN_INFO, ha,
542                                           "%s: mbox status IOCB\n", __func__));
543                         qla4xxx_mbox_status_entry(ha,
544                                         (struct mbox_status_iocb *)sts_entry);
545                         break;
546
547                 default:
548                         /*
549                          * Invalid entry in response queue, reset RISC
550                          * firmware.
551                          */
552                         DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
553                                       "response queue\n", ha->host_no,
554                                       __func__,
555                                       sts_entry->hdr.entryType));
556                         goto exit_prq_error;
557                 }
558                 ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
559                 wmb();
560         }
561
562         /*
563          * Tell ISP we're done with response(s). This also clears the interrupt.
564          */
565         ha->isp_ops->complete_iocb(ha);
566
567         return;
568
569 exit_prq_invalid_handle:
570         DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
571                       ha->host_no, __func__, srb, sts_entry->hdr.entryType,
572                       sts_entry->completionStatus));
573
574 exit_prq_error:
575         ha->isp_ops->complete_iocb(ha);
576         set_bit(DPC_RESET_HA, &ha->dpc_flags);
577 }
578
579 /**
580  * qla4_83xx_loopback_in_progress - Is loopback in progress?
581  * @ha: Pointer to host adapter structure.
582  * returns: 1 = loopback in progress, 0 = loopback not in progress
583  **/
584 static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
585 {
586         int rval = 1;
587
588         if (is_qla8032(ha) || is_qla8042(ha)) {
589                 if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
590                     (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
591                         DEBUG2(ql4_printk(KERN_INFO, ha,
592                                           "%s: Loopback diagnostics in progress\n",
593                                           __func__));
594                         rval = 1;
595                 } else {
596                         DEBUG2(ql4_printk(KERN_INFO, ha,
597                                           "%s: Loopback diagnostics not in progress\n",
598                                           __func__));
599                         rval = 0;
600                 }
601         }
602
603         return rval;
604 }
605
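/**
 * qla4xxx_update_ipaddr_state - update the cached IP address state
 * @ha: Pointer to host adapter structure.
 * @ipaddr_idx: Firmware IP address index (low nibble selects the entry).
 * @ipaddr_fw_state: IP address state reported by the firmware.
 *
 * Index 0 selects the IPv4 address state; indexes 1-3 select the IPv6
 * link-local and the two routable IPv6 address states.
 **/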
606 static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
607                                         uint32_t ipaddr_idx,
608                                         uint32_t ipaddr_fw_state)
609 {
610         uint8_t ipaddr_state;
611         uint8_t ip_idx;
612
613         ip_idx = ipaddr_idx & 0xF;
614         ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);
615
616         switch (ip_idx) {
617         case 0:
618                 ha->ip_config.ipv4_addr_state = ipaddr_state;
619                 break;
620         case 1:
621                 ha->ip_config.ipv6_link_local_state = ipaddr_state;
622                 break;
623         case 2:
624                 ha->ip_config.ipv6_addr0_state = ipaddr_state;
625                 break;
626         case 3:
627                 ha->ip_config.ipv6_addr1_state = ipaddr_state;
628                 break;
629         default:
630                 ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
631                            __func__, ip_idx);
632         }
633 }
634
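/**
 * qla4xxx_default_router_changed - save the new IPv6 default router address
 * @ha: Pointer to host adapter structure.
 * @mbox_sts: Mailbox registers from the AEN; words 2-5 carry the address.
 **/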
635 static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
636                                            uint32_t *mbox_sts)
637 {
638         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
639                &mbox_sts[2], sizeof(uint32_t));
640         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
641                &mbox_sts[3], sizeof(uint32_t));
642         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
643                &mbox_sts[4], sizeof(uint32_t));
644         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
645                &mbox_sts[5], sizeof(uint32_t));
646 }
647
648 /**
649  * qla4xxx_isr_decode_mailbox - decodes mailbox status
650  * @ha: Pointer to host adapter structure.
651  * @mbox_status: Mailbox status.
652  *
653  * This routine decodes the mailbox status during the ISR.
654  * hardware_lock locked upon entry. Runs in interrupt context.
655  **/
656 static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
657                                        uint32_t mbox_status)
658 {
659         int i;
660         uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
661         __le32 __iomem *mailbox_out;
662         uint32_t opcode = 0;
663
664         if (is_qla8032(ha) || is_qla8042(ha))
665                 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
666         else if (is_qla8022(ha))
667                 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
668         else
669                 mailbox_out = &ha->reg->mailbox[0];
670
671         if ((mbox_status == MBOX_STS_BUSY) ||
672             (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
673             (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
674                 ha->mbox_status[0] = mbox_status;
675
676                 if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
677                         /*
678                          * Copy all mailbox registers to a temporary
679                          * location and set mailbox command done flag
680                          */
681                         for (i = 0; i < ha->mbox_status_count; i++)
682                                 ha->mbox_status[i] = readl(&mailbox_out[i]);
683
684                         set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
685
686                         if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
687                                 complete(&ha->mbx_intr_comp);
688                 }
689         } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
690                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
691                         mbox_sts[i] = readl(&mailbox_out[i]);
692
693                 /* Immediately process the AENs that don't require much work.
694                  * Only queue the database_changed AENs */
695                 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
696                         for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
697                                 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
698                                     mbox_sts[i];
699                         ha->aen_log.count++;
700                 }
701                 switch (mbox_status) {
702                 case MBOX_ASTS_SYSTEM_ERROR:
703                         /* Log Mailbox registers */
704                         ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
705                         qla4xxx_dump_registers(ha);
706
707                         if ((is_qla8022(ha) && ql4xdontresethba) ||
708                             ((is_qla8032(ha) || is_qla8042(ha)) &&
709                              qla4_83xx_idc_dontreset(ha))) {
710                                 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
711                                     ha->host_no, __func__));
712                         } else {
713                                 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
714                                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
715                         }
716                         break;
717
718                 case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
719                 case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
720                 case MBOX_ASTS_NVRAM_INVALID:
721                 case MBOX_ASTS_IP_ADDRESS_CHANGED:
722                 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
723                         DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
724                                       "Reset HA\n", ha->host_no, mbox_status));
725                         if (is_qla80XX(ha))
726                                 set_bit(DPC_RESET_HA_FW_CONTEXT,
727                                         &ha->dpc_flags);
728                         else
729                                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
730                         break;
731
732                 case MBOX_ASTS_LINK_UP:
733                         set_bit(AF_LINK_UP, &ha->flags);
734                         if (test_bit(AF_INIT_DONE, &ha->flags))
735                                 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
736
737                         ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
738                         qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
739                                               sizeof(mbox_sts),
740                                               (uint8_t *) mbox_sts);
741
742                         if ((is_qla8032(ha) || is_qla8042(ha)) &&
743                             ha->notify_link_up_comp)
744                                 complete(&ha->link_up_comp);
745
746                         break;
747
748                 case MBOX_ASTS_LINK_DOWN:
749                         clear_bit(AF_LINK_UP, &ha->flags);
750                         if (test_bit(AF_INIT_DONE, &ha->flags)) {
751                                 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
752                                 qla4xxx_wake_dpc(ha);
753                         }
754
755                         ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
756                         qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
757                                               sizeof(mbox_sts),
758                                               (uint8_t *) mbox_sts);
759                         break;
760
761                 case MBOX_ASTS_HEARTBEAT:
762                         ha->seconds_since_last_heartbeat = 0;
763                         break;
764
765                 case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
766                         DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
767                                       "ACQUIRED\n", ha->host_no, mbox_status));
768                         set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
769                         break;
770
771                 case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
772                 case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
773                                                            * mode
774                                                            * only */
775                 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
776                 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
777                 case MBOX_ASTS_SUBNET_STATE_CHANGE:
778                 case MBOX_ASTS_DUPLICATE_IP:
779                         /* No action */
780                         DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
781                                       mbox_status));
782                         break;
783
784                 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
785                         printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
786                             "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
787                             mbox_sts[2], mbox_sts[3]);
788
789                         qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
790                                                     mbox_sts[3]);
791                         /* mbox_sts[2] = Old ACB state
792                          * mbox_sts[3] = new ACB state */
793                         if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
794                             ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
795                              (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
796                                 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
797                         } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
798                                    (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
799                                 if (is_qla80XX(ha))
800                                         set_bit(DPC_RESET_HA_FW_CONTEXT,
801                                                 &ha->dpc_flags);
802                                 else
803                                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
804                         } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
805                                 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
806                                            ha->host_no, __func__);
807                         } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
808                                 complete(&ha->disable_acb_comp);
809                                 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
810                                            ha->host_no, __func__);
811                         }
812                         break;
813
814                 case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
815                 case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
816                 case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
817                         /* No action */
818                         DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
819                                           ha->host_no, mbox_status));
820                         break;
821
822                 case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
823                         DEBUG2(ql4_printk(KERN_INFO, ha,
824                                           "scsi%ld: AEN %04x, IPv6 ERROR, "
825                                           "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
826                                           ha->host_no, mbox_sts[0], mbox_sts[1],
827                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
828                                           mbox_sts[5]));
829                         break;
830
831                 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
832                 case MBOX_ASTS_DNS:
833                         /* No action */
834                         DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
835                                       "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
836                                       ha->host_no, mbox_sts[0],
837                                       mbox_sts[1], mbox_sts[2]));
838                         break;
839
840                 case MBOX_ASTS_SELF_TEST_FAILED:
841                 case MBOX_ASTS_LOGIN_FAILED:
842                         /* No action */
843                         DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
844                                       "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
845                                       ha->host_no, mbox_sts[0], mbox_sts[1],
846                                       mbox_sts[2], mbox_sts[3]));
847                         break;
848
849                 case MBOX_ASTS_DATABASE_CHANGED:
850                         /* Queue AEN information and process it in the DPC
851                          * routine */
852                         if (ha->aen_q_count > 0) {
853
854                                 /* decrement available counter */
855                                 ha->aen_q_count--;
856
857                                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
858                                         ha->aen_q[ha->aen_in].mbox_sts[i] =
859                                             mbox_sts[i];
860
861                                 /* print debug message */
862                                 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
863                                               "mb1:0x%x mb2:0x%x mb3:0x%x "
864                                               "mb4:0x%x mb5:0x%x\n",
865                                               ha->host_no, ha->aen_in,
866                                               mbox_sts[0], mbox_sts[1],
867                                               mbox_sts[2], mbox_sts[3],
868                                               mbox_sts[4], mbox_sts[5]));
869
870                                 /* advance pointer */
871                                 ha->aen_in++;
872                                 if (ha->aen_in == MAX_AEN_ENTRIES)
873                                         ha->aen_in = 0;
874
875                                 /* The DPC routine will process the aen */
876                                 set_bit(DPC_AEN, &ha->dpc_flags);
877                         } else {
878                                 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
879                                               "overflowed!  AEN LOST!!\n",
880                                               ha->host_no, __func__,
881                                               mbox_sts[0]));
882
883                                 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
884                                               ha->host_no));
885
886                                 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
887                                         DEBUG2(printk("AEN[%d] %04x %04x %04x "
888                                                       "%04x\n", i, mbox_sts[0],
889                                                       mbox_sts[1], mbox_sts[2],
890                                                       mbox_sts[3]));
891                                 }
892                         }
893                         break;
894
895                 case MBOX_ASTS_TXSCVR_INSERTED:
896                         DEBUG2(printk(KERN_WARNING
897                             "scsi%ld: AEN %04x Transceiver"
898                             " inserted\n",  ha->host_no, mbox_sts[0]));
899                         break;
900
901                 case MBOX_ASTS_TXSCVR_REMOVED:
902                         DEBUG2(printk(KERN_WARNING
903                             "scsi%ld: AEN %04x Transceiver"
904                             " removed\n",  ha->host_no, mbox_sts[0]));
905                         break;
906
907                 case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
908                         if (is_qla8032(ha) || is_qla8042(ha)) {
909                                 DEBUG2(ql4_printk(KERN_INFO, ha,
910                                                   "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
911                                                   ha->host_no, mbox_sts[0],
912                                                   mbox_sts[1], mbox_sts[2],
913                                                   mbox_sts[3], mbox_sts[4]));
914                                 opcode = mbox_sts[1] >> 16;
915                                 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
916                                     (opcode == MBOX_CMD_PORT_RESET)) {
917                                         set_bit(DPC_POST_IDC_ACK,
918                                                 &ha->dpc_flags);
919                                         ha->idc_info.request_desc = mbox_sts[1];
920                                         ha->idc_info.info1 = mbox_sts[2];
921                                         ha->idc_info.info2 = mbox_sts[3];
922                                         ha->idc_info.info3 = mbox_sts[4];
923                                         qla4xxx_wake_dpc(ha);
924                                 }
925                         }
926                         break;
927
928                 case MBOX_ASTS_IDC_COMPLETE:
929                         if (is_qla8032(ha) || is_qla8042(ha)) {
930                                 DEBUG2(ql4_printk(KERN_INFO, ha,
931                                                   "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
932                                                   ha->host_no, mbox_sts[0],
933                                                   mbox_sts[1], mbox_sts[2],
934                                                   mbox_sts[3], mbox_sts[4]));
935                                 DEBUG2(ql4_printk(KERN_INFO, ha,
936                                                   "scsi%ld: AEN %04x IDC Complete notification\n",
937                                                   ha->host_no, mbox_sts[0]));
938
939                                 opcode = mbox_sts[1] >> 16;
940                                 if (ha->notify_idc_comp)
941                                         complete(&ha->idc_comp);
942
943                                 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
944                                     (opcode == MBOX_CMD_PORT_RESET))
945                                         ha->idc_info.info2 = mbox_sts[3];
946
947                                 if (qla4_83xx_loopback_in_progress(ha)) {
948                                         set_bit(AF_LOOPBACK, &ha->flags);
949                                 } else {
950                                         clear_bit(AF_LOOPBACK, &ha->flags);
951                                         if (ha->saved_acb)
952                                                 set_bit(DPC_RESTORE_ACB,
953                                                         &ha->dpc_flags);
954                                 }
955                                 qla4xxx_wake_dpc(ha);
956                         }
957                         break;
958
959                 case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
960                         DEBUG2(ql4_printk(KERN_INFO, ha,
961                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
962                                           ha->host_no, mbox_sts[0], mbox_sts[1],
963                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
964                                           mbox_sts[5]));
965                         DEBUG2(ql4_printk(KERN_INFO, ha,
966                                           "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
967                                           ha->host_no, mbox_sts[0]));
968                         qla4xxx_default_router_changed(ha, mbox_sts);
969                         break;
970
971                 case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
972                         DEBUG2(ql4_printk(KERN_INFO, ha,
973                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
974                                           ha->host_no, mbox_sts[0], mbox_sts[1],
975                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
976                                           mbox_sts[5]));
977                         DEBUG2(ql4_printk(KERN_INFO, ha,
978                                           "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
979                                           ha->host_no, mbox_sts[0]));
980                         /* new IDC timeout */
981                         ha->idc_extend_tmo = mbox_sts[1];
982                         break;
983
984                 case MBOX_ASTS_INITIALIZATION_FAILED:
985                         DEBUG2(ql4_printk(KERN_INFO, ha,
986                                           "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
987                                           ha->host_no, mbox_sts[0],
988                                           mbox_sts[3]));
989                         break;
990
991                 case MBOX_ASTS_SYSTEM_WARNING_EVENT:
992                         DEBUG2(ql4_printk(KERN_WARNING, ha,
993                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
994                                           ha->host_no, mbox_sts[0], mbox_sts[1],
995                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
996                                           mbox_sts[5]));
997                         break;
998
999                 case MBOX_ASTS_DCBX_CONF_CHANGE:
1000                         DEBUG2(ql4_printk(KERN_INFO, ha,
1001                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
1002                                           ha->host_no, mbox_sts[0], mbox_sts[1],
1003                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
1004                                           mbox_sts[5]));
1005                         DEBUG2(ql4_printk(KERN_INFO, ha,
1006                                           "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
1007                                           ha->host_no, mbox_sts[0]));
1008                         break;
1009
1010                 default:
1011                         DEBUG2(printk(KERN_WARNING
1012                                       "scsi%ld: AEN %04x UNKNOWN\n",
1013                                       ha->host_no, mbox_sts[0]));
1014                         break;
1015                 }
1016         } else {
1017                 DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
1018                               ha->host_no, mbox_status));
1019
1020                 ha->mbox_status[0] = mbox_status;
1021         }
1022 }
1023
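/**
 * qla4_83xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 *
 * A non-zero @intr_status indicates a mailbox/AEN completion, which is
 * decoded and acknowledged; otherwise the response queue is processed.
 * The mailbox interrupt mask is cleared before returning.
 **/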
1024 void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
1025                                          uint32_t intr_status)
1026 {
1027         /* Process mailbox/asynch event interrupt.*/
1028         if (intr_status) {
1029                 qla4xxx_isr_decode_mailbox(ha,
1030                                 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1031                 /* clear the interrupt */
1032                 writel(0, &ha->qla4_83xx_reg->risc_intr);
1033         } else {
1034                 qla4xxx_process_response_queue(ha);
1035         }
1036
1037         /* clear the interrupt */
1038         writel(0, &ha->qla4_83xx_reg->mb_int_mask);
1039 }
1040
1041 /**
1042  * qla4_82xx_interrupt_service_routine - isr
1043  * @ha: pointer to host adapter structure.
1044  * @intr_status: Local interrupt status/type.
1045  *
1046  * This is the main interrupt service routine.
1047  * hardware_lock locked upon entry. Runs in interrupt context.
1048  **/
1049 void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
1050     uint32_t intr_status)
1051 {
1052         /* Process response queue interrupt. */
1053         if ((intr_status & HSRX_RISC_IOCB_INT) &&
1054             test_bit(AF_INIT_DONE, &ha->flags))
1055                 qla4xxx_process_response_queue(ha);
1056
1057         /* Process mailbox/asynch event interrupt.*/
1058         if (intr_status & HSRX_RISC_MB_INT)
1059                 qla4xxx_isr_decode_mailbox(ha,
1060                     readl(&ha->qla4_82xx_reg->mailbox_out[0]));
1061
1062         /* clear the interrupt */
1063         writel(0, &ha->qla4_82xx_reg->host_int);
1064         readl(&ha->qla4_82xx_reg->host_int);
1065 }
1066
1067 /**
1068  * qla4xxx_interrupt_service_routine - isr
1069  * @ha: pointer to host adapter structure.
1070  * @intr_status: Local interrupt status/type.
1071  *
1072  * This is the main interrupt service routine.
1073  * hardware_lock locked upon entry. Runs in interrupt context.
1074  **/
1075 void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
1076                                        uint32_t intr_status)
1077 {
1078         /* Process response queue interrupt. */
1079         if (intr_status & CSR_SCSI_COMPLETION_INTR)
1080                 qla4xxx_process_response_queue(ha);
1081
1082         /* Process mailbox/asynch event  interrupt.*/
1083         if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
1084                 qla4xxx_isr_decode_mailbox(ha,
1085                                            readl(&ha->reg->mailbox[0]));
1086
1087                 /* Clear Mailbox Interrupt */
1088                 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
1089                        &ha->reg->ctrl_status);
1090                 readl(&ha->reg->ctrl_status);
1091         }
1092 }
1093
1094 /**
1095  * qla4_82xx_spurious_interrupt - processes spurious interrupt
1096  * @ha: pointer to host adapter structure.
1097  * @reqs_count: Number of requests already serviced; zero indicates a spurious interrupt.
1098  *
1099  **/
1100 static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
1101     uint8_t reqs_count)
1102 {
1103         if (reqs_count)
1104                 return;
1105
1106         DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
1107         if (is_qla8022(ha)) {
1108                 writel(0, &ha->qla4_82xx_reg->host_int);
1109                 if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
1110                         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
1111                             0xfbff);
1112         }
1113         ha->spurious_int_count++;
1114 }
1115
1116 /**
1117  * qla4xxx_intr_handler - hardware interrupt handler.
1118  * @irq: Unused
1119  * @dev_id: Pointer to host adapter structure
1120  **/
1121 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
1122 {
1123         struct scsi_qla_host *ha;
1124         uint32_t intr_status;
1125         unsigned long flags = 0;
1126         uint8_t reqs_count = 0;
1127
1128         ha = (struct scsi_qla_host *) dev_id;
1129         if (!ha) {
1130                 DEBUG2(printk(KERN_INFO
1131                               "qla4xxx: Interrupt with NULL host ptr\n"));
1132                 return IRQ_NONE;
1133         }
1134
1135         spin_lock_irqsave(&ha->hardware_lock, flags);
1136
1137         ha->isr_count++;
1138         /*
1139          * Repeatedly service interrupts up to a maximum of
1140          * MAX_REQS_SERVICED_PER_INTR
1141          */
1142         while (1) {
1143                 /*
1144                  * Read interrupt status
1145                  */
1146                 if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
1147                     ha->response_out)
1148                         intr_status = CSR_SCSI_COMPLETION_INTR;
1149                 else
1150                         intr_status = readl(&ha->reg->ctrl_status);
1151
1152                 if ((intr_status &
1153                     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
1154                         if (reqs_count == 0)
1155                                 ha->spurious_int_count++;
1156                         break;
1157                 }
1158
1159                 if (intr_status & CSR_FATAL_ERROR) {
1160                         DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
1161                                       "Status 0x%04x\n", ha->host_no,
1162                                       readl(isp_port_error_status(ha))));
1163
1164                         /* Issue Soft Reset to clear this error condition.
1165                          * This will prevent the RISC from repeatedly
1166                          * interrupting the driver; thus, allowing the DPC to
1167                          * get scheduled to continue error recovery.
1168                          * NOTE: Disabling RISC interrupts does not work in
1169                          * this case, as CSR_FATAL_ERROR overrides
1170                          * CSR_SCSI_INTR_ENABLE */
1171                         if ((readl(&ha->reg->ctrl_status) &
1172                              CSR_SCSI_RESET_INTR) == 0) {
1173                                 writel(set_rmask(CSR_SOFT_RESET),
1174                                        &ha->reg->ctrl_status);
1175                                 readl(&ha->reg->ctrl_status);
1176                         }
1177
1178                         writel(set_rmask(CSR_FATAL_ERROR),
1179                                &ha->reg->ctrl_status);
1180                         readl(&ha->reg->ctrl_status);
1181
1182                         __qla4xxx_disable_intrs(ha);
1183
1184                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
1185
1186                         break;
1187                 } else if (intr_status & CSR_SCSI_RESET_INTR) {
1188                         clear_bit(AF_ONLINE, &ha->flags);
1189                         __qla4xxx_disable_intrs(ha);
1190
1191                         writel(set_rmask(CSR_SCSI_RESET_INTR),
1192                                &ha->reg->ctrl_status);
1193                         readl(&ha->reg->ctrl_status);
1194
1195                         if (!test_bit(AF_HA_REMOVAL, &ha->flags))
1196                                 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1197
1198                         break;
1199                 } else if (intr_status & INTR_PENDING) {
1200                         ha->isp_ops->interrupt_service_routine(ha, intr_status);
1201                         ha->total_io_count++;
1202                         if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1203                                 break;
1204                 }
1205         }
1206
1207         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1208
1209         return IRQ_HANDLED;
1210 }
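
/*
 * Illustrative sketch (not part of the driver): in the fatal-error and
 * reset paths of the handler above, every writel() to ctrl_status is
 * followed by a readl() of the same register.  PCI writes are posted, so
 * the read back forces the write to reach the chip before the handler
 * continues.  A hypothetical helper expressing that idiom:
 */
static inline void __maybe_unused ql4_example_wr_flush(void __iomem *reg,
                                                       uint32_t val)
{
        writel(val, reg);       /* posted write */
        readl(reg);             /* read back to flush it to the device */
}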
1211
1212 /**
1213  * qla4_82xx_intr_handler - hardware interrupt handler.
1214  * @irq: Unused
1215  * @dev_id: Pointer to host adapter structure
1216  **/
1217 irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
1218 {
1219         struct scsi_qla_host *ha = dev_id;
1220         uint32_t intr_status;
1221         uint32_t status;
1222         unsigned long flags = 0;
1223         uint8_t reqs_count = 0;
1224
1225         if (unlikely(pci_channel_offline(ha->pdev)))
1226                 return IRQ_HANDLED;
1227
1228         ha->isr_count++;
1229         status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1230         if (!(status & ha->nx_legacy_intr.int_vec_bit))
1231                 return IRQ_NONE;
1232
1233         status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
1234         if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
1235                 DEBUG7(ql4_printk(KERN_INFO, ha,
1236                                   "%s legacy Int not triggered\n", __func__));
1237                 return IRQ_NONE;
1238         }
1239
1240         /* clear the interrupt */
1241         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1242
1243         /* read twice to ensure write is flushed */
1244         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1245         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1246
1247         spin_lock_irqsave(&ha->hardware_lock, flags);
1248         while (1) {
1249                 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1250                     ISRX_82XX_RISC_INT)) {
1251                         qla4_82xx_spurious_interrupt(ha, reqs_count);
1252                         break;
1253                 }
1254                 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1255                 if ((intr_status &
1256                     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
1257                         qla4_82xx_spurious_interrupt(ha, reqs_count);
1258                         break;
1259                 }
1260
1261                 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1262
1263                 /* Enable Interrupt */
1264                 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
1265
1266                 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1267                         break;
1268         }
1269
1270         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1271         return IRQ_HANDLED;
1272 }
1273
1274 #define LEG_INT_PTR_B31         (1 << 31)
1275 #define LEG_INT_PTR_B30         (1 << 30)
1276 #define PF_BITS_MASK            (0xF << 16)
1277
1278 /**
1279  * qla4_83xx_intr_handler - hardware interrupt handler.
1280  * @irq: Unused
1281  * @dev_id: Pointer to host adapter structure
1282  **/
1283 irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1284 {
1285         struct scsi_qla_host *ha = dev_id;
1286         uint32_t leg_int_ptr = 0;
1287         unsigned long flags = 0;
1288
1289         ha->isr_count++;
1290         leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1291
1292         /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1293         if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1294                 DEBUG7(ql4_printk(KERN_ERR, ha,
1295                                   "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1296                                   __func__));
1297                 return IRQ_NONE;
1298         }
1299
1300         /* Validate the PCIe function ID set in leg_int_ptr bits [19..16] */
1301         if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1302                 DEBUG7(ql4_printk(KERN_ERR, ha,
1303                                   "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1304                                   __func__, (leg_int_ptr & PF_BITS_MASK),
1305                                   ha->pf_bit));
1306                 return IRQ_NONE;
1307         }
1308
1309         /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
1310          * Control register and poll till Legacy Interrupt Pointer register
1311          * bit30 is 0.
1312          */
1313         writel(0, &ha->qla4_83xx_reg->leg_int_trig);
1314         do {
1315                 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1316                 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
1317                         break;
1318         } while (leg_int_ptr & LEG_INT_PTR_B30);
1319
1320         spin_lock_irqsave(&ha->hardware_lock, flags);
1321         leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
1322         ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
1323         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1324
1325         return IRQ_HANDLED;
1326 }
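
/*
 * Illustrative sketch (not part of the driver): the de-assert sequence
 * above writes 0 to leg_int_trig and then polls leg_int_ptr until bit 30
 * clears, with no upper bound on the wait.  If a bounded wait were
 * acceptable, the same loop could be written with the generic iopoll
 * helper (hypothetical rewrite; assumes <linux/iopoll.h> is included and
 * that a 100us timeout is long enough):
 */
static int __maybe_unused
ql4_example_deassert_legacy_intr(struct scsi_qla_host *ha)
{
        uint32_t val;

        writel(0, &ha->qla4_83xx_reg->leg_int_trig);
        /* poll every 1us, give up after 100us */
        return readl_poll_timeout_atomic(&ha->qla4_83xx_reg->leg_int_ptr,
                                         val,
                                         !(val & LEG_INT_PTR_B30) ||
                                         (val & PF_BITS_MASK) != ha->pf_bit,
                                         1, 100);
}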
1327
1328 irqreturn_t
1329 qla4_8xxx_msi_handler(int irq, void *dev_id)
1330 {
1331         struct scsi_qla_host *ha;
1332
1333         ha = (struct scsi_qla_host *) dev_id;
1334         if (!ha) {
1335                 DEBUG2(printk(KERN_INFO
1336                     "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
1337                 return IRQ_NONE;
1338         }
1339
1340         ha->isr_count++;
1341         /* clear the interrupt */
1342         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1343
1344         /* read twice to ensure write is flushed */
1345         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1346         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1347
1348         return qla4_8xxx_default_intr_handler(irq, dev_id);
1349 }
1350
1351 static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
1352 {
1353         struct scsi_qla_host *ha = dev_id;
1354         unsigned long flags;
1355         uint32_t ival = 0;
1356
1357         spin_lock_irqsave(&ha->hardware_lock, flags);
1358
1359         ival = readl(&ha->qla4_83xx_reg->risc_intr);
1360         if (ival == 0) {
1361                 ql4_printk(KERN_INFO, ha,
1362                            "%s: Spurious mailbox interrupt!\n",
1363                            __func__);
1364                 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1365                 ival &= ~INT_MASK_FW_MB;
1366                 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1367                 goto exit;
1368         }
1369
1370         qla4xxx_isr_decode_mailbox(ha,
1371                                    readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1372         writel(0, &ha->qla4_83xx_reg->risc_intr);
1373         ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1374         ival &= ~INT_MASK_FW_MB;
1375         writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1376         ha->isr_count++;
1377 exit:
1378         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1379         return IRQ_HANDLED;
1380 }
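
/*
 * Illustrative sketch (not part of the driver): both exit paths of the
 * mailbox handler above clear INT_MASK_FW_MB in mb_int_mask with a
 * read-modify-write, which appears to re-enable the firmware mailbox
 * interrupt.  Factored into a hypothetical helper, the idiom is:
 */
static void __maybe_unused ql4_example_unmask_fw_mb(struct scsi_qla_host *ha)
{
        uint32_t ival;

        ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
        ival &= ~INT_MASK_FW_MB;        /* clear the mask bit */
        writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
}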
1381
1382 /**
1383  * qla4_8xxx_default_intr_handler - hardware interrupt handler.
1384  * @irq: Unused
1385  * @dev_id: Pointer to host adapter structure
1386  *
1387  * This interrupt handler is called directly for MSI-X, and
1388  * called indirectly for MSI.
1389  **/
1390 irqreturn_t
1391 qla4_8xxx_default_intr_handler(int irq, void *dev_id)
1392 {
1393         struct scsi_qla_host *ha = dev_id;
1394         unsigned long   flags;
1395         uint32_t intr_status;
1396         uint8_t reqs_count = 0;
1397
1398         if (is_qla8032(ha) || is_qla8042(ha)) {
1399                 qla4_83xx_mailbox_intr_handler(irq, dev_id);
1400         } else {
1401                 spin_lock_irqsave(&ha->hardware_lock, flags);
1402                 while (1) {
1403                         if (!(readl(&ha->qla4_82xx_reg->host_int) &
1404                             ISRX_82XX_RISC_INT)) {
1405                                 qla4_82xx_spurious_interrupt(ha, reqs_count);
1406                                 break;
1407                         }
1408
1409                         intr_status = readl(&ha->qla4_82xx_reg->host_status);
1410                         if ((intr_status &
1411                             (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1412                                 qla4_82xx_spurious_interrupt(ha, reqs_count);
1413                                 break;
1414                         }
1415
1416                         ha->isp_ops->interrupt_service_routine(ha, intr_status);
1417
1418                         if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1419                                 break;
1420                 }
1421                 ha->isr_count++;
1422                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1423         }
1424         return IRQ_HANDLED;
1425 }
1426
1427 irqreturn_t
1428 qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1429 {
1430         struct scsi_qla_host *ha = dev_id;
1431         unsigned long flags;
1432         int intr_status;
1433         uint32_t ival = 0;
1434
1435         spin_lock_irqsave(&ha->hardware_lock, flags);
1436         if (is_qla8032(ha) || is_qla8042(ha)) {
1437                 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
1438                 if (ival == 0) {
1439                         ql4_printk(KERN_INFO, ha, "%s: Spurious iocb interrupt!\n",
1440                                    __func__);
1441                         goto exit_msix_rsp_q;
1442                 }
1443                 qla4xxx_process_response_queue(ha);
1444                 writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
1445         } else {
1446                 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1447                 if (intr_status & HSRX_RISC_IOCB_INT) {
1448                         qla4xxx_process_response_queue(ha);
1449                         writel(0, &ha->qla4_82xx_reg->host_int);
1450                 } else {
1451                         ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
1452                                    __func__);
1453                         goto exit_msix_rsp_q;
1454                 }
1455         }
1456         ha->isr_count++;
1457 exit_msix_rsp_q:
1458         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1459         return IRQ_HANDLED;
1460 }
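
/*
 * Illustrative sketch (not part of the driver): qla4_8xxx_enable_msix() is
 * defined elsewhere, but judging from qla4xxx_free_irqs() at the end of
 * this file, the two MSI-X vectors are presumably wired roughly like this,
 * with vector 0 going to the default (mailbox) handler and vector 1 to the
 * response-queue handler (hypothetical sketch, error unwinding omitted):
 */
static int __maybe_unused ql4_example_wire_msix(struct scsi_qla_host *ha)
{
        int ret;

        ret = request_irq(pci_irq_vector(ha->pdev, 0),
                          qla4_8xxx_default_intr_handler, 0, DRIVER_NAME, ha);
        if (ret)
                return ret;

        return request_irq(pci_irq_vector(ha->pdev, 1),
                           qla4_8xxx_msix_rsp_q, 0, DRIVER_NAME, ha);
}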
1461
1462 /**
1463  * qla4xxx_process_aen - processes AENs generated by firmware
1464  * @ha: pointer to host adapter structure.
1465  * @process_aen: type of AENs to process
1466  *
1467  * Processes specific types of Asynchronous Events generated by firmware.
1468  * The type of AENs to process is specified by process_aen and can be
1469  *      PROCESS_ALL_AENS         0
1470  *      FLUSH_DDB_CHANGED_AENS   1
1471  *      RELOGIN_DDB_CHANGED_AENS 2
1472  **/
1473 void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
1474 {
1475         uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
1476         struct aen *aen;
1477         int i;
1478         unsigned long flags;
1479
1480         spin_lock_irqsave(&ha->hardware_lock, flags);
1481         while (ha->aen_out != ha->aen_in) {
1482                 aen = &ha->aen_q[ha->aen_out];
1483                 /* copy aen information to local structure */
1484                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
1485                         mbox_sts[i] = aen->mbox_sts[i];
1486
1487                 ha->aen_q_count++;
1488                 ha->aen_out++;
1489
1490                 if (ha->aen_out == MAX_AEN_ENTRIES)
1491                         ha->aen_out = 0;
1492
1493                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1494
1495                 DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
1496                         " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
1497                         (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
1498                         mbox_sts[0], mbox_sts[1], mbox_sts[2],
1499                         mbox_sts[3], mbox_sts[4]));
1500
1501                 switch (mbox_sts[0]) {
1502                 case MBOX_ASTS_DATABASE_CHANGED:
1503                         switch (process_aen) {
1504                         case FLUSH_DDB_CHANGED_AENS:
1505                                 DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
1506                                               "[%d] state=%04x FLUSHED!\n",
1507                                               ha->host_no, ha->aen_out,
1508                                               mbox_sts[0], mbox_sts[2],
1509                                               mbox_sts[3]));
1510                                 break;
1511                         case PROCESS_ALL_AENS:
1512                         default:
1513                                 /* Specific device. */
1514                                 if (mbox_sts[1] == 1)
1515                                         qla4xxx_process_ddb_changed(ha,
1516                                                 mbox_sts[2], mbox_sts[3],
1517                                                 mbox_sts[4]);
1518                                 break;
1519                         }
1520                 }
1521                 spin_lock_irqsave(&ha->hardware_lock, flags);
1522         }
1523         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1524 }
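
/*
 * Illustrative sketch (not part of the driver): qla4xxx_process_aen() is
 * the consumer side of the ha->aen_q ring; the producer (presumably the
 * mailbox decode path) advances ha->aen_in while the loop above advances
 * ha->aen_out, both wrapping at MAX_AEN_ENTRIES.  A hypothetical producer,
 * called with ha->hardware_lock held, would mirror the dequeue:
 */
static void __maybe_unused ql4_example_queue_aen(struct scsi_qla_host *ha,
                                                 uint32_t *mbox_sts)
{
        int i;

        for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
                ha->aen_q[ha->aen_in].mbox_sts[i] = mbox_sts[i];

        if (++ha->aen_in == MAX_AEN_ENTRIES)
                ha->aen_in = 0;
}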
1525
1526 int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1527 {
1528         int ret = 0;
1529         int rval = QLA_ERROR;
1530
1531         if (is_qla40XX(ha))
1532                 goto try_intx;
1533
1534         if (ql4xenablemsix == 2) {
1535                 /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
1536                 if (is_qla8032(ha) || is_qla8042(ha)) {
1537                         ql4_printk(KERN_INFO, ha, "%s: MSI interrupts not supported for ISP%04x, falling back to INTx mode\n",
1538                                    __func__, ha->pdev->device);
1539                         goto try_intx;
1540                 }
1541                 goto try_msi;
1542         }
1543
1544         if (ql4xenablemsix != 1)
1545                 goto try_intx;
1546
1547         /* Trying MSI-X */
1548         ret = qla4_8xxx_enable_msix(ha);
1549         if (!ret) {
1550                 DEBUG2(ql4_printk(KERN_INFO, ha,
1551                     "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1552                 goto irq_attached;
1553         } else {
1554                 if (is_qla8032(ha) || is_qla8042(ha)) {
1555                         ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back to INTx mode. ret = %d\n",
1556                                    __func__, ha->pdev->device, ret);
1557                         goto try_intx;
1558                 }
1559         }
1560
1561         ql4_printk(KERN_WARNING, ha,
1562             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
1563
1564 try_msi:
1565         /* Trying MSI */
1566         ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
1567         if (ret > 0) {
1568                 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1569                         0, DRIVER_NAME, ha);
1570                 if (!ret) {
1571                         DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1572                         goto irq_attached;
1573                 } else {
1574                         ql4_printk(KERN_WARNING, ha,
1575                             "MSI: Failed to reserve interrupt %d; "
1576                             "already in use.\n", ha->pdev->irq);
1577                         pci_free_irq_vectors(ha->pdev);
1578                 }
1579         }
1580
1581 try_intx:
1582         if (is_qla8022(ha)) {
1583                 ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
1584                            __func__);
1585                 goto irq_not_attached;
1586         }
1587
1588         /* Trying INTx */
1589         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1590             IRQF_SHARED, DRIVER_NAME, ha);
1591         if (!ret) {
1592                 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1593                 goto irq_attached;
1594
1595         } else {
1596                 ql4_printk(KERN_WARNING, ha,
1597                     "INTx: Failed to reserve interrupt %d; already in"
1598                     " use.\n", ha->pdev->irq);
1599                 goto irq_not_attached;
1600         }
1601
1602 irq_attached:
1603         set_bit(AF_IRQ_ATTACHED, &ha->flags);
1604         ha->host->irq = ha->pdev->irq;
1605         ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1606                    __func__, ha->pdev->irq);
1607         rval = QLA_SUCCESS;
1608 irq_not_attached:
1609         return rval;
1610 }
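
/*
 * Illustrative sketch (not part of the driver): qla4xxx_request_irqs()
 * above open-codes the MSI-X -> MSI -> INTx fallback because each mode
 * uses different handlers and vector counts on this hardware.  For a
 * simple single-vector device the same fallback can be expressed with one
 * pci_alloc_irq_vectors() call (hypothetical, generic example):
 */
static int __maybe_unused ql4_example_single_vector(struct pci_dev *pdev,
                                                    irq_handler_t handler,
                                                    void *arg)
{
        int ret;

        /* try MSI-X first, then MSI, then legacy INTx */
        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                return ret;

        /* legacy INTx lines may be shared; MSI/MSI-X vectors are not */
        return request_irq(pci_irq_vector(pdev, 0), handler,
                           pdev->msix_enabled || pdev->msi_enabled ?
                           0 : IRQF_SHARED,
                           DRIVER_NAME, arg);
}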
1611
1612 void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1613 {
1614         if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1615                 return;
1616
1617         if (ha->pdev->msix_enabled)
1618                 free_irq(pci_irq_vector(ha->pdev, 1), ha);
1619         free_irq(pci_irq_vector(ha->pdev, 0), ha);
1620         pci_free_irq_vectors(ha->pdev);
1621 }