/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

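/*
 * qla24xx_allocate_vp_id
 *	Reserve the lowest free index in ha->vp_idx_map (index 0 belongs to
 *	the physical port) and link the new vport onto ha->vp_list.
 *
 * Return:
 *	Allocated vp_id on success; a value greater than ha->max_npiv_vports
 *	when the bitmap is exhausted (callers treat that as failure).
 */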
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		DEBUG15(printk("vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports));
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

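/*
 * qla24xx_deallocate_vp_id
 *	Tear-down counterpart of qla24xx_allocate_vp_id(): wait for every
 *	vp_list walker to drop its reference, then unlink the vport and
 *	release its bitmap slot.
 *
 * Walkers pin a vport with vha->vref_count before dropping vport_slock;
 * the pattern (used by qla2x00_alert_all_vps() and
 * qla2x00_do_dpc_all_vps() below) is:
 *
 *	spin_lock_irqsave(&ha->vport_slock, flags);
 *	list_for_each_entry(vha, &ha->vp_list, list) {
 *		atomic_inc(&vha->vref_count);
 *		spin_unlock_irqrestore(&ha->vport_slock, flags);
 *		... use vha without holding the lock ...
 *		spin_lock_irqsave(&ha->vport_slock, flags);
 *		atomic_dec(&vha->vref_count);
 *	}
 *	spin_unlock_irqrestore(&ha->vport_slock, flags);
 */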
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list. Lock needs to be held for safe removal (it ensures
	 * no active vp_list traversal while the vport is removed).
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);
		msleep(500);
		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, please make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		DEBUG15(printk("scsi(%ld): Marking port dead, "
		    "loop_id=0x%04x :%x\n",
		    vha->host_no, fcport->loop_id, fcport->vp_idx));

		atomic_set(&fcport->state, FCS_DEVICE_DEAD);
		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		atomic_set(&fcport->state, FCS_UNCONFIGURED);
	}
}

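/*
 * qla24xx_disable_vp
 *	Log the vport out via VCE_COMMAND_DISABLE_VPS_LOGO_ALL, force its
 *	loop down and mark all of the vport's fcports dead.
 *
 * Return:
 *	0 on success, -1 if the control command failed.
 */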
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

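/*
 * qla24xx_enable_vp
 *	(Re)enable a vport: requires the physical port to be up on a fabric
 *	(ISP_CFG_F) topology, then pushes the vport configuration to the
 *	firmware via qla24xx_modify_vp_config().
 *
 * Return:
 *	0 on success, nonzero on failure.
 */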
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Enabled\n", vha->vp_idx));
	return 0;

enable_failed:
	DEBUG15(qla_printk(KERN_INFO, ha,
	    "Virtual port with id: %d - Disabled\n", vha->vp_idx));
	return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
	    vha->host_no, __func__));
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
		    "receiving of RSCN requests: 0x%x\n", ret));
		return;
	}
	/* Corresponds to SCR enabled */
	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

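/*
 * qla2x00_alert_all_vps
 *	Fan a link-level async event (mb[0]) out to every vport so each
 *	one can react as if it had received the event itself.
 */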
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				DEBUG15(printk("scsi(%ld)%s: Async_event for"
				    " VP[%d], mb = 0x%x, vha=%p\n",
				    vha->host_no, __func__, i, *mb, vha));
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
	    vha->host_no, vha->vp_idx));
	return qla24xx_enable_vp(vha);
}

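/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC work: complete deferred port configuration, update
 *	fcports, relogin, and handle reset-marker/loop-resync flags.
 */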
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		qla24xx_configure_vp(vha);
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
		    vha->host_no));
		qla2x00_relogin(vha);

		DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
		    vha->host_no));
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
		}
	}

	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-supports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): num_vhosts %u is bigger than "
		    "max_npiv_vports %u.\n", base_vha->host_no,
		    ha->num_vhosts, ha->max_npiv_vports));
		return VPCERR_UNSUPPORTED;
	}

	return 0;
}

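/*
 * qla24xx_create_vhost
 *	Allocate and initialize a new vport host after
 *	qla24xx_vport_create_req_sanity_check() has passed. The SCSI host
 *	limits are cloned from the base port; the actual port configuration
 *	is deferred to the DPC thread (qla2x00_do_dpc_vp() above) once the
 *	firmware reports the VP index as acquired.
 */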
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
		    vha->host_no));
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = MAX_LUNS;
	host->unique_id = host->host_no;
	host->max_id = MAX_TARGETS_2200;
	host->transportt = qla2xxx_transport_vport_template;

	DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
	    vha->host_no, vha));

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

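/*
 * The multiqueue (qla25xx) helpers below manage per-vport request and
 * response queue pairs. Queue id 0 is the base queue owned by the
 * physical port and is never freed here, which is why the free helpers
 * only clear the qid map when que_id is nonzero.
 */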
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		/* BIT_0 asks the firmware to delete the queue on re-init */
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete req que %d\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Couldn't delete rsp que %d\n",
				    rsp->id);
				return ret;
			}
		}
	}

	return ret;
}

int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory "
		    "for request que\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - request_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional request queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

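/*
 * A caller pairing the two creators might do something like the
 * following (a sketch only; the real callers live elsewhere in the
 * driver): create the response queue first, then a request queue
 * bound to it:
 *
 *	rsp_id = qla25xx_create_rsp_que(ha, options, vp_idx, 0, -1);
 *	req_id = qla25xx_create_req_que(ha, options, vp_idx, 0, rsp_id, qos);
 *
 * Both return the new queue id on success and 0 on failure.
 */
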
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		qla_printk(KERN_WARNING, ha, "could not allocate memory for"
		    " response que\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Memory Allocation failed - response_ring\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		qla_printk(KERN_INFO, ha, "No resources to create "
		    "additional response queue\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		qla_printk(KERN_WARNING, ha, "msix not enabled\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode on for uncapable adapters */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}