1 /*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
33 #define pr_fmt(fmt) PFX fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
44 #include <linux/atomic.h>
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_transport_srp.h>
54 #define DRV_NAME "ib_srp"
55 #define PFX DRV_NAME ": "
56 #define DRV_VERSION "1.0"
57 #define DRV_RELDATE "July 1, 2013"
59 MODULE_AUTHOR("Roland Dreier");
60 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
61 "v" DRV_VERSION " (" DRV_RELDATE ")");
62 MODULE_LICENSE("Dual BSD/GPL");
64 static unsigned int srp_sg_tablesize;
65 static unsigned int cmd_sg_entries;
66 static unsigned int indirect_sg_entries;
67 static bool allow_ext_sg;
68 static int topspin_workarounds = 1;
70 module_param(srp_sg_tablesize, uint, 0444);
71 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
73 module_param(cmd_sg_entries, uint, 0444);
74 MODULE_PARM_DESC(cmd_sg_entries,
75 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
77 module_param(indirect_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(indirect_sg_entries,
79 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
81 module_param(allow_ext_sg, bool, 0444);
82 MODULE_PARM_DESC(allow_ext_sg,
83 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
85 module_param(topspin_workarounds, int, 0444);
86 MODULE_PARM_DESC(topspin_workarounds,
87 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
89 static struct kernel_param_ops srp_tmo_ops;
91 static int srp_reconnect_delay = 10;
92 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
93 S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
96 static int srp_fast_io_fail_tmo = 15;
97 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
98 S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(fast_io_fail_tmo,
100 "Number of seconds between the observation of a transport"
101 " layer error and failing all I/O. \"off\" means that this"
102 " functionality is disabled.");
104 static int srp_dev_loss_tmo = 600;
105 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
106 S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(dev_loss_tmo,
108 "Maximum number of seconds that the SRP transport should"
109 " insulate transport layer errors. After this time has been"
110 " exceeded the SCSI host is removed. Should be"
111 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
112 " if fast_io_fail_tmo has not been set. \"off\" means that"
113 " this functionality is disabled.");
115 static void srp_add_one(struct ib_device *device);
116 static void srp_remove_one(struct ib_device *device);
117 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
118 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
119 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
121 static struct scsi_transport_template *ib_srp_transport_template;
123 static struct ib_client srp_client = {
124 .name   = "srp",
125 .add    = srp_add_one,
126 .remove = srp_remove_one
127 };
129 static struct ib_sa_client srp_sa_client;
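/*
 * Module parameter 'get' handler shared by the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo parameters: prints the timeout in
 * seconds, or "off" when the stored value is negative.
 */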
131 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
132 {
133 int tmo = *(int *)kp->arg;
135 if (tmo >= 0)
136 return sprintf(buffer, "%d", tmo);
137 else
138 return sprintf(buffer, "off");
139 }
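/*
 * Module parameter 'set' handler: parses the new value ("off" disables the
 * timeout) and validates it against the other two SRP timeouts via
 * srp_tmo_valid() before updating the parameter.
 */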
141 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
145 if (strncmp(val, "off", 3) != 0) {
146 res = kstrtoint(val, 0, &tmo);
152 if (kp->arg == &srp_reconnect_delay)
153 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
155 else if (kp->arg == &srp_fast_io_fail_tmo)
156 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
158 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
162 *(int *)kp->arg = tmo;
168 static struct kernel_param_ops srp_tmo_ops = {
169 .get = srp_tmo_get,
170 .set = srp_tmo_set,
171 };
173 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
175 return (struct srp_target_port *) host->hostdata;
178 static const char *srp_target_info(struct Scsi_Host *host)
180 return host_to_target(host)->target_name;
183 static int srp_target_is_topspin(struct srp_target_port *target)
185 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
186 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
188 return topspin_workarounds &&
189 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
190 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
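/*
 * Allocate an information unit (IU): a buffer of @size bytes plus the DMA
 * mapping needed to transfer it in @direction.
 */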
193 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
195 enum dma_data_direction direction)
199 iu = kmalloc(sizeof *iu, gfp_mask);
203 iu->buf = kzalloc(size, gfp_mask);
207 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
209 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
213 iu->direction = direction;
225 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
230 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
236 static void srp_qp_event(struct ib_event *event, void *context)
238 pr_debug("QP event %d\n", event->event);
239 }
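/*
 * Transition a freshly created QP to the INIT state: look up the P_Key
 * index for the target's P_Key and set the remote access flags and port.
 */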
241 static int srp_init_qp(struct srp_target_port *target,
244 struct ib_qp_attr *attr;
247 attr = kmalloc(sizeof *attr, GFP_KERNEL);
251 ret = ib_find_pkey(target->srp_host->srp_dev->dev,
252 target->srp_host->port,
253 be16_to_cpu(target->path.pkey),
258 attr->qp_state = IB_QPS_INIT;
259 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
260 IB_ACCESS_REMOTE_WRITE);
261 attr->port_num = target->srp_host->port;
263 ret = ib_modify_qp(qp, attr,
274 static int srp_new_cm_id(struct srp_target_port *target)
276 struct ib_cm_id *new_cm_id;
278 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
279 srp_cm_handler, target);
280 if (IS_ERR(new_cm_id))
281 return PTR_ERR(new_cm_id);
284 ib_destroy_cm_id(target->cm_id);
285 target->cm_id = new_cm_id;
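/*
 * Allocate the IB resources used by a target port: a receive and a send
 * completion queue sized to target->queue_size and an RC QP, which is then
 * initialized via srp_init_qp().
 */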
290 static int srp_create_target_ib(struct srp_target_port *target)
292 struct ib_qp_init_attr *init_attr;
293 struct ib_cq *recv_cq, *send_cq;
297 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
301 recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
302 srp_recv_completion, NULL, target,
303 target->queue_size, target->comp_vector);
304 if (IS_ERR(recv_cq)) {
305 ret = PTR_ERR(recv_cq);
309 send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
310 srp_send_completion, NULL, target,
311 target->queue_size, target->comp_vector);
312 if (IS_ERR(send_cq)) {
313 ret = PTR_ERR(send_cq);
317 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
319 init_attr->event_handler = srp_qp_event;
320 init_attr->cap.max_send_wr = target->queue_size;
321 init_attr->cap.max_recv_wr = target->queue_size;
322 init_attr->cap.max_recv_sge = 1;
323 init_attr->cap.max_send_sge = 1;
324 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
325 init_attr->qp_type = IB_QPT_RC;
326 init_attr->send_cq = send_cq;
327 init_attr->recv_cq = recv_cq;
329 qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
335 ret = srp_init_qp(target, qp);
340 ib_destroy_qp(target->qp);
342 ib_destroy_cq(target->recv_cq);
344 ib_destroy_cq(target->send_cq);
347 target->recv_cq = recv_cq;
348 target->send_cq = send_cq;
357 ib_destroy_cq(send_cq);
360 ib_destroy_cq(recv_cq);
368 * Note: this function may be called without srp_alloc_iu_bufs() having been
369 * invoked. Hence the target->[rt]x_ring checks.
371 static void srp_free_target_ib(struct srp_target_port *target)
375 ib_destroy_qp(target->qp);
376 ib_destroy_cq(target->send_cq);
377 ib_destroy_cq(target->recv_cq);
380 target->send_cq = target->recv_cq = NULL;
382 if (target->rx_ring) {
383 for (i = 0; i < target->queue_size; ++i)
384 srp_free_iu(target->srp_host, target->rx_ring[i]);
385 kfree(target->rx_ring);
386 target->rx_ring = NULL;
388 if (target->tx_ring) {
389 for (i = 0; i < target->queue_size; ++i)
390 srp_free_iu(target->srp_host, target->tx_ring[i]);
391 kfree(target->tx_ring);
392 target->tx_ring = NULL;
396 static void srp_path_rec_completion(int status,
397 struct ib_sa_path_rec *pathrec,
400 struct srp_target_port *target = target_ptr;
402 target->status = status;
404 shost_printk(KERN_ERR, target->scsi_host,
405 PFX "Got failed path rec status %d\n", status);
407 target->path = *pathrec;
408 complete(&target->done);
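/*
 * Start an SA path record query for the target and wait for
 * srp_path_rec_completion() to store the result in target->path.
 */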
411 static int srp_lookup_path(struct srp_target_port *target)
413 target->path.numb_path = 1;
415 init_completion(&target->done);
417 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
418 target->srp_host->srp_dev->dev,
419 target->srp_host->port,
421 IB_SA_PATH_REC_SERVICE_ID |
422 IB_SA_PATH_REC_DGID |
423 IB_SA_PATH_REC_SGID |
424 IB_SA_PATH_REC_NUMB_PATH |
426 SRP_PATH_REC_TIMEOUT_MS,
428 srp_path_rec_completion,
429 target, &target->path_query);
430 if (target->path_query_id < 0)
431 return target->path_query_id;
433 wait_for_completion(&target->done);
435 if (target->status < 0)
436 shost_printk(KERN_WARNING, target->scsi_host,
437 PFX "Path record query failed\n");
439 return target->status;
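/*
 * Build an SRP_LOGIN_REQ and send it to the target as the private data of
 * an IB CM REQ. The initiator and target port identifiers are laid out
 * according to the reported I/O class, and the Topspin/Cisco initiator
 * port ID workaround is applied when needed.
 */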
442 static int srp_send_req(struct srp_target_port *target)
445 struct ib_cm_req_param param;
446 struct srp_login_req priv;
450 req = kzalloc(sizeof *req, GFP_KERNEL);
454 req->param.primary_path = &target->path;
455 req->param.alternate_path = NULL;
456 req->param.service_id = target->service_id;
457 req->param.qp_num = target->qp->qp_num;
458 req->param.qp_type = target->qp->qp_type;
459 req->param.private_data = &req->priv;
460 req->param.private_data_len = sizeof req->priv;
461 req->param.flow_control = 1;
463 get_random_bytes(&req->param.starting_psn, 4);
464 req->param.starting_psn &= 0xffffff;
467 * Pick some arbitrary defaults here; we could make these
468 * module parameters if anyone cared about setting them.
470 req->param.responder_resources = 4;
471 req->param.remote_cm_response_timeout = 20;
472 req->param.local_cm_response_timeout = 20;
473 req->param.retry_count = target->tl_retry_count;
474 req->param.rnr_retry_count = 7;
475 req->param.max_cm_retries = 15;
477 req->priv.opcode = SRP_LOGIN_REQ;
479 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
480 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
481 SRP_BUF_FORMAT_INDIRECT);
483 * In the published SRP specification (draft rev. 16a), the
484 * port identifier format is 8 bytes of ID extension followed
485 * by 8 bytes of GUID. Older drafts put the two halves in the
486 * opposite order, so that the GUID comes first.
488 * Targets conforming to these obsolete drafts can be
489 * recognized by the I/O Class they report.
491 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
492 memcpy(req->priv.initiator_port_id,
493 &target->path.sgid.global.interface_id, 8);
494 memcpy(req->priv.initiator_port_id + 8,
495 &target->initiator_ext, 8);
496 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
497 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
499 memcpy(req->priv.initiator_port_id,
500 &target->initiator_ext, 8);
501 memcpy(req->priv.initiator_port_id + 8,
502 &target->path.sgid.global.interface_id, 8);
503 memcpy(req->priv.target_port_id, &target->id_ext, 8);
504 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
508 * Topspin/Cisco SRP targets will reject our login unless we
509 * zero out the first 8 bytes of our initiator port ID and set
510 * the second 8 bytes to the local node GUID.
512 if (srp_target_is_topspin(target)) {
513 shost_printk(KERN_DEBUG, target->scsi_host,
514 PFX "Topspin/Cisco initiator port ID workaround "
515 "activated for target GUID %016llx\n",
516 (unsigned long long) be64_to_cpu(target->ioc_guid));
517 memset(req->priv.initiator_port_id, 0, 8);
518 memcpy(req->priv.initiator_port_id + 8,
519 &target->srp_host->srp_dev->dev->node_guid, 8);
522 status = ib_send_cm_req(target->cm_id, &req->param);
529 static bool srp_queue_remove_work(struct srp_target_port *target)
531 bool changed = false;
533 spin_lock_irq(&target->lock);
534 if (target->state != SRP_TARGET_REMOVED) {
535 target->state = SRP_TARGET_REMOVED;
538 spin_unlock_irq(&target->lock);
541 queue_work(system_long_wq, &target->remove_work);
546 static bool srp_change_conn_state(struct srp_target_port *target,
549 bool changed = false;
551 spin_lock_irq(&target->lock);
552 if (target->connected != connected) {
553 target->connected = connected;
556 spin_unlock_irq(&target->lock);
561 static void srp_disconnect_target(struct srp_target_port *target)
563 if (srp_change_conn_state(target, false)) {
564 /* XXX should send SRP_I_LOGOUT request */
566 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
567 shost_printk(KERN_DEBUG, target->scsi_host,
568 PFX "Sending CM DREQ failed\n");
573 static void srp_free_req_data(struct srp_target_port *target)
575 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
576 struct srp_request *req;
579 if (!target->req_ring)
582 for (i = 0; i < target->req_ring_size; ++i) {
583 req = &target->req_ring[i];
584 kfree(req->fmr_list);
585 kfree(req->map_page);
586 if (req->indirect_dma_addr) {
587 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
588 target->indirect_size,
591 kfree(req->indirect_desc);
594 kfree(target->req_ring);
595 target->req_ring = NULL;
598 static int srp_alloc_req_data(struct srp_target_port *target)
600 struct srp_device *srp_dev = target->srp_host->srp_dev;
601 struct ib_device *ibdev = srp_dev->dev;
602 struct srp_request *req;
604 int i, ret = -ENOMEM;
606 INIT_LIST_HEAD(&target->free_reqs);
608 target->req_ring = kzalloc(target->req_ring_size *
609 sizeof(*target->req_ring), GFP_KERNEL);
610 if (!target->req_ring)
613 for (i = 0; i < target->req_ring_size; ++i) {
614 req = &target->req_ring[i];
615 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
617 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
619 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
620 if (!req->fmr_list || !req->map_page || !req->indirect_desc)
623 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
624 target->indirect_size,
626 if (ib_dma_mapping_error(ibdev, dma_addr))
629 req->indirect_dma_addr = dma_addr;
631 list_add_tail(&req->list, &target->free_reqs);
640 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
641 * @shost: SCSI host whose attributes to remove from sysfs.
643 * Note: Any attributes defined in the host template and that did not exist
644 * before invocation of this function will be ignored.
646 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
648 struct device_attribute **attr;
650 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
651 device_remove_file(&shost->shost_dev, *attr);
654 static void srp_remove_target(struct srp_target_port *target)
656 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
658 srp_del_scsi_host_attr(target->scsi_host);
659 srp_rport_get(target->rport);
660 srp_remove_host(target->scsi_host);
661 scsi_remove_host(target->scsi_host);
662 srp_disconnect_target(target);
663 ib_destroy_cm_id(target->cm_id);
664 srp_free_target_ib(target);
665 cancel_work_sync(&target->tl_err_work);
666 srp_rport_put(target->rport);
667 srp_free_req_data(target);
668 scsi_host_put(target->scsi_host);
671 static void srp_remove_work(struct work_struct *work)
673 struct srp_target_port *target =
674 container_of(work, struct srp_target_port, remove_work);
676 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
678 srp_remove_target(target);
680 spin_lock(&target->srp_host->target_lock);
681 list_del(&target->list);
682 spin_unlock(&target->srp_host->target_lock);
685 static void srp_rport_delete(struct srp_rport *rport)
687 struct srp_target_port *target = rport->lld_data;
689 srp_queue_remove_work(target);
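/*
 * Establish a connection to the target: look up a path, send the login
 * request and wait for the CM handler to report the result. Port and
 * LID/QP redirects as well as stale connections are retried.
 */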
692 static int srp_connect_target(struct srp_target_port *target)
697 WARN_ON_ONCE(target->connected);
699 target->qp_in_error = false;
701 ret = srp_lookup_path(target);
706 init_completion(&target->done);
707 ret = srp_send_req(target);
710 wait_for_completion(&target->done);
713 * The CM event handling code will set status to
714 * SRP_PORT_REDIRECT if we get a port redirect REJ
715 * back, or SRP_DLID_REDIRECT if we get a lid/qp
716 * redirect REJ back.
717 */
718 switch (target->status) {
720 srp_change_conn_state(target, true);
723 case SRP_PORT_REDIRECT:
724 ret = srp_lookup_path(target);
729 case SRP_DLID_REDIRECT:
733 /* Our current CM id was stale, and is now in timewait.
734 * Try to reconnect with a new one.
736 if (!retries-- || srp_new_cm_id(target)) {
737 shost_printk(KERN_ERR, target->scsi_host, PFX
738 "giving up on stale connection\n");
739 target->status = -ECONNRESET;
740 return target->status;
743 shost_printk(KERN_ERR, target->scsi_host, PFX
744 "retrying stale connection\n");
748 return target->status;
753 static void srp_unmap_data(struct scsi_cmnd *scmnd,
754 struct srp_target_port *target,
755 struct srp_request *req)
757 struct ib_device *ibdev = target->srp_host->srp_dev->dev;
758 struct ib_pool_fmr **pfmr;
760 if (!scsi_sglist(scmnd) ||
761 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
762 scmnd->sc_data_direction != DMA_FROM_DEVICE))
765 pfmr = req->fmr_list;
767 ib_fmr_pool_unmap(*pfmr++);
769 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
770 scmnd->sc_data_direction);
774 * srp_claim_req - Take ownership of the scmnd associated with a request.
775 * @target: SRP target port.
777 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
778 * ownership of @req->scmnd if it equals @scmnd.
779 *
780 * Return value:
781 * Either NULL or a pointer to the SCSI command the caller became owner of.
782 */
783 static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
784 struct srp_request *req,
785 struct scsi_cmnd *scmnd)
789 spin_lock_irqsave(&target->lock, flags);
793 } else if (req->scmnd == scmnd) {
798 spin_unlock_irqrestore(&target->lock, flags);
804 * srp_free_req() - Unmap data and add request to the free request list.
806 static void srp_free_req(struct srp_target_port *target,
807 struct srp_request *req, struct scsi_cmnd *scmnd,
812 srp_unmap_data(scmnd, target, req);
814 spin_lock_irqsave(&target->lock, flags);
815 target->req_lim += req_lim_delta;
816 list_add_tail(&req->list, &target->free_reqs);
817 spin_unlock_irqrestore(&target->lock, flags);
820 static void srp_finish_req(struct srp_target_port *target,
821 struct srp_request *req, int result)
823 struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
826 srp_free_req(target, req, scmnd, 0);
827 scmnd->result = result;
828 scmnd->scsi_done(scmnd);
832 static void srp_terminate_io(struct srp_rport *rport)
834 struct srp_target_port *target = rport->lld_data;
837 for (i = 0; i < target->req_ring_size; ++i) {
838 struct srp_request *req = &target->req_ring[i];
839 srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
844 * It is up to the caller to ensure that srp_rport_reconnect() calls are
845 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
846 * srp_reset_device() or srp_reset_host() calls will occur while this function
847 * is in progress. One way to realize that is not to call this function
848 * directly but to call srp_reconnect_rport() instead since that last function
849 * serializes calls of this function via rport->mutex and also blocks
850 * srp_queuecommand() calls before invoking this function.
852 static int srp_rport_reconnect(struct srp_rport *rport)
854 struct srp_target_port *target = rport->lld_data;
857 srp_disconnect_target(target);
859 * Now get a new local CM ID so that we avoid confusing the target in
860 * case things are really fouled up. Doing so also ensures that all CM
861 * callbacks will have finished before a new QP is allocated.
863 ret = srp_new_cm_id(target);
865 * Whether or not creating a new CM ID succeeded, create a new
866 * QP. This guarantees that all completion callback function
867 * invocations have finished before request resetting starts.
870 ret = srp_create_target_ib(target);
872 srp_create_target_ib(target);
874 for (i = 0; i < target->req_ring_size; ++i) {
875 struct srp_request *req = &target->req_ring[i];
876 srp_finish_req(target, req, DID_RESET << 16);
879 INIT_LIST_HEAD(&target->free_tx);
880 for (i = 0; i < target->queue_size; ++i)
881 list_add(&target->tx_ring[i]->list, &target->free_tx);
884 ret = srp_connect_target(target);
887 shost_printk(KERN_INFO, target->scsi_host,
888 PFX "reconnect succeeded\n");
893 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
894 unsigned int dma_len, u32 rkey)
896 struct srp_direct_buf *desc = state->desc;
898 desc->va = cpu_to_be64(dma_addr);
899 desc->key = cpu_to_be32(rkey);
900 desc->len = cpu_to_be32(dma_len);
902 state->total_len += dma_len;
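/*
 * Flush the pages accumulated in @state: a single page is emitted as a
 * direct descriptor, otherwise the pages are mapped through the FMR pool
 * and described by the resulting rkey.
 */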
907 static int srp_map_finish_fmr(struct srp_map_state *state,
908 struct srp_target_port *target)
910 struct srp_device *dev = target->srp_host->srp_dev;
911 struct ib_pool_fmr *fmr;
917 if (state->npages == 1) {
918 srp_map_desc(state, state->base_dma_addr, state->fmr_len,
920 state->npages = state->fmr_len = 0;
924 fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
925 state->npages, io_addr);
929 *state->next_fmr++ = fmr;
932 srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
933 state->npages = state->fmr_len = 0;
937 static void srp_map_update_start(struct srp_map_state *state,
938 struct scatterlist *sg, int sg_index,
941 state->unmapped_sg = sg;
942 state->unmapped_index = sg_index;
943 state->unmapped_addr = dma_addr;
946 static int srp_map_sg_entry(struct srp_map_state *state,
947 struct srp_target_port *target,
948 struct scatterlist *sg, int sg_index,
951 struct srp_device *dev = target->srp_host->srp_dev;
952 struct ib_device *ibdev = dev->dev;
953 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
954 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
961 if (use_fmr == SRP_MAP_NO_FMR) {
962 /* Once we're in direct map mode for a request, we don't
963 * go back to FMR mode, so no need to update anything
964 * other than the descriptor.
966 srp_map_desc(state, dma_addr, dma_len, target->rkey);
970 /* If we start at an offset into the FMR page, don't merge into
971 * the current FMR. Finish it out, and use the kernel's MR for this
972 * sg entry. This is to avoid potential bugs on some SRP targets
973 * that were never quite defined, but went away when the initiator
974 * avoided using FMR on such page fragments.
976 if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
977 ret = srp_map_finish_fmr(state, target);
981 srp_map_desc(state, dma_addr, dma_len, target->rkey);
982 srp_map_update_start(state, NULL, 0, 0);
986 /* If this is the first sg to go into the FMR, save our position.
987 * We need to know the first unmapped entry, its index, and the
988 * first unmapped address within that entry to be able to restart
989 * mapping after an error.
991 if (!state->unmapped_sg)
992 srp_map_update_start(state, sg, sg_index, dma_addr);
995 if (state->npages == SRP_FMR_SIZE) {
996 ret = srp_map_finish_fmr(state, target);
1000 srp_map_update_start(state, sg, sg_index, dma_addr);
1003 len = min_t(unsigned int, dma_len, dev->fmr_page_size);
1006 state->base_dma_addr = dma_addr;
1007 state->pages[state->npages++] = dma_addr;
1008 state->fmr_len += len;
1013 /* If the last entry of the FMR wasn't a full page, then we need to
1014 * close it out and start a new one -- we can only merge at page
1015 * boundaries.
1016 */
1017 ret = 0;
1018 if (len != dev->fmr_page_size) {
1019 ret = srp_map_finish_fmr(state, target);
1021 srp_map_update_start(state, NULL, 0, 0);
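/*
 * Map the data buffer of @scmnd: DMA-map the scatterlist and build either
 * a single direct descriptor or an indirect descriptor table (using FMR
 * where possible). Returns the length of the resulting SRP_CMD IU.
 */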
1026 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
1027 struct srp_request *req)
1029 struct scatterlist *scat, *sg;
1030 struct srp_cmd *cmd = req->cmd->buf;
1031 int i, len, nents, count, use_fmr;
1032 struct srp_device *dev;
1033 struct ib_device *ibdev;
1034 struct srp_map_state state;
1035 struct srp_indirect_buf *indirect_hdr;
1039 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1040 return sizeof (struct srp_cmd);
1042 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1043 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1044 shost_printk(KERN_WARNING, target->scsi_host,
1045 PFX "Unhandled data direction %d\n",
1046 scmnd->sc_data_direction);
1050 nents = scsi_sg_count(scmnd);
1051 scat = scsi_sglist(scmnd);
1053 dev = target->srp_host->srp_dev;
1056 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1057 if (unlikely(count == 0))
1060 fmt = SRP_DATA_DESC_DIRECT;
1061 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1065 * The midlayer only generated a single gather/scatter
1066 * entry, or DMA mapping coalesced everything to a
1067 * single entry. So a direct descriptor along with
1068 * the DMA MR suffices.
1070 struct srp_direct_buf *buf = (void *) cmd->add_data;
1072 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1073 buf->key = cpu_to_be32(target->rkey);
1074 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1080 /* We have more than one scatter/gather entry, so build our indirect
1081 * descriptor table, trying to merge as many entries with FMR as we
1082 * can.
1083 */
1084 indirect_hdr = (void *) cmd->add_data;
1086 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1087 target->indirect_size, DMA_TO_DEVICE);
1089 memset(&state, 0, sizeof(state));
1090 state.desc = req->indirect_desc;
1091 state.pages = req->map_page;
1092 state.next_fmr = req->fmr_list;
1094 use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
1096 for_each_sg(scat, sg, count, i) {
1097 if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
1098 /* FMR mapping failed, so backtrack to the first
1099 * unmapped entry and continue on without using FMR.
1101 dma_addr_t dma_addr;
1102 unsigned int dma_len;
1105 sg = state.unmapped_sg;
1106 i = state.unmapped_index;
1108 dma_addr = ib_sg_dma_address(ibdev, sg);
1109 dma_len = ib_sg_dma_len(ibdev, sg);
1110 dma_len -= (state.unmapped_addr - dma_addr);
1111 dma_addr = state.unmapped_addr;
1112 use_fmr = SRP_MAP_NO_FMR;
1113 srp_map_desc(&state, dma_addr, dma_len, target->rkey);
1117 if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
1120 /* We've mapped the request, now pull as much of the indirect
1121 * descriptor table as we can into the command buffer. If this
1122 * target is not using an external indirect table, we are
1123 * guaranteed to fit into the command, as the SCSI layer won't
1124 * give us more S/G entries than we allow.
1126 req->nfmr = state.nfmr;
1127 if (state.ndesc == 1) {
1128 /* FMR mapping was able to collapse this to one entry,
1129 * so use a direct descriptor.
1131 struct srp_direct_buf *buf = (void *) cmd->add_data;
1133 *buf = req->indirect_desc[0];
1137 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1138 !target->allow_ext_sg)) {
1139 shost_printk(KERN_ERR, target->scsi_host,
1140 "Could not fit S/G list into SRP_CMD\n");
1144 count = min(state.ndesc, target->cmd_sg_cnt);
1145 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1147 fmt = SRP_DATA_DESC_INDIRECT;
1148 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1149 len += count * sizeof (struct srp_direct_buf);
1151 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1152 count * sizeof (struct srp_direct_buf));
1154 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1155 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1156 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1157 indirect_hdr->len = cpu_to_be32(state.total_len);
1159 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1160 cmd->data_out_desc_cnt = count;
1162 cmd->data_in_desc_cnt = count;
1164 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1168 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1169 cmd->buf_fmt = fmt << 4;
1177 * Return an IU and possible credit to the free pool
1179 static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1180 enum srp_iu_type iu_type)
1182 unsigned long flags;
1184 spin_lock_irqsave(&target->lock, flags);
1185 list_add(&iu->list, &target->free_tx);
1186 if (iu_type != SRP_IU_RSP)
1188 spin_unlock_irqrestore(&target->lock, flags);
1192 * Must be called with target->lock held to protect req_lim and free_tx.
1193 * If IU is not sent, it must be returned using srp_put_tx_iu().
1196 * An upper limit for the number of allocated information units for each
1197 * request type is:
1198 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1199 * more than Scsi_Host.can_queue requests.
1200 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1201 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1202 * one unanswered SRP request to an initiator.
1204 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1205 enum srp_iu_type iu_type)
1207 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1210 srp_send_completion(target->send_cq, target);
1212 if (list_empty(&target->free_tx))
1215 /* Initiator responses to target requests do not consume credits */
1216 if (iu_type != SRP_IU_RSP) {
1217 if (target->req_lim <= rsv) {
1218 ++target->zero_req_lim;
1225 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
1226 list_del(&iu->list);
1230 static int srp_post_send(struct srp_target_port *target,
1231 struct srp_iu *iu, int len)
1234 struct ib_send_wr wr, *bad_wr;
1236 list.addr = iu->dma;
1238 list.lkey = target->lkey;
1241 wr.wr_id = (uintptr_t) iu;
1244 wr.opcode = IB_WR_SEND;
1245 wr.send_flags = IB_SEND_SIGNALED;
1247 return ib_post_send(target->qp, &wr, &bad_wr);
1250 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1252 struct ib_recv_wr wr, *bad_wr;
1255 list.addr = iu->dma;
1256 list.length = iu->size;
1257 list.lkey = target->lkey;
1260 wr.wr_id = (uintptr_t) iu;
1264 return ib_post_recv(target->qp, &wr, &bad_wr);
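/*
 * Handle an SRP_RSP IU: credit the request limit delta and either complete
 * a task management request or copy status, sense data and residual counts
 * into the SCSI command before completing it.
 */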
1267 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1269 struct srp_request *req;
1270 struct scsi_cmnd *scmnd;
1271 unsigned long flags;
1273 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1274 spin_lock_irqsave(&target->lock, flags);
1275 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1276 spin_unlock_irqrestore(&target->lock, flags);
1278 target->tsk_mgmt_status = -1;
1279 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1280 target->tsk_mgmt_status = rsp->data[3];
1281 complete(&target->tsk_mgmt_done);
1283 req = &target->req_ring[rsp->tag];
1284 scmnd = srp_claim_req(target, req, NULL);
1286 shost_printk(KERN_ERR, target->scsi_host,
1287 "Null scmnd for RSP w/tag %016llx\n",
1288 (unsigned long long) rsp->tag);
1290 spin_lock_irqsave(&target->lock, flags);
1291 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1292 spin_unlock_irqrestore(&target->lock, flags);
1296 scmnd->result = rsp->status;
1298 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1299 memcpy(scmnd->sense_buffer, rsp->data +
1300 be32_to_cpu(rsp->resp_data_len),
1301 min_t(int, be32_to_cpu(rsp->sense_data_len),
1302 SCSI_SENSE_BUFFERSIZE));
1305 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
1306 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1307 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
1308 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1310 srp_free_req(target, req, scmnd,
1311 be32_to_cpu(rsp->req_lim_delta));
1313 scmnd->host_scribble = NULL;
1314 scmnd->scsi_done(scmnd);
1318 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1321 struct ib_device *dev = target->srp_host->srp_dev->dev;
1322 unsigned long flags;
1326 spin_lock_irqsave(&target->lock, flags);
1327 target->req_lim += req_delta;
1328 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
1329 spin_unlock_irqrestore(&target->lock, flags);
1332 shost_printk(KERN_ERR, target->scsi_host, PFX
1333 "no IU available to send response\n");
1337 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1338 memcpy(iu->buf, rsp, len);
1339 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1341 err = srp_post_send(target, iu, len);
1343 shost_printk(KERN_ERR, target->scsi_host, PFX
1344 "unable to post response: %d\n", err);
1345 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1351 static void srp_process_cred_req(struct srp_target_port *target,
1352 struct srp_cred_req *req)
1354 struct srp_cred_rsp rsp = {
1355 .opcode = SRP_CRED_RSP,
1358 s32 delta = be32_to_cpu(req->req_lim_delta);
1360 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1361 shost_printk(KERN_ERR, target->scsi_host, PFX
1362 "problems processing SRP_CRED_REQ\n");
1365 static void srp_process_aer_req(struct srp_target_port *target,
1366 struct srp_aer_req *req)
1368 struct srp_aer_rsp rsp = {
1369 .opcode = SRP_AER_RSP,
1372 s32 delta = be32_to_cpu(req->req_lim_delta);
1374 shost_printk(KERN_ERR, target->scsi_host, PFX
1375 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1377 if (srp_response_common(target, delta, &rsp, sizeof rsp))
1378 shost_printk(KERN_ERR, target->scsi_host, PFX
1379 "problems processing SRP_AER_REQ\n");
1382 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1384 struct ib_device *dev = target->srp_host->srp_dev->dev;
1385 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1389 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1392 opcode = *(u8 *) iu->buf;
1395 shost_printk(KERN_ERR, target->scsi_host,
1396 PFX "recv completion, opcode 0x%02x\n", opcode);
1397 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1398 iu->buf, wc->byte_len, true);
1403 srp_process_rsp(target, iu->buf);
1407 srp_process_cred_req(target, iu->buf);
1411 srp_process_aer_req(target, iu->buf);
1415 /* XXX Handle target logout */
1416 shost_printk(KERN_WARNING, target->scsi_host,
1417 PFX "Got target logout request\n");
1421 shost_printk(KERN_WARNING, target->scsi_host,
1422 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1426 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1429 res = srp_post_recv(target, iu);
1431 shost_printk(KERN_ERR, target->scsi_host,
1432 PFX "Recv failed with error code %d\n", res);
1436 * srp_tl_err_work() - handle a transport layer error
1438 * Note: This function may get invoked before the rport has been created,
1439 * hence the target->rport test.
1441 static void srp_tl_err_work(struct work_struct *work)
1443 struct srp_target_port *target;
1445 target = container_of(work, struct srp_target_port, tl_err_work);
1447 srp_start_tl_fail_timers(target->rport);
1450 static void srp_handle_qp_err(enum ib_wc_status wc_status,
1451 enum ib_wc_opcode wc_opcode,
1452 struct srp_target_port *target)
1454 if (target->connected && !target->qp_in_error) {
1455 shost_printk(KERN_ERR, target->scsi_host,
1456 PFX "failed %s status %d\n",
1457 wc_opcode & IB_WC_RECV ? "receive" : "send",
1459 queue_work(system_long_wq, &target->tl_err_work);
1461 target->qp_in_error = true;
1464 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1466 struct srp_target_port *target = target_ptr;
1469 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1470 while (ib_poll_cq(cq, 1, &wc) > 0) {
1471 if (likely(wc.status == IB_WC_SUCCESS)) {
1472 srp_handle_recv(target, &wc);
1474 srp_handle_qp_err(wc.status, wc.opcode, target);
1479 static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1481 struct srp_target_port *target = target_ptr;
1485 while (ib_poll_cq(cq, 1, &wc) > 0) {
1486 if (likely(wc.status == IB_WC_SUCCESS)) {
1487 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1488 list_add(&iu->list, &target->free_tx);
1490 srp_handle_qp_err(wc.status, wc.opcode, target);
1495 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1497 struct srp_target_port *target = host_to_target(shost);
1498 struct srp_rport *rport = target->rport;
1499 struct srp_request *req;
1501 struct srp_cmd *cmd;
1502 struct ib_device *dev;
1503 unsigned long flags;
1505 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1508 * The SCSI EH thread is the only context from which srp_queuecommand()
1509 * can get invoked for blocked devices (SDEV_BLOCK /
1510 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1511 * locking the rport mutex if invoked from inside the SCSI EH.
1514 mutex_lock(&rport->mutex);
1516 result = srp_chkready(target->rport);
1517 if (unlikely(result)) {
1518 scmnd->result = result;
1519 scmnd->scsi_done(scmnd);
1523 spin_lock_irqsave(&target->lock, flags);
1524 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1528 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1529 list_del(&req->list);
1530 spin_unlock_irqrestore(&target->lock, flags);
1532 dev = target->srp_host->srp_dev->dev;
1533 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1537 scmnd->host_scribble = (void *) req;
1540 memset(cmd, 0, sizeof *cmd);
1542 cmd->opcode = SRP_CMD;
1543 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
1544 cmd->tag = req->index;
1545 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1550 len = srp_map_data(scmnd, target, req);
1552 shost_printk(KERN_ERR, target->scsi_host,
1553 PFX "Failed to map data\n");
1557 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1560 if (srp_post_send(target, iu, len)) {
1561 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1567 mutex_unlock(&rport->mutex);
1572 srp_unmap_data(scmnd, target, req);
1575 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1577 spin_lock_irqsave(&target->lock, flags);
1578 list_add(&req->list, &target->free_reqs);
1581 spin_unlock_irqrestore(&target->lock, flags);
1584 mutex_unlock(&rport->mutex);
1586 return SCSI_MLQUEUE_HOST_BUSY;
1590 * Note: the resources allocated in this function are freed in
1591 * srp_free_target_ib().
1593 static int srp_alloc_iu_bufs(struct srp_target_port *target)
1597 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
1599 if (!target->rx_ring)
1601 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
1603 if (!target->tx_ring)
1606 for (i = 0; i < target->queue_size; ++i) {
1607 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1608 target->max_ti_iu_len,
1609 GFP_KERNEL, DMA_FROM_DEVICE);
1610 if (!target->rx_ring[i])
1614 for (i = 0; i < target->queue_size; ++i) {
1615 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1617 GFP_KERNEL, DMA_TO_DEVICE);
1618 if (!target->tx_ring[i])
1621 list_add(&target->tx_ring[i]->list, &target->free_tx);
1627 for (i = 0; i < target->queue_size; ++i) {
1628 srp_free_iu(target->srp_host, target->rx_ring[i]);
1629 srp_free_iu(target->srp_host, target->tx_ring[i]);
1634 kfree(target->tx_ring);
1635 target->tx_ring = NULL;
1636 kfree(target->rx_ring);
1637 target->rx_ring = NULL;
1642 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1644 uint64_t T_tr_ns, max_compl_time_ms;
1645 uint32_t rq_tmo_jiffies;
1648 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1649 * table 91), both the QP timeout and the retry count have to be set
1650 * for RC QP's during the RTR to RTS transition.
1652 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1653 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1656 * Set target->rq_tmo_jiffies to one second more than the largest time
1657 * it can take before an error completion is generated. See also
1658 * C9-140..142 in the IBTA spec for more information about how to
1659 * convert the QP Local ACK Timeout value to nanoseconds.
1661 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1662 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1663 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1664 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1666 return rq_tmo_jiffies;
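/*
 * Handle an IB CM REP that carries an SRP_LOGIN_RSP: record the request
 * limit and maximum target IU length, allocate the IU rings if necessary,
 * move the QP through RTR to RTS, post receive buffers and send an RTU.
 */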
1669 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1670 struct srp_login_rsp *lrsp,
1671 struct srp_target_port *target)
1673 struct ib_qp_attr *qp_attr = NULL;
1678 if (lrsp->opcode == SRP_LOGIN_RSP) {
1679 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1680 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1683 * Reserve credits for task management so we don't
1684 * bounce requests back to the SCSI mid-layer.
1686 target->scsi_host->can_queue
1687 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1688 target->scsi_host->can_queue);
1689 target->scsi_host->cmd_per_lun
1690 = min_t(int, target->scsi_host->can_queue,
1691 target->scsi_host->cmd_per_lun);
1693 shost_printk(KERN_WARNING, target->scsi_host,
1694 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1699 if (!target->rx_ring) {
1700 ret = srp_alloc_iu_bufs(target);
1706 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1710 qp_attr->qp_state = IB_QPS_RTR;
1711 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1715 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1719 for (i = 0; i < target->queue_size; i++) {
1720 struct srp_iu *iu = target->rx_ring[i];
1721 ret = srp_post_recv(target, iu);
1726 qp_attr->qp_state = IB_QPS_RTS;
1727 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1731 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1733 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1737 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1743 target->status = ret;
1746 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1747 struct ib_cm_event *event,
1748 struct srp_target_port *target)
1750 struct Scsi_Host *shost = target->scsi_host;
1751 struct ib_class_port_info *cpi;
1754 switch (event->param.rej_rcvd.reason) {
1755 case IB_CM_REJ_PORT_CM_REDIRECT:
1756 cpi = event->param.rej_rcvd.ari;
1757 target->path.dlid = cpi->redirect_lid;
1758 target->path.pkey = cpi->redirect_pkey;
1759 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1760 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1762 target->status = target->path.dlid ?
1763 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1766 case IB_CM_REJ_PORT_REDIRECT:
1767 if (srp_target_is_topspin(target)) {
1769 * Topspin/Cisco SRP gateways incorrectly send
1770 * reject reason code 25 when they mean 24
1773 memcpy(target->path.dgid.raw,
1774 event->param.rej_rcvd.ari, 16);
1776 shost_printk(KERN_DEBUG, shost,
1777 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1778 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1779 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1781 target->status = SRP_PORT_REDIRECT;
1783 shost_printk(KERN_WARNING, shost,
1784 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1785 target->status = -ECONNRESET;
1789 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1790 shost_printk(KERN_WARNING, shost,
1791 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1792 target->status = -ECONNRESET;
1795 case IB_CM_REJ_CONSUMER_DEFINED:
1796 opcode = *(u8 *) event->private_data;
1797 if (opcode == SRP_LOGIN_REJ) {
1798 struct srp_login_rej *rej = event->private_data;
1799 u32 reason = be32_to_cpu(rej->reason);
1801 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1802 shost_printk(KERN_WARNING, shost,
1803 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1805 shost_printk(KERN_WARNING, shost,
1806 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1808 shost_printk(KERN_WARNING, shost,
1809 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1810 " opcode 0x%02x\n", opcode);
1811 target->status = -ECONNRESET;
1814 case IB_CM_REJ_STALE_CONN:
1815 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1816 target->status = SRP_STALE_CONN;
1820 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1821 event->param.rej_rcvd.reason);
1822 target->status = -ECONNRESET;
1826 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1828 struct srp_target_port *target = cm_id->context;
1831 switch (event->event) {
1832 case IB_CM_REQ_ERROR:
1833 shost_printk(KERN_DEBUG, target->scsi_host,
1834 PFX "Sending CM REQ failed\n");
1836 target->status = -ECONNRESET;
1839 case IB_CM_REP_RECEIVED:
1841 srp_cm_rep_handler(cm_id, event->private_data, target);
1844 case IB_CM_REJ_RECEIVED:
1845 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1848 srp_cm_rej_handler(cm_id, event, target);
1851 case IB_CM_DREQ_RECEIVED:
1852 shost_printk(KERN_WARNING, target->scsi_host,
1853 PFX "DREQ received - connection closed\n");
1854 srp_change_conn_state(target, false);
1855 if (ib_send_cm_drep(cm_id, NULL, 0))
1856 shost_printk(KERN_ERR, target->scsi_host,
1857 PFX "Sending CM DREP failed\n");
1858 queue_work(system_long_wq, &target->tl_err_work);
1861 case IB_CM_TIMEWAIT_EXIT:
1862 shost_printk(KERN_ERR, target->scsi_host,
1863 PFX "connection closed\n");
1868 case IB_CM_MRA_RECEIVED:
1869 case IB_CM_DREQ_ERROR:
1870 case IB_CM_DREP_RECEIVED:
1874 shost_printk(KERN_WARNING, target->scsi_host,
1875 PFX "Unhandled CM event %d\n", event->event);
1880 complete(&target->done);
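/*
 * Send an SRP_TSK_MGMT IU for @func (e.g. abort task or LUN reset) and
 * wait up to SRP_ABORT_TIMEOUT_MS for the target's response.
 */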
1885 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1886 u64 req_tag, unsigned int lun, u8 func)
1888 struct srp_rport *rport = target->rport;
1889 struct ib_device *dev = target->srp_host->srp_dev->dev;
1891 struct srp_tsk_mgmt *tsk_mgmt;
1893 if (!target->connected || target->qp_in_error)
1896 init_completion(&target->tsk_mgmt_done);
1899 * Lock the rport mutex to avoid that srp_create_target_ib() is
1900 * invoked while a task management function is being sent.
1902 mutex_lock(&rport->mutex);
1903 spin_lock_irq(&target->lock);
1904 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1905 spin_unlock_irq(&target->lock);
1908 mutex_unlock(&rport->mutex);
1913 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1916 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1918 tsk_mgmt->opcode = SRP_TSK_MGMT;
1919 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1920 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
1921 tsk_mgmt->tsk_mgmt_func = func;
1922 tsk_mgmt->task_tag = req_tag;
1924 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1926 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1927 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1928 mutex_unlock(&rport->mutex);
1932 mutex_unlock(&rport->mutex);
1934 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1935 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1941 static int srp_abort(struct scsi_cmnd *scmnd)
1943 struct srp_target_port *target = host_to_target(scmnd->device->host);
1944 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1947 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1949 if (!req || !srp_claim_req(target, req, scmnd))
1951 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1952 SRP_TSK_ABORT_TASK) == 0)
1954 else if (target->rport->state == SRP_RPORT_LOST)
1958 srp_free_req(target, req, scmnd, 0);
1959 scmnd->result = DID_ABORT << 16;
1960 scmnd->scsi_done(scmnd);
1965 static int srp_reset_device(struct scsi_cmnd *scmnd)
1967 struct srp_target_port *target = host_to_target(scmnd->device->host);
1970 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1972 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1975 if (target->tsk_mgmt_status)
1978 for (i = 0; i < target->req_ring_size; ++i) {
1979 struct srp_request *req = &target->req_ring[i];
1980 if (req->scmnd && req->scmnd->device == scmnd->device)
1981 srp_finish_req(target, req, DID_RESET << 16);
1987 static int srp_reset_host(struct scsi_cmnd *scmnd)
1989 struct srp_target_port *target = host_to_target(scmnd->device->host);
1991 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1993 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
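/*
 * Make sure the block layer request timeout of disk devices is at least as
 * large as the QP retry timeout computed in srp_compute_rq_tmo().
 */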
1996 static int srp_slave_configure(struct scsi_device *sdev)
1998 struct Scsi_Host *shost = sdev->host;
1999 struct srp_target_port *target = host_to_target(shost);
2000 struct request_queue *q = sdev->request_queue;
2001 unsigned long timeout;
2003 if (sdev->type == TYPE_DISK) {
2004 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2005 blk_queue_rq_timeout(q, timeout);
2011 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2014 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2016 return sprintf(buf, "0x%016llx\n",
2017 (unsigned long long) be64_to_cpu(target->id_ext));
2020 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2023 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2025 return sprintf(buf, "0x%016llx\n",
2026 (unsigned long long) be64_to_cpu(target->ioc_guid));
2029 static ssize_t show_service_id(struct device *dev,
2030 struct device_attribute *attr, char *buf)
2032 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2034 return sprintf(buf, "0x%016llx\n",
2035 (unsigned long long) be64_to_cpu(target->service_id));
2038 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2041 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2043 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2046 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2049 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2051 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2054 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2057 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2059 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
2062 static ssize_t show_orig_dgid(struct device *dev,
2063 struct device_attribute *attr, char *buf)
2065 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2067 return sprintf(buf, "%pI6\n", target->orig_dgid);
2070 static ssize_t show_req_lim(struct device *dev,
2071 struct device_attribute *attr, char *buf)
2073 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2075 return sprintf(buf, "%d\n", target->req_lim);
2078 static ssize_t show_zero_req_lim(struct device *dev,
2079 struct device_attribute *attr, char *buf)
2081 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2083 return sprintf(buf, "%d\n", target->zero_req_lim);
2086 static ssize_t show_local_ib_port(struct device *dev,
2087 struct device_attribute *attr, char *buf)
2089 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2091 return sprintf(buf, "%d\n", target->srp_host->port);
2094 static ssize_t show_local_ib_device(struct device *dev,
2095 struct device_attribute *attr, char *buf)
2097 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2099 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2102 static ssize_t show_comp_vector(struct device *dev,
2103 struct device_attribute *attr, char *buf)
2105 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2107 return sprintf(buf, "%d\n", target->comp_vector);
2110 static ssize_t show_tl_retry_count(struct device *dev,
2111 struct device_attribute *attr, char *buf)
2113 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2115 return sprintf(buf, "%d\n", target->tl_retry_count);
2118 static ssize_t show_cmd_sg_entries(struct device *dev,
2119 struct device_attribute *attr, char *buf)
2121 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2123 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2126 static ssize_t show_allow_ext_sg(struct device *dev,
2127 struct device_attribute *attr, char *buf)
2129 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2131 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2134 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2135 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2136 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2137 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2138 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2139 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2140 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2141 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2142 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2143 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2144 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2145 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2146 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2147 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2148 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2150 static struct device_attribute *srp_host_attrs[] = {
2153 &dev_attr_service_id,
2157 &dev_attr_orig_dgid,
2159 &dev_attr_zero_req_lim,
2160 &dev_attr_local_ib_port,
2161 &dev_attr_local_ib_device,
2162 &dev_attr_comp_vector,
2163 &dev_attr_tl_retry_count,
2164 &dev_attr_cmd_sg_entries,
2165 &dev_attr_allow_ext_sg,
2169 static struct scsi_host_template srp_template = {
2170 .module = THIS_MODULE,
2171 .name = "InfiniBand SRP initiator",
2172 .proc_name = DRV_NAME,
2173 .slave_configure = srp_slave_configure,
2174 .info = srp_target_info,
2175 .queuecommand = srp_queuecommand,
2176 .eh_abort_handler = srp_abort,
2177 .eh_device_reset_handler = srp_reset_device,
2178 .eh_host_reset_handler = srp_reset_host,
2179 .skip_settle_delay = true,
2180 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2181 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2183 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2184 .use_clustering = ENABLE_CLUSTERING,
2185 .shost_attrs = srp_host_attrs
2186 };
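/*
 * Register a new target port: add the SCSI host, create the SRP transport
 * rport, link the target into the host's target list and kick off a SCSI
 * target scan.
 */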
2188 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2190 struct srp_rport_identifiers ids;
2191 struct srp_rport *rport;
2193 sprintf(target->target_name, "SRP.T10:%016llX",
2194 (unsigned long long) be64_to_cpu(target->id_ext));
2196 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2199 memcpy(ids.port_id, &target->id_ext, 8);
2200 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2201 ids.roles = SRP_RPORT_ROLE_TARGET;
2202 rport = srp_rport_add(target->scsi_host, &ids);
2203 if (IS_ERR(rport)) {
2204 scsi_remove_host(target->scsi_host);
2205 return PTR_ERR(rport);
2208 rport->lld_data = target;
2209 target->rport = rport;
2211 spin_lock(&host->target_lock);
2212 list_add_tail(&target->list, &host->target_list);
2213 spin_unlock(&host->target_lock);
2215 target->state = SRP_TARGET_LIVE;
2217 scsi_scan_target(&target->scsi_host->shost_gendev,
2218 0, target->scsi_id, SCAN_WILD_CARD, 0);
2223 static void srp_release_dev(struct device *dev)
2225 struct srp_host *host =
2226 container_of(dev, struct srp_host, dev);
2228 complete(&host->released);
2231 static struct class srp_class = {
2232 .name = "infiniband_srp",
2233 .dev_release = srp_release_dev
2237 * srp_conn_unique() - check whether the connection to a target is unique
2239 static bool srp_conn_unique(struct srp_host *host,
2240 struct srp_target_port *target)
2242 struct srp_target_port *t;
2245 if (target->state == SRP_TARGET_REMOVED)
2250 spin_lock(&host->target_lock);
2251 list_for_each_entry(t, &host->target_list, list) {
2253 target->id_ext == t->id_ext &&
2254 target->ioc_guid == t->ioc_guid &&
2255 target->initiator_ext == t->initiator_ext) {
2260 spin_unlock(&host->target_lock);
2267 * Target ports are added by writing
2269 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2270 * pkey=<P_Key>,service_id=<service ID>
2272 * to the add_target sysfs attribute.
2276 SRP_OPT_ID_EXT = 1 << 0,
2277 SRP_OPT_IOC_GUID = 1 << 1,
2278 SRP_OPT_DGID = 1 << 2,
2279 SRP_OPT_PKEY = 1 << 3,
2280 SRP_OPT_SERVICE_ID = 1 << 4,
2281 SRP_OPT_MAX_SECT = 1 << 5,
2282 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2283 SRP_OPT_IO_CLASS = 1 << 7,
2284 SRP_OPT_INITIATOR_EXT = 1 << 8,
2285 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2286 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2287 SRP_OPT_SG_TABLESIZE = 1 << 11,
2288 SRP_OPT_COMP_VECTOR = 1 << 12,
2289 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2290 SRP_OPT_QUEUE_SIZE = 1 << 14,
2291 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2292 SRP_OPT_IOC_GUID |
2293 SRP_OPT_DGID |
2294 SRP_OPT_PKEY |
2295 SRP_OPT_SERVICE_ID),
2296 };
2298 static const match_table_t srp_opt_tokens = {
2299 { SRP_OPT_ID_EXT, "id_ext=%s" },
2300 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2301 { SRP_OPT_DGID, "dgid=%s" },
2302 { SRP_OPT_PKEY, "pkey=%x" },
2303 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2304 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2305 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2306 { SRP_OPT_IO_CLASS, "io_class=%x" },
2307 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2308 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2309 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2310 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2311 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2312 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2313 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2314 { SRP_OPT_ERR, NULL }
2315 };
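/*
 * Parse the comma-separated option string written to add_target. The string
 * is duplicated with kstrdup(), split with strsep() and each token is matched
 * against srp_opt_tokens above; an unknown option fails the request, and
 * after parsing the mandatory options that make up SRP_OPT_ALL are verified
 * to be present.
 */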
2317 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2319 char *options, *sep_opt;
2322 substring_t args[MAX_OPT_ARGS];
2328 options = kstrdup(buf, GFP_KERNEL);
2333 while ((p = strsep(&sep_opt, ",")) != NULL) {
2337 token = match_token(p, srp_opt_tokens, args);
2341 case SRP_OPT_ID_EXT:
2342 p = match_strdup(args);
2347 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2351 case SRP_OPT_IOC_GUID:
2352 p = match_strdup(args);
2357 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2362 p = match_strdup(args);
2367 if (strlen(p) != 32) {
2368 pr_warn("bad dest GID parameter '%s'\n", p);
2373 for (i = 0; i < 16; ++i) {
2374 strlcpy(dgid, p + i * 2, 3);
2375 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2378 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2382 if (match_hex(args, &token)) {
2383 pr_warn("bad P_Key parameter '%s'\n", p);
2386 target->path.pkey = cpu_to_be16(token);
2389 case SRP_OPT_SERVICE_ID:
2390 p = match_strdup(args);
2395 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2396 target->path.service_id = target->service_id;
2400 case SRP_OPT_MAX_SECT:
2401 if (match_int(args, &token)) {
2402 pr_warn("bad max sect parameter '%s'\n", p);
2405 target->scsi_host->max_sectors = token;
2408 case SRP_OPT_QUEUE_SIZE:
2409 if (match_int(args, &token) || token < 1) {
2410 pr_warn("bad queue_size parameter '%s'\n", p);
2413 target->scsi_host->can_queue = token;
2414 target->queue_size = token + SRP_RSP_SQ_SIZE +
2415 SRP_TSK_MGMT_SQ_SIZE;
2416 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2417 target->scsi_host->cmd_per_lun = token;
2420 case SRP_OPT_MAX_CMD_PER_LUN:
2421 if (match_int(args, &token) || token < 1) {
2422 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2426 target->scsi_host->cmd_per_lun = token;
2429 case SRP_OPT_IO_CLASS:
2430 if (match_hex(args, &token)) {
2431 pr_warn("bad IO class parameter '%s'\n", p);
2434 if (token != SRP_REV10_IB_IO_CLASS &&
2435 token != SRP_REV16A_IB_IO_CLASS) {
2436 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2437 token, SRP_REV10_IB_IO_CLASS,
2438 SRP_REV16A_IB_IO_CLASS);
2441 target->io_class = token;
2444 case SRP_OPT_INITIATOR_EXT:
2445 p = match_strdup(args);
2450 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2454 case SRP_OPT_CMD_SG_ENTRIES:
2455 if (match_int(args, &token) || token < 1 || token > 255) {
2456 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2460 target->cmd_sg_cnt = token;
2463 case SRP_OPT_ALLOW_EXT_SG:
2464 if (match_int(args, &token)) {
2465 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2468 target->allow_ext_sg = !!token;
2471 case SRP_OPT_SG_TABLESIZE:
2472 if (match_int(args, &token) || token < 1 ||
2473 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2474 pr_warn("bad max sg_tablesize parameter '%s'\n",
2478 target->sg_tablesize = token;
2481 case SRP_OPT_COMP_VECTOR:
2482 if (match_int(args, &token) || token < 0) {
2483 pr_warn("bad comp_vector parameter '%s'\n", p);
2486 target->comp_vector = token;
2489 case SRP_OPT_TL_RETRY_COUNT:
2490 if (match_int(args, &token) || token < 2 || token > 7) {
2491 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2495 target->tl_retry_count = token;
2499 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2505 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2508 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2509 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2510 !(srp_opt_tokens[i].token & opt_mask))
2511 pr_warn("target creation request is missing parameter '%s'\n",
2512 srp_opt_tokens[i].pattern);
2514 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
2515 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2516 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
2517 target->scsi_host->cmd_per_lun,
2518 target->scsi_host->can_queue);
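/*
 * add_target store method: allocate a Scsi_Host and srp_target_port, fill in
 * defaults, parse the user-supplied options, reject duplicate connections,
 * allocate the request ring and IB resources, create a CM ID, connect to the
 * target and finally register it via srp_add_target(). Failures unwind in
 * reverse order through the error labels at the end of the function.
 */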
2525 static ssize_t srp_create_target(struct device *dev,
2526 struct device_attribute *attr,
2527 const char *buf, size_t count)
2529 struct srp_host *host =
2530 container_of(dev, struct srp_host, dev);
2531 struct Scsi_Host *target_host;
2532 struct srp_target_port *target;
2533 struct ib_device *ibdev = host->srp_dev->dev;
2536 target_host = scsi_host_alloc(&srp_template,
2537 sizeof (struct srp_target_port));
2541 target_host->transportt = ib_srp_transport_template;
2542 target_host->max_channel = 0;
2543 target_host->max_id = 1;
2544 target_host->max_lun = SRP_MAX_LUN;
2545 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2547 target = host_to_target(target_host);
2549 target->io_class = SRP_REV16A_IB_IO_CLASS;
2550 target->scsi_host = target_host;
2551 target->srp_host = host;
2552 target->lkey = host->srp_dev->mr->lkey;
2553 target->rkey = host->srp_dev->mr->rkey;
2554 target->cmd_sg_cnt = cmd_sg_entries;
2555 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
2556 target->allow_ext_sg = allow_ext_sg;
2557 target->tl_retry_count = 7;
2558 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
2560 ret = srp_parse_options(buf, target);
2564 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
2566 if (!srp_conn_unique(target->srp_host, target)) {
2567 shost_printk(KERN_INFO, target->scsi_host,
2568 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2569 be64_to_cpu(target->id_ext),
2570 be64_to_cpu(target->ioc_guid),
2571 be64_to_cpu(target->initiator_ext));
2576 if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2577 target->cmd_sg_cnt < target->sg_tablesize) {
2578 pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2579 target->sg_tablesize = target->cmd_sg_cnt;
2582 target_host->sg_tablesize = target->sg_tablesize;
2583 target->indirect_size = target->sg_tablesize *
2584 sizeof (struct srp_direct_buf);
2585 target->max_iu_len = sizeof (struct srp_cmd) +
2586 sizeof (struct srp_indirect_buf) +
2587 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
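/*
 * indirect_size is the byte size of a full table of sg_tablesize direct
 * descriptors; max_iu_len is the largest SRP_CMD information unit this
 * initiator will build: the fixed srp_cmd header, one srp_indirect_buf
 * descriptor and cmd_sg_cnt embedded srp_direct_buf entries.
 */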
2589 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
2590 INIT_WORK(&target->remove_work, srp_remove_work);
2591 spin_lock_init(&target->lock);
2592 INIT_LIST_HEAD(&target->free_tx);
2593 ret = srp_alloc_req_data(target);
2597 ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2599 shost_printk(KERN_DEBUG, target->scsi_host, PFX
2600 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2601 "service_id %016llx dgid %pI6\n",
2602 (unsigned long long) be64_to_cpu(target->id_ext),
2603 (unsigned long long) be64_to_cpu(target->ioc_guid),
2604 be16_to_cpu(target->path.pkey),
2605 (unsigned long long) be64_to_cpu(target->service_id),
2606 target->path.dgid.raw);
2608 ret = srp_create_target_ib(target);
2612 ret = srp_new_cm_id(target);
2616 ret = srp_connect_target(target);
2618 shost_printk(KERN_ERR, target->scsi_host,
2619 PFX "Connection failed\n");
2623 ret = srp_add_target(host, target);
2625 goto err_disconnect;
2630 srp_disconnect_target(target);
2633 ib_destroy_cm_id(target->cm_id);
2636 srp_free_target_ib(target);
2639 srp_free_req_data(target);
2642 scsi_host_put(target_host);
2647 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2649 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2652 struct srp_host *host = container_of(dev, struct srp_host, dev);
2654 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2657 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2659 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2662 struct srp_host *host = container_of(dev, struct srp_host, dev);
2664 return sprintf(buf, "%d\n", host->port);
2667 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
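/*
 * Allocate and register one srp_host per HCA port. Each host appears as
 * /sys/class/infiniband_srp/srp-<ibdev>-<port> and gets the add_target,
 * ibdev and port attribute files defined above.
 */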
2669 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2671 struct srp_host *host;
2673 host = kzalloc(sizeof *host, GFP_KERNEL);
2677 INIT_LIST_HEAD(&host->target_list);
2678 spin_lock_init(&host->target_lock);
2679 init_completion(&host->released);
2680 host->srp_dev = device;
2683 host->dev.class = &srp_class;
2684 host->dev.parent = device->dev->dma_device;
2685 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2687 if (device_register(&host->dev))
2689 if (device_create_file(&host->dev, &dev_attr_add_target))
2691 if (device_create_file(&host->dev, &dev_attr_ibdev))
2693 if (device_create_file(&host->dev, &dev_attr_port))
2699 device_unregister(&host->dev);
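/*
 * IB client "add" callback, invoked once per HCA. It queries the device,
 * allocates a PD and a DMA MR, tries to create an FMR pool (halving the
 * number of pages per FMR until creation succeeds or SRP_FMR_MIN_SIZE is
 * reached; the driver keeps working without a pool if that fails) and then
 * registers one srp_host per physical port (switch devices use port 0 only).
 */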
2707 static void srp_add_one(struct ib_device *device)
2709 struct srp_device *srp_dev;
2710 struct ib_device_attr *dev_attr;
2711 struct ib_fmr_pool_param fmr_param;
2712 struct srp_host *host;
2713 int max_pages_per_fmr, fmr_page_shift, s, e, p;
2715 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2719 if (ib_query_device(device, dev_attr)) {
2720 pr_warn("Query device failed for %s\n", device->name);
2724 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2728 /*
2729 * Use the smallest page size supported by the HCA, down to a
2730 * minimum of 4096 bytes. We're unlikely to build large sglists
2731 * out of smaller entries.
2732 */
2733 fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
2734 srp_dev->fmr_page_size = 1 << fmr_page_shift;
2735 srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
2736 srp_dev->fmr_max_size = srp_dev->fmr_page_size * SRP_FMR_SIZE;
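/*
 * Example: a page_size_cap whose lowest set bit corresponds to 4 KiB gives
 * ffs() - 1 == 12, so fmr_page_shift = 12, fmr_page_size = 4096,
 * fmr_page_mask = ~0xfffULL and fmr_max_size = 4096 * SRP_FMR_SIZE.
 */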
2738 INIT_LIST_HEAD(&srp_dev->dev_list);
2740 srp_dev->dev = device;
2741 srp_dev->pd = ib_alloc_pd(device);
2742 if (IS_ERR(srp_dev->pd))
2745 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2746 IB_ACCESS_LOCAL_WRITE |
2747 IB_ACCESS_REMOTE_READ |
2748 IB_ACCESS_REMOTE_WRITE);
2749 if (IS_ERR(srp_dev->mr))
2752 for (max_pages_per_fmr = SRP_FMR_SIZE;
2753 max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2754 max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2755 memset(&fmr_param, 0, sizeof fmr_param);
2756 fmr_param.pool_size = SRP_FMR_POOL_SIZE;
2757 fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
2758 fmr_param.cache = 1;
2759 fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2760 fmr_param.page_shift = fmr_page_shift;
2761 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
2762 IB_ACCESS_REMOTE_WRITE |
2763 IB_ACCESS_REMOTE_READ);
2765 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2766 if (!IS_ERR(srp_dev->fmr_pool))
2770 if (IS_ERR(srp_dev->fmr_pool))
2771 srp_dev->fmr_pool = NULL;
2773 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2778 e = device->phys_port_cnt;
2781 for (p = s; p <= e; ++p) {
2782 host = srp_add_port(srp_dev, p);
2784 list_add_tail(&host->list, &srp_dev->dev_list);
2787 ib_set_client_data(device, &srp_client, srp_dev);
2792 ib_dealloc_pd(srp_dev->pd);
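/*
 * IB client "remove" callback. For every port: unregister the sysfs device
 * and wait until it is released so that no new targets can be added, queue
 * removal work for all existing targets under the target lock, and flush
 * system_long_wq so that work completes. Only then are the FMR pool, the MR
 * and the PD torn down.
 */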
2801 static void srp_remove_one(struct ib_device *device)
2803 struct srp_device *srp_dev;
2804 struct srp_host *host, *tmp_host;
2805 struct srp_target_port *target;
2807 srp_dev = ib_get_client_data(device, &srp_client);
2811 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2812 device_unregister(&host->dev);
2813 /*
2814 * Wait for the sysfs entry to go away, so that no new
2815 * target ports can be created.
2816 */
2817 wait_for_completion(&host->released);
2819 /*
2820 * Remove all target ports.
2821 */
2822 spin_lock(&host->target_lock);
2823 list_for_each_entry(target, &host->target_list, list)
2824 srp_queue_remove_work(target);
2825 spin_unlock(&host->target_lock);
2827 /*
2828 * Wait for target port removal tasks.
2829 */
2830 flush_workqueue(system_long_wq);
2835 if (srp_dev->fmr_pool)
2836 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2837 ib_dereg_mr(srp_dev->mr);
2838 ib_dealloc_pd(srp_dev->pd);
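/*
 * Hooks handed to the SCSI SRP transport class via srp_attach_transport() in
 * srp_init_module() below. The reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo pointers expose the module parameters defined near the top of
 * this file as the defaults used by the transport class for each rport.
 */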
2843 static struct srp_function_template ib_srp_transport_functions = {
2844 .has_rport_state = true,
2845 .reset_timer_if_blocked = true,
2846 .reconnect_delay = &srp_reconnect_delay,
2847 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
2848 .dev_loss_tmo = &srp_dev_loss_tmo,
2849 .reconnect = srp_rport_reconnect,
2850 .rport_delete = srp_rport_delete,
2851 .terminate_rport_io = srp_terminate_io,
2852 };
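/*
 * Module initialization: sanity-check and clamp the scatter/gather module
 * parameters, then register (in order) the SRP transport template, the
 * infiniband_srp class, the IB SA client and the IB client; failures unwind
 * in reverse order.
 *
 * Example load (parameter values are illustrative only):
 *
 *   modprobe ib_srp cmd_sg_entries=32 indirect_sg_entries=128
 */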
2854 static int __init srp_init_module(void)
2858 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2860 if (srp_sg_tablesize) {
2861 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2862 if (!cmd_sg_entries)
2863 cmd_sg_entries = srp_sg_tablesize;
2866 if (!cmd_sg_entries)
2867 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2869 if (cmd_sg_entries > 255) {
2870 pr_warn("Clamping cmd_sg_entries to 255\n");
2871 cmd_sg_entries = 255;
2874 if (!indirect_sg_entries)
2875 indirect_sg_entries = cmd_sg_entries;
2876 else if (indirect_sg_entries < cmd_sg_entries) {
2877 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2879 indirect_sg_entries = cmd_sg_entries;
2882 ib_srp_transport_template =
2883 srp_attach_transport(&ib_srp_transport_functions);
2884 if (!ib_srp_transport_template)
2887 ret = class_register(&srp_class);
2889 pr_err("couldn't register class infiniband_srp\n");
2890 srp_release_transport(ib_srp_transport_template);
2894 ib_sa_register_client(&srp_sa_client);
2896 ret = ib_register_client(&srp_client);
2898 pr_err("couldn't register IB client\n");
2899 srp_release_transport(ib_srp_transport_template);
2900 ib_sa_unregister_client(&srp_sa_client);
2901 class_unregister(&srp_class);
2908 static void __exit srp_cleanup_module(void)
2910 ib_unregister_client(&srp_client);
2911 ib_sa_unregister_client(&srp_sa_client);
2912 class_unregister(&srp_class);
2913 srp_release_transport(ib_srp_transport_template);
2916 module_init(srp_init_module);
2917 module_exit(srp_cleanup_module);