/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static struct kernel_param_ops srp_tmo_ops;

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 60;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo = -1;
	int res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	}
	if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_fast_io_fail_tmo, tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
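/*
 * Illustrative example (not a real GUID): an ioc_guid of
 * 0x0005ad0000001234 begins with the Topspin OUI 00:05:ad, so
 * srp_target_is_topspin() returns true for that target whenever
 * topspin_workarounds is enabled.
 */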
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID	     |
						   IB_SA_PATH_REC_SGID	     |
						   IB_SA_PATH_REC_NUMB_PATH  |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &target->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = target->qp->qp_num;
	req->param.qp_type	      = target->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}
static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(enum ib_wc_status wc_status,
			      enum ib_wc_opcode wc_opcode,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     wc_opcode & IB_WC_RECV ? "receive" : "send",
			     wc_status);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, wc.opcode, target);
		}
	}
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len, result;

	result = srp_chkready(target->rport);
	if (unlikely(result)) {
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
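/*
 * Worked example with hypothetical values: for qp_attr->timeout = 14 and
 * qp_attr->retry_cnt = 7, T_tr = 4096 * 2^14 ns ~= 67.1 ms, so the worst
 * case before an error completion is 7 * 4 * 67.1 ms ~= 1879 ms, and
 * srp_compute_rq_tmo() returns msecs_to_jiffies(1879 + 1000), i.e. a
 * request queue timeout of just under three seconds.
 */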
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || !srp_claim_req(target, req, scmnd))
		return SUCCESS;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_finish_req(target, req, DID_RESET << 16);
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
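/*
 * Example with illustrative values only (HCA mlx4_0, port 1):
 *
 *     echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,\
 *     dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *     service_id=0002c90200402bd4 \
 *         > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */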
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" },
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" },
	{ SRP_OPT_DGID,			"dgid=%s" },
	{ SRP_OPT_PKEY,			"pkey=%x" },
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" },
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" },
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" },
	{ SRP_OPT_IO_CLASS,		"io_class=%x" },
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s" },
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u" },
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u" },
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u" },
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u" },
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u" },
	{ SRP_OPT_ERR,			NULL }
};
2188 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2190 char *options, *sep_opt;
2193 substring_t args[MAX_OPT_ARGS];
2199 options = kstrdup(buf, GFP_KERNEL);
2204 while ((p = strsep(&sep_opt, ",")) != NULL) {
2208 token = match_token(p, srp_opt_tokens, args);
2212 case SRP_OPT_ID_EXT:
2213 p = match_strdup(args);
2218 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2222 case SRP_OPT_IOC_GUID:
2223 p = match_strdup(args);
2228 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2233 p = match_strdup(args);
2238 if (strlen(p) != 32) {
2239 pr_warn("bad dest GID parameter '%s'\n", p);
2244 for (i = 0; i < 16; ++i) {
2245 strlcpy(dgid, p + i * 2, 3);
2246 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2249 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2253 if (match_hex(args, &token)) {
2254 pr_warn("bad P_Key parameter '%s'\n", p);
2257 target->path.pkey = cpu_to_be16(token);
2260 case SRP_OPT_SERVICE_ID:
2261 p = match_strdup(args);
2266 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2267 target->path.service_id = target->service_id;
2271 case SRP_OPT_MAX_SECT:
2272 if (match_int(args, &token)) {
2273 pr_warn("bad max sect parameter '%s'\n", p);
2276 target->scsi_host->max_sectors = token;
2279 case SRP_OPT_MAX_CMD_PER_LUN:
2280 if (match_int(args, &token)) {
2281 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2285 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2288 case SRP_OPT_IO_CLASS:
2289 if (match_hex(args, &token)) {
2290 pr_warn("bad IO class parameter '%s'\n", p);
2293 if (token != SRP_REV10_IB_IO_CLASS &&
2294 token != SRP_REV16A_IB_IO_CLASS) {
2295 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2296 token, SRP_REV10_IB_IO_CLASS,
2297 SRP_REV16A_IB_IO_CLASS);
2300 target->io_class = token;
2303 case SRP_OPT_INITIATOR_EXT:
2304 p = match_strdup(args);
2309 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2313 case SRP_OPT_CMD_SG_ENTRIES:
2314 if (match_int(args, &token) || token < 1 || token > 255) {
2315 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2319 target->cmd_sg_cnt = token;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;
2358 pr_warn("unknown parameter or missing value '%s' in target creation request\n",

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
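
/*
 * srp_create_target() - 'store' handler for the add_target attribute.
 *
 * Illustrative usage from user space (all values are placeholders; the
 * "srp-<device>-<port>" name follows the dev_set_name() format used in
 * srp_add_port() below):
 *
 *   echo "id_ext=<eui64>,ioc_guid=<eui64>,dgid=<32 hex digits>,\
 *   pkey=ffff,service_id=<eui64>" \
 *       > /sys/class/infiniband_srp/srp-<device>-<port>/add_target
 */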
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i, ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
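
	/* Options parsed from buf override the defaults set above. */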
	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
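	/*
	 * indirect_size is the byte size of a full descriptor table with
	 * sg_tablesize entries; max_iu_len bounds an SRP_CMD IU at the
	 * fixed header plus an indirect table holding cmd_sg_cnt direct
	 * descriptors inline.
	 */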
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
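
	/* Use GID index 0 of the local port as the path's source GID. */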
	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
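
/*
 * srp_add_port() - allocate an srp_host for one HCA port and register it
 * in sysfs together with its add_target, ibdev and port attributes.
 */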
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
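
/*
 * srp_add_one() - IB client callback, invoked once for each HCA that is
 * registered with the IB core.
 */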
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;
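
	/*
	 * Create an FMR pool, halving the requested pages per FMR (and the
	 * maximum mapping size along with it) until the HCA accepts the
	 * parameters; fall back to no pool if even SRP_FMR_MIN_SIZE fails.
	 */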
	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
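
/*
 * srp_remove_one() - IB client callback, invoked when an HCA is being
 * unregistered; tears down every host and target port that srp_add_one()
 * created for it.
 */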
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
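
/*
 * Callbacks handed to the SRP transport class; they connect the generic
 * rport state machine (fast_io_fail and dev_loss timers) to this driver.
 */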
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
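
/*
 * Module init: sanitize the scatter/gather module parameters, then
 * register the transport template, the sysfs class, the SA client and
 * the IB client, unwinding in reverse order on failure.
 */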
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);