drivers/infiniband/ulp/srp/ib_srp.c (renesas_kernel.git @ 15b4d2ce49894e3dcbc4a745ba98110c43e4f77a)
1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) PFX fmt
34
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43
44 #include <linux/atomic.h>
45
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_dbg.h>
49 #include <scsi/srp.h>
50 #include <scsi/scsi_transport_srp.h>
51
52 #include "ib_srp.h"
53
54 #define DRV_NAME        "ib_srp"
55 #define PFX             DRV_NAME ": "
56 #define DRV_VERSION     "1.0"
57 #define DRV_RELDATE     "July 1, 2013"
58
59 MODULE_AUTHOR("Roland Dreier");
60 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
61                    "v" DRV_VERSION " (" DRV_RELDATE ")");
62 MODULE_LICENSE("Dual BSD/GPL");
63
64 static unsigned int srp_sg_tablesize;
65 static unsigned int cmd_sg_entries;
66 static unsigned int indirect_sg_entries;
67 static bool allow_ext_sg;
68 static int topspin_workarounds = 1;
69
70 module_param(srp_sg_tablesize, uint, 0444);
71 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
72
73 module_param(cmd_sg_entries, uint, 0444);
74 MODULE_PARM_DESC(cmd_sg_entries,
75                  "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
76
77 module_param(indirect_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(indirect_sg_entries,
79                  "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
80
81 module_param(allow_ext_sg, bool, 0444);
82 MODULE_PARM_DESC(allow_ext_sg,
83                   "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
84
85 module_param(topspin_workarounds, int, 0444);
86 MODULE_PARM_DESC(topspin_workarounds,
87                  "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
88
89 static struct kernel_param_ops srp_tmo_ops;
90
91 static int srp_fast_io_fail_tmo = 15;
92 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
93                 S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(fast_io_fail_tmo,
95                  "Number of seconds between the observation of a transport"
96                  " layer error and failing all I/O. \"off\" means that this"
97                  " functionality is disabled.");
98
99 static int srp_dev_loss_tmo = 60;
100 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
101                 S_IRUGO | S_IWUSR);
102 MODULE_PARM_DESC(dev_loss_tmo,
103                  "Maximum number of seconds that the SRP transport should"
104                  " insulate transport layer errors. After this time has been"
105                  " exceeded the SCSI host is removed. Should be"
106                  " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
107                  " if fast_io_fail_tmo has not been set. \"off\" means that"
108                  " this functionality is disabled.");
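/*
 * Both timeout parameters above can be read and updated at runtime
 * (S_IRUGO | S_IWUSR), typically via /sys/module/ib_srp/parameters/,
 * e.g. "echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo".
 * srp_tmo_set() below maps "off" to -1 and cross-checks the two values
 * with srp_tmo_valid() before either of them is updated.
 */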
109
110 static void srp_add_one(struct ib_device *device);
111 static void srp_remove_one(struct ib_device *device);
112 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
113 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
114 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
115
116 static struct scsi_transport_template *ib_srp_transport_template;
117
118 static struct ib_client srp_client = {
119         .name   = "srp",
120         .add    = srp_add_one,
121         .remove = srp_remove_one
122 };
123
124 static struct ib_sa_client srp_sa_client;
125
126 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
127 {
128         int tmo = *(int *)kp->arg;
129
130         if (tmo >= 0)
131                 return sprintf(buffer, "%d", tmo);
132         else
133                 return sprintf(buffer, "off");
134 }
135
136 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
137 {
138         int tmo, res;
139
140         if (strncmp(val, "off", 3) != 0) {
141                 res = kstrtoint(val, 0, &tmo);
142                 if (res)
143                         goto out;
144         } else {
145                 tmo = -1;
146         }
147         if (kp->arg == &srp_fast_io_fail_tmo)
148                 res = srp_tmo_valid(tmo, srp_dev_loss_tmo);
149         else
150                 res = srp_tmo_valid(srp_fast_io_fail_tmo, tmo);
151         if (res)
152                 goto out;
153         *(int *)kp->arg = tmo;
154
155 out:
156         return res;
157 }
158
159 static struct kernel_param_ops srp_tmo_ops = {
160         .get = srp_tmo_get,
161         .set = srp_tmo_set,
162 };
163
164 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
165 {
166         return (struct srp_target_port *) host->hostdata;
167 }
168
169 static const char *srp_target_info(struct Scsi_Host *host)
170 {
171         return host_to_target(host)->target_name;
172 }
173
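/*
 * Returns true when Topspin/Cisco workarounds are enabled and the first
 * three bytes of the target's ioc_guid match the Topspin or Cisco OUI.
 */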
174 static int srp_target_is_topspin(struct srp_target_port *target)
175 {
176         static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
177         static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
178
179         return topspin_workarounds &&
180                 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
181                  !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
182 }
183
184 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
185                                    gfp_t gfp_mask,
186                                    enum dma_data_direction direction)
187 {
188         struct srp_iu *iu;
189
190         iu = kmalloc(sizeof *iu, gfp_mask);
191         if (!iu)
192                 goto out;
193
194         iu->buf = kzalloc(size, gfp_mask);
195         if (!iu->buf)
196                 goto out_free_iu;
197
198         iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
199                                     direction);
200         if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
201                 goto out_free_buf;
202
203         iu->size      = size;
204         iu->direction = direction;
205
206         return iu;
207
208 out_free_buf:
209         kfree(iu->buf);
210 out_free_iu:
211         kfree(iu);
212 out:
213         return NULL;
214 }
215
216 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
217 {
218         if (!iu)
219                 return;
220
221         ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
222                             iu->direction);
223         kfree(iu->buf);
224         kfree(iu);
225 }
226
227 static void srp_qp_event(struct ib_event *event, void *context)
228 {
229         pr_debug("QP event %d\n", event->event);
230 }
231
232 static int srp_init_qp(struct srp_target_port *target,
233                        struct ib_qp *qp)
234 {
235         struct ib_qp_attr *attr;
236         int ret;
237
238         attr = kmalloc(sizeof *attr, GFP_KERNEL);
239         if (!attr)
240                 return -ENOMEM;
241
242         ret = ib_find_pkey(target->srp_host->srp_dev->dev,
243                            target->srp_host->port,
244                            be16_to_cpu(target->path.pkey),
245                            &attr->pkey_index);
246         if (ret)
247                 goto out;
248
249         attr->qp_state        = IB_QPS_INIT;
250         attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
251                                     IB_ACCESS_REMOTE_WRITE);
252         attr->port_num        = target->srp_host->port;
253
254         ret = ib_modify_qp(qp, attr,
255                            IB_QP_STATE          |
256                            IB_QP_PKEY_INDEX     |
257                            IB_QP_ACCESS_FLAGS   |
258                            IB_QP_PORT);
259
260 out:
261         kfree(attr);
262         return ret;
263 }
264
265 static int srp_new_cm_id(struct srp_target_port *target)
266 {
267         struct ib_cm_id *new_cm_id;
268
269         new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
270                                     srp_cm_handler, target);
271         if (IS_ERR(new_cm_id))
272                 return PTR_ERR(new_cm_id);
273
274         if (target->cm_id)
275                 ib_destroy_cm_id(target->cm_id);
276         target->cm_id = new_cm_id;
277
278         return 0;
279 }
280
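/*
 * Allocate a fresh receive CQ, send CQ and RC QP for the target and move
 * the QP to the INIT state.  Any previously allocated QP/CQs are destroyed
 * only after the new ones have been created successfully, which allows this
 * function to be reused when reconnecting.
 */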
281 static int srp_create_target_ib(struct srp_target_port *target)
282 {
283         struct ib_qp_init_attr *init_attr;
284         struct ib_cq *recv_cq, *send_cq;
285         struct ib_qp *qp;
286         int ret;
287
288         init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
289         if (!init_attr)
290                 return -ENOMEM;
291
292         recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
293                                srp_recv_completion, NULL, target, SRP_RQ_SIZE,
294                                target->comp_vector);
295         if (IS_ERR(recv_cq)) {
296                 ret = PTR_ERR(recv_cq);
297                 goto err;
298         }
299
300         send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
301                                srp_send_completion, NULL, target, SRP_SQ_SIZE,
302                                target->comp_vector);
303         if (IS_ERR(send_cq)) {
304                 ret = PTR_ERR(send_cq);
305                 goto err_recv_cq;
306         }
307
308         ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
309
310         init_attr->event_handler       = srp_qp_event;
311         init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
312         init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
313         init_attr->cap.max_recv_sge    = 1;
314         init_attr->cap.max_send_sge    = 1;
315         init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
316         init_attr->qp_type             = IB_QPT_RC;
317         init_attr->send_cq             = send_cq;
318         init_attr->recv_cq             = recv_cq;
319
320         qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
321         if (IS_ERR(qp)) {
322                 ret = PTR_ERR(qp);
323                 goto err_send_cq;
324         }
325
326         ret = srp_init_qp(target, qp);
327         if (ret)
328                 goto err_qp;
329
330         if (target->qp)
331                 ib_destroy_qp(target->qp);
332         if (target->recv_cq)
333                 ib_destroy_cq(target->recv_cq);
334         if (target->send_cq)
335                 ib_destroy_cq(target->send_cq);
336
337         target->qp = qp;
338         target->recv_cq = recv_cq;
339         target->send_cq = send_cq;
340
341         kfree(init_attr);
342         return 0;
343
344 err_qp:
345         ib_destroy_qp(qp);
346
347 err_send_cq:
348         ib_destroy_cq(send_cq);
349
350 err_recv_cq:
351         ib_destroy_cq(recv_cq);
352
353 err:
354         kfree(init_attr);
355         return ret;
356 }
357
358 static void srp_free_target_ib(struct srp_target_port *target)
359 {
360         int i;
361
362         ib_destroy_qp(target->qp);
363         ib_destroy_cq(target->send_cq);
364         ib_destroy_cq(target->recv_cq);
365
366         target->qp = NULL;
367         target->send_cq = target->recv_cq = NULL;
368
369         for (i = 0; i < SRP_RQ_SIZE; ++i)
370                 srp_free_iu(target->srp_host, target->rx_ring[i]);
371         for (i = 0; i < SRP_SQ_SIZE; ++i)
372                 srp_free_iu(target->srp_host, target->tx_ring[i]);
373 }
374
375 static void srp_path_rec_completion(int status,
376                                     struct ib_sa_path_rec *pathrec,
377                                     void *target_ptr)
378 {
379         struct srp_target_port *target = target_ptr;
380
381         target->status = status;
382         if (status)
383                 shost_printk(KERN_ERR, target->scsi_host,
384                              PFX "Got failed path rec status %d\n", status);
385         else
386                 target->path = *pathrec;
387         complete(&target->done);
388 }
389
390 static int srp_lookup_path(struct srp_target_port *target)
391 {
392         target->path.numb_path = 1;
393
394         init_completion(&target->done);
395
396         target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
397                                                    target->srp_host->srp_dev->dev,
398                                                    target->srp_host->port,
399                                                    &target->path,
400                                                    IB_SA_PATH_REC_SERVICE_ID    |
401                                                    IB_SA_PATH_REC_DGID          |
402                                                    IB_SA_PATH_REC_SGID          |
403                                                    IB_SA_PATH_REC_NUMB_PATH     |
404                                                    IB_SA_PATH_REC_PKEY,
405                                                    SRP_PATH_REC_TIMEOUT_MS,
406                                                    GFP_KERNEL,
407                                                    srp_path_rec_completion,
408                                                    target, &target->path_query);
409         if (target->path_query_id < 0)
410                 return target->path_query_id;
411
412         wait_for_completion(&target->done);
413
414         if (target->status < 0)
415                 shost_printk(KERN_WARNING, target->scsi_host,
416                              PFX "Path record query failed\n");
417
418         return target->status;
419 }
420
421 static int srp_send_req(struct srp_target_port *target)
422 {
423         struct {
424                 struct ib_cm_req_param param;
425                 struct srp_login_req   priv;
426         } *req = NULL;
427         int status;
428
429         req = kzalloc(sizeof *req, GFP_KERNEL);
430         if (!req)
431                 return -ENOMEM;
432
433         req->param.primary_path               = &target->path;
434         req->param.alternate_path             = NULL;
435         req->param.service_id                 = target->service_id;
436         req->param.qp_num                     = target->qp->qp_num;
437         req->param.qp_type                    = target->qp->qp_type;
438         req->param.private_data               = &req->priv;
439         req->param.private_data_len           = sizeof req->priv;
440         req->param.flow_control               = 1;
441
442         get_random_bytes(&req->param.starting_psn, 4);
443         req->param.starting_psn              &= 0xffffff;
444
445         /*
446          * Pick some arbitrary defaults here; we could make these
447          * module parameters if anyone cared about setting them.
448          */
449         req->param.responder_resources        = 4;
450         req->param.remote_cm_response_timeout = 20;
451         req->param.local_cm_response_timeout  = 20;
452         req->param.retry_count                = target->tl_retry_count;
453         req->param.rnr_retry_count            = 7;
454         req->param.max_cm_retries             = 15;
455
456         req->priv.opcode        = SRP_LOGIN_REQ;
457         req->priv.tag           = 0;
458         req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
459         req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
460                                               SRP_BUF_FORMAT_INDIRECT);
461         /*
462          * In the published SRP specification (draft rev. 16a), the
463          * port identifier format is 8 bytes of ID extension followed
464          * by 8 bytes of GUID.  Older drafts put the two halves in the
465          * opposite order, so that the GUID comes first.
466          *
467          * Targets conforming to these obsolete drafts can be
468          * recognized by the I/O Class they report.
469          */
470         if (target->io_class == SRP_REV10_IB_IO_CLASS) {
471                 memcpy(req->priv.initiator_port_id,
472                        &target->path.sgid.global.interface_id, 8);
473                 memcpy(req->priv.initiator_port_id + 8,
474                        &target->initiator_ext, 8);
475                 memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
476                 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
477         } else {
478                 memcpy(req->priv.initiator_port_id,
479                        &target->initiator_ext, 8);
480                 memcpy(req->priv.initiator_port_id + 8,
481                        &target->path.sgid.global.interface_id, 8);
482                 memcpy(req->priv.target_port_id,     &target->id_ext, 8);
483                 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
484         }
485
486         /*
487          * Topspin/Cisco SRP targets will reject our login unless we
488          * zero out the first 8 bytes of our initiator port ID and set
489          * the second 8 bytes to the local node GUID.
490          */
491         if (srp_target_is_topspin(target)) {
492                 shost_printk(KERN_DEBUG, target->scsi_host,
493                              PFX "Topspin/Cisco initiator port ID workaround "
494                              "activated for target GUID %016llx\n",
495                              (unsigned long long) be64_to_cpu(target->ioc_guid));
496                 memset(req->priv.initiator_port_id, 0, 8);
497                 memcpy(req->priv.initiator_port_id + 8,
498                        &target->srp_host->srp_dev->dev->node_guid, 8);
499         }
500
501         status = ib_send_cm_req(target->cm_id, &req->param);
502
503         kfree(req);
504
505         return status;
506 }
507
508 static bool srp_queue_remove_work(struct srp_target_port *target)
509 {
510         bool changed = false;
511
512         spin_lock_irq(&target->lock);
513         if (target->state != SRP_TARGET_REMOVED) {
514                 target->state = SRP_TARGET_REMOVED;
515                 changed = true;
516         }
517         spin_unlock_irq(&target->lock);
518
519         if (changed)
520                 queue_work(system_long_wq, &target->remove_work);
521
522         return changed;
523 }
524
525 static bool srp_change_conn_state(struct srp_target_port *target,
526                                   bool connected)
527 {
528         bool changed = false;
529
530         spin_lock_irq(&target->lock);
531         if (target->connected != connected) {
532                 target->connected = connected;
533                 changed = true;
534         }
535         spin_unlock_irq(&target->lock);
536
537         return changed;
538 }
539
540 static void srp_disconnect_target(struct srp_target_port *target)
541 {
542         if (srp_change_conn_state(target, false)) {
543                 /* XXX should send SRP_I_LOGOUT request */
544
545                 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
546                         shost_printk(KERN_DEBUG, target->scsi_host,
547                                      PFX "Sending CM DREQ failed\n");
548                 }
549         }
550 }
551
552 static void srp_free_req_data(struct srp_target_port *target)
553 {
554         struct ib_device *ibdev = target->srp_host->srp_dev->dev;
555         struct srp_request *req;
556         int i;
557
558         for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
559                 kfree(req->fmr_list);
560                 kfree(req->map_page);
561                 if (req->indirect_dma_addr) {
562                         ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
563                                             target->indirect_size,
564                                             DMA_TO_DEVICE);
565                 }
566                 kfree(req->indirect_desc);
567         }
568 }
569
570 /**
571  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
572  * @shost: SCSI host whose attributes to remove from sysfs.
573  *
574  * Note: Any attributes defined in the host template that did not exist
575  * before this function was invoked will be ignored.
576  */
577 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
578 {
579         struct device_attribute **attr;
580
581         for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
582                 device_remove_file(&shost->shost_dev, *attr);
583 }
584
585 static void srp_remove_target(struct srp_target_port *target)
586 {
587         WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
588
589         srp_del_scsi_host_attr(target->scsi_host);
590         srp_rport_get(target->rport);
591         srp_remove_host(target->scsi_host);
592         scsi_remove_host(target->scsi_host);
593         srp_disconnect_target(target);
594         ib_destroy_cm_id(target->cm_id);
595         srp_free_target_ib(target);
596         srp_rport_put(target->rport);
597         srp_free_req_data(target);
598         scsi_host_put(target->scsi_host);
599 }
600
601 static void srp_remove_work(struct work_struct *work)
602 {
603         struct srp_target_port *target =
604                 container_of(work, struct srp_target_port, remove_work);
605
606         WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
607
608         srp_remove_target(target);
609
610         spin_lock(&target->srp_host->target_lock);
611         list_del(&target->list);
612         spin_unlock(&target->srp_host->target_lock);
613 }
614
615 static void srp_rport_delete(struct srp_rport *rport)
616 {
617         struct srp_target_port *target = rport->lld_data;
618
619         srp_queue_remove_work(target);
620 }
621
622 static int srp_connect_target(struct srp_target_port *target)
623 {
624         int retries = 3;
625         int ret;
626
627         WARN_ON_ONCE(target->connected);
628
629         target->qp_in_error = false;
630
631         ret = srp_lookup_path(target);
632         if (ret)
633                 return ret;
634
635         while (1) {
636                 init_completion(&target->done);
637                 ret = srp_send_req(target);
638                 if (ret)
639                         return ret;
640                 wait_for_completion(&target->done);
641
642                 /*
643                  * The CM event handling code will set status to
644                  * SRP_PORT_REDIRECT if we get a port redirect REJ
645                  * back, or SRP_DLID_REDIRECT if we get a lid/qp
646                  * redirect REJ back.
647                  */
648                 switch (target->status) {
649                 case 0:
650                         srp_change_conn_state(target, true);
651                         return 0;
652
653                 case SRP_PORT_REDIRECT:
654                         ret = srp_lookup_path(target);
655                         if (ret)
656                                 return ret;
657                         break;
658
659                 case SRP_DLID_REDIRECT:
660                         break;
661
662                 case SRP_STALE_CONN:
663                         /* Our current CM id was stale, and is now in timewait.
664                          * Try to reconnect with a new one.
665                          */
666                         if (!retries-- || srp_new_cm_id(target)) {
667                                 shost_printk(KERN_ERR, target->scsi_host, PFX
668                                              "giving up on stale connection\n");
669                                 target->status = -ECONNRESET;
670                                 return target->status;
671                         }
672
673                         shost_printk(KERN_ERR, target->scsi_host, PFX
674                                      "retrying stale connection\n");
675                         break;
676
677                 default:
678                         return target->status;
679                 }
680         }
681 }
682
683 static void srp_unmap_data(struct scsi_cmnd *scmnd,
684                            struct srp_target_port *target,
685                            struct srp_request *req)
686 {
687         struct ib_device *ibdev = target->srp_host->srp_dev->dev;
688         struct ib_pool_fmr **pfmr;
689
690         if (!scsi_sglist(scmnd) ||
691             (scmnd->sc_data_direction != DMA_TO_DEVICE &&
692              scmnd->sc_data_direction != DMA_FROM_DEVICE))
693                 return;
694
695         pfmr = req->fmr_list;
696         while (req->nfmr--)
697                 ib_fmr_pool_unmap(*pfmr++);
698
699         ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
700                         scmnd->sc_data_direction);
701 }
702
703 /**
704  * srp_claim_req - Take ownership of the scmnd associated with a request.
705  * @target: SRP target port.
706  * @req: SRP request.
707  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
708  *         ownership of @req->scmnd if it equals @scmnd.
709  *
710  * Return value:
711  * Either NULL or a pointer to the SCSI command the caller became owner of.
712  */
713 static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
714                                        struct srp_request *req,
715                                        struct scsi_cmnd *scmnd)
716 {
717         unsigned long flags;
718
719         spin_lock_irqsave(&target->lock, flags);
720         if (!scmnd) {
721                 scmnd = req->scmnd;
722                 req->scmnd = NULL;
723         } else if (req->scmnd == scmnd) {
724                 req->scmnd = NULL;
725         } else {
726                 scmnd = NULL;
727         }
728         spin_unlock_irqrestore(&target->lock, flags);
729
730         return scmnd;
731 }
732
733 /**
734  * srp_free_req() - Unmap data and add request to the free request list.
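 * @target: SRP target port.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to target->req_lim.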
735  */
736 static void srp_free_req(struct srp_target_port *target,
737                          struct srp_request *req, struct scsi_cmnd *scmnd,
738                          s32 req_lim_delta)
739 {
740         unsigned long flags;
741
742         srp_unmap_data(scmnd, target, req);
743
744         spin_lock_irqsave(&target->lock, flags);
745         target->req_lim += req_lim_delta;
746         list_add_tail(&req->list, &target->free_reqs);
747         spin_unlock_irqrestore(&target->lock, flags);
748 }
749
750 static void srp_finish_req(struct srp_target_port *target,
751                            struct srp_request *req, int result)
752 {
753         struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
754
755         if (scmnd) {
756                 srp_free_req(target, req, scmnd, 0);
757                 scmnd->result = result;
758                 scmnd->scsi_done(scmnd);
759         }
760 }
761
762 static void srp_terminate_io(struct srp_rport *rport)
763 {
764         struct srp_target_port *target = rport->lld_data;
765         int i;
766
767         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
768                 struct srp_request *req = &target->req_ring[i];
769                 srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
770         }
771 }
772
773 /*
774  * It is up to the caller to ensure that srp_rport_reconnect() calls are
775  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
776  * srp_reset_device() or srp_reset_host() calls will occur while this function
777  * is in progress. One way to realize that is not to call this function
778  * directly but to call srp_reconnect_rport() instead since that last function
779  * serializes calls of this function via rport->mutex and also blocks
780  * srp_queuecommand() calls before invoking this function.
781  */
782 static int srp_rport_reconnect(struct srp_rport *rport)
783 {
784         struct srp_target_port *target = rport->lld_data;
785         int i, ret;
786
787         srp_disconnect_target(target);
788         /*
789          * Now get a new local CM ID so that we avoid confusing the target in
790          * case things are really fouled up. Doing so also ensures that all CM
791          * callbacks will have finished before a new QP is allocated.
792          */
793         ret = srp_new_cm_id(target);
794         /*
795          * Whether or not creating a new CM ID succeeded, create a new
796          * QP. This guarantees that all completion callback function
797          * invocations have finished before request resetting starts.
798          */
799         if (ret == 0)
800                 ret = srp_create_target_ib(target);
801         else
802                 srp_create_target_ib(target);
803
804         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
805                 struct srp_request *req = &target->req_ring[i];
806                 srp_finish_req(target, req, DID_RESET << 16);
807         }
808
809         INIT_LIST_HEAD(&target->free_tx);
810         for (i = 0; i < SRP_SQ_SIZE; ++i)
811                 list_add(&target->tx_ring[i]->list, &target->free_tx);
812
813         if (ret == 0)
814                 ret = srp_connect_target(target);
815
816         if (ret == 0)
817                 shost_printk(KERN_INFO, target->scsi_host,
818                              PFX "reconnect succeeded\n");
819
820         return ret;
821 }
822
823 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
824                          unsigned int dma_len, u32 rkey)
825 {
826         struct srp_direct_buf *desc = state->desc;
827
828         desc->va = cpu_to_be64(dma_addr);
829         desc->key = cpu_to_be32(rkey);
830         desc->len = cpu_to_be32(dma_len);
831
832         state->total_len += dma_len;
833         state->desc++;
834         state->ndesc++;
835 }
836
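/*
 * Flush the pages accumulated in @state into a memory descriptor: a single
 * page becomes a direct descriptor that uses the global rkey, while several
 * pages are mapped through the FMR pool and described via the FMR's rkey.
 */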
837 static int srp_map_finish_fmr(struct srp_map_state *state,
838                               struct srp_target_port *target)
839 {
840         struct srp_device *dev = target->srp_host->srp_dev;
841         struct ib_pool_fmr *fmr;
842         u64 io_addr = 0;
843
844         if (!state->npages)
845                 return 0;
846
847         if (state->npages == 1) {
848                 srp_map_desc(state, state->base_dma_addr, state->fmr_len,
849                              target->rkey);
850                 state->npages = state->fmr_len = 0;
851                 return 0;
852         }
853
854         fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
855                                    state->npages, io_addr);
856         if (IS_ERR(fmr))
857                 return PTR_ERR(fmr);
858
859         *state->next_fmr++ = fmr;
860         state->nfmr++;
861
862         srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
863         state->npages = state->fmr_len = 0;
864         return 0;
865 }
866
867 static void srp_map_update_start(struct srp_map_state *state,
868                                  struct scatterlist *sg, int sg_index,
869                                  dma_addr_t dma_addr)
870 {
871         state->unmapped_sg = sg;
872         state->unmapped_index = sg_index;
873         state->unmapped_addr = dma_addr;
874 }
875
876 static int srp_map_sg_entry(struct srp_map_state *state,
877                             struct srp_target_port *target,
878                             struct scatterlist *sg, int sg_index,
879                             int use_fmr)
880 {
881         struct srp_device *dev = target->srp_host->srp_dev;
882         struct ib_device *ibdev = dev->dev;
883         dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
884         unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
885         unsigned int len;
886         int ret;
887
888         if (!dma_len)
889                 return 0;
890
891         if (use_fmr == SRP_MAP_NO_FMR) {
892                 /* Once we're in direct map mode for a request, we don't
893                  * go back to FMR mode, so no need to update anything
894                  * other than the descriptor.
895                  */
896                 srp_map_desc(state, dma_addr, dma_len, target->rkey);
897                 return 0;
898         }
899
900         /* If we start at an offset into the FMR page, don't merge into
901          * the current FMR. Finish it out, and use the kernel's MR for this
902          * sg entry. This is to avoid potential bugs on some SRP targets
903          * that were never quite defined, but went away when the initiator
904          * avoided using FMR on such page fragments.
905          */
906         if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
907                 ret = srp_map_finish_fmr(state, target);
908                 if (ret)
909                         return ret;
910
911                 srp_map_desc(state, dma_addr, dma_len, target->rkey);
912                 srp_map_update_start(state, NULL, 0, 0);
913                 return 0;
914         }
915
916         /* If this is the first sg to go into the FMR, save our position.
917          * We need to know the first unmapped entry, its index, and the
918          * first unmapped address within that entry to be able to restart
919          * mapping after an error.
920          */
921         if (!state->unmapped_sg)
922                 srp_map_update_start(state, sg, sg_index, dma_addr);
923
924         while (dma_len) {
925                 if (state->npages == SRP_FMR_SIZE) {
926                         ret = srp_map_finish_fmr(state, target);
927                         if (ret)
928                                 return ret;
929
930                         srp_map_update_start(state, sg, sg_index, dma_addr);
931                 }
932
933                 len = min_t(unsigned int, dma_len, dev->fmr_page_size);
934
935                 if (!state->npages)
936                         state->base_dma_addr = dma_addr;
937                 state->pages[state->npages++] = dma_addr;
938                 state->fmr_len += len;
939                 dma_addr += len;
940                 dma_len -= len;
941         }
942
943         /* If the last entry of the FMR wasn't a full page, then we need to
944          * close it out and start a new one -- we can only merge at page
945          * boundaries.
946          */
947         ret = 0;
948         if (len != dev->fmr_page_size) {
949                 ret = srp_map_finish_fmr(state, target);
950                 if (!ret)
951                         srp_map_update_start(state, NULL, 0, 0);
952         }
953         return ret;
954 }
955
956 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
957                         struct srp_request *req)
958 {
959         struct scatterlist *scat, *sg;
960         struct srp_cmd *cmd = req->cmd->buf;
961         int i, len, nents, count, use_fmr;
962         struct srp_device *dev;
963         struct ib_device *ibdev;
964         struct srp_map_state state;
965         struct srp_indirect_buf *indirect_hdr;
966         u32 table_len;
967         u8 fmt;
968
969         if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
970                 return sizeof (struct srp_cmd);
971
972         if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
973             scmnd->sc_data_direction != DMA_TO_DEVICE) {
974                 shost_printk(KERN_WARNING, target->scsi_host,
975                              PFX "Unhandled data direction %d\n",
976                              scmnd->sc_data_direction);
977                 return -EINVAL;
978         }
979
980         nents = scsi_sg_count(scmnd);
981         scat  = scsi_sglist(scmnd);
982
983         dev = target->srp_host->srp_dev;
984         ibdev = dev->dev;
985
986         count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
987         if (unlikely(count == 0))
988                 return -EIO;
989
990         fmt = SRP_DATA_DESC_DIRECT;
991         len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
992
993         if (count == 1) {
994                 /*
995                  * The midlayer only generated a single gather/scatter
996                  * entry, or DMA mapping coalesced everything to a
997                  * single entry.  So a direct descriptor along with
998                  * the DMA MR suffices.
999                  */
1000                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1001
1002                 buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1003                 buf->key = cpu_to_be32(target->rkey);
1004                 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1005
1006                 req->nfmr = 0;
1007                 goto map_complete;
1008         }
1009
1010         /* We have more than one scatter/gather entry, so build our indirect
1011          * descriptor table, trying to merge as many entries with FMR as we
1012          * can.
1013          */
1014         indirect_hdr = (void *) cmd->add_data;
1015
1016         ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1017                                    target->indirect_size, DMA_TO_DEVICE);
1018
1019         memset(&state, 0, sizeof(state));
1020         state.desc      = req->indirect_desc;
1021         state.pages     = req->map_page;
1022         state.next_fmr  = req->fmr_list;
1023
1024         use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;
1025
1026         for_each_sg(scat, sg, count, i) {
1027                 if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
1028                         /* FMR mapping failed, so backtrack to the first
1029                          * unmapped entry and continue on without using FMR.
1030                          */
1031                         dma_addr_t dma_addr;
1032                         unsigned int dma_len;
1033
1034 backtrack:
1035                         sg = state.unmapped_sg;
1036                         i = state.unmapped_index;
1037
1038                         dma_addr = ib_sg_dma_address(ibdev, sg);
1039                         dma_len = ib_sg_dma_len(ibdev, sg);
1040                         dma_len -= (state.unmapped_addr - dma_addr);
1041                         dma_addr = state.unmapped_addr;
1042                         use_fmr = SRP_MAP_NO_FMR;
1043                         srp_map_desc(&state, dma_addr, dma_len, target->rkey);
1044                 }
1045         }
1046
1047         if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
1048                 goto backtrack;
1049
1050         /* We've mapped the request, now pull as much of the indirect
1051          * descriptor table as we can into the command buffer. If this
1052          * target is not using an external indirect table, we are
1053          * guaranteed to fit into the command, as the SCSI layer won't
1054          * give us more S/G entries than we allow.
1055          */
1056         req->nfmr = state.nfmr;
1057         if (state.ndesc == 1) {
1058                 /* FMR mapping was able to collapse this to one entry,
1059                  * so use a direct descriptor.
1060                  */
1061                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1062
1063                 *buf = req->indirect_desc[0];
1064                 goto map_complete;
1065         }
1066
1067         if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1068                                                 !target->allow_ext_sg)) {
1069                 shost_printk(KERN_ERR, target->scsi_host,
1070                              "Could not fit S/G list into SRP_CMD\n");
1071                 return -EIO;
1072         }
1073
1074         count = min(state.ndesc, target->cmd_sg_cnt);
1075         table_len = state.ndesc * sizeof (struct srp_direct_buf);
1076
1077         fmt = SRP_DATA_DESC_INDIRECT;
1078         len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1079         len += count * sizeof (struct srp_direct_buf);
1080
1081         memcpy(indirect_hdr->desc_list, req->indirect_desc,
1082                count * sizeof (struct srp_direct_buf));
1083
1084         indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1085         indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1086         indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1087         indirect_hdr->len = cpu_to_be32(state.total_len);
1088
1089         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1090                 cmd->data_out_desc_cnt = count;
1091         else
1092                 cmd->data_in_desc_cnt = count;
1093
1094         ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1095                                       DMA_TO_DEVICE);
1096
1097 map_complete:
1098         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1099                 cmd->buf_fmt = fmt << 4;
1100         else
1101                 cmd->buf_fmt = fmt;
1102
1103         return len;
1104 }
1105
1106 /*
1107  * Return an IU and possible credit to the free pool
1108  */
1109 static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
1110                           enum srp_iu_type iu_type)
1111 {
1112         unsigned long flags;
1113
1114         spin_lock_irqsave(&target->lock, flags);
1115         list_add(&iu->list, &target->free_tx);
1116         if (iu_type != SRP_IU_RSP)
1117                 ++target->req_lim;
1118         spin_unlock_irqrestore(&target->lock, flags);
1119 }
1120
1121 /*
1122  * Must be called with target->lock held to protect req_lim and free_tx.
1123  * If IU is not sent, it must be returned using srp_put_tx_iu().
1124  *
1125  * Note:
1126  * An upper limit for the number of allocated information units for each
1127  * request type is:
1128  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1129  *   more than Scsi_Host.can_queue requests.
1130  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1131  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1132  *   one unanswered SRP request to an initiator.
1133  */
1134 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
1135                                       enum srp_iu_type iu_type)
1136 {
1137         s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1138         struct srp_iu *iu;
1139
1140         srp_send_completion(target->send_cq, target);
1141
1142         if (list_empty(&target->free_tx))
1143                 return NULL;
1144
1145         /* Initiator responses to target requests do not consume credits */
1146         if (iu_type != SRP_IU_RSP) {
1147                 if (target->req_lim <= rsv) {
1148                         ++target->zero_req_lim;
1149                         return NULL;
1150                 }
1151
1152                 --target->req_lim;
1153         }
1154
1155         iu = list_first_entry(&target->free_tx, struct srp_iu, list);
1156         list_del(&iu->list);
1157         return iu;
1158 }
1159
1160 static int srp_post_send(struct srp_target_port *target,
1161                          struct srp_iu *iu, int len)
1162 {
1163         struct ib_sge list;
1164         struct ib_send_wr wr, *bad_wr;
1165
1166         list.addr   = iu->dma;
1167         list.length = len;
1168         list.lkey   = target->lkey;
1169
1170         wr.next       = NULL;
1171         wr.wr_id      = (uintptr_t) iu;
1172         wr.sg_list    = &list;
1173         wr.num_sge    = 1;
1174         wr.opcode     = IB_WR_SEND;
1175         wr.send_flags = IB_SEND_SIGNALED;
1176
1177         return ib_post_send(target->qp, &wr, &bad_wr);
1178 }
1179
1180 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1181 {
1182         struct ib_recv_wr wr, *bad_wr;
1183         struct ib_sge list;
1184
1185         list.addr   = iu->dma;
1186         list.length = iu->size;
1187         list.lkey   = target->lkey;
1188
1189         wr.next     = NULL;
1190         wr.wr_id    = (uintptr_t) iu;
1191         wr.sg_list  = &list;
1192         wr.num_sge  = 1;
1193
1194         return ib_post_recv(target->qp, &wr, &bad_wr);
1195 }
1196
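/*
 * Process an SRP_RSP information unit.  Task-management responses (tag bit
 * SRP_TAG_TSK_MGMT set) only update the request limit and complete
 * tsk_mgmt_done; command responses look up the request by tag, copy any
 * sense data and residual counts into the SCSI command and complete it.
 */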
1197 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
1198 {
1199         struct srp_request *req;
1200         struct scsi_cmnd *scmnd;
1201         unsigned long flags;
1202
1203         if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1204                 spin_lock_irqsave(&target->lock, flags);
1205                 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1206                 spin_unlock_irqrestore(&target->lock, flags);
1207
1208                 target->tsk_mgmt_status = -1;
1209                 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1210                         target->tsk_mgmt_status = rsp->data[3];
1211                 complete(&target->tsk_mgmt_done);
1212         } else {
1213                 req = &target->req_ring[rsp->tag];
1214                 scmnd = srp_claim_req(target, req, NULL);
1215                 if (!scmnd) {
1216                         shost_printk(KERN_ERR, target->scsi_host,
1217                                      "Null scmnd for RSP w/tag %016llx\n",
1218                                      (unsigned long long) rsp->tag);
1219
1220                         spin_lock_irqsave(&target->lock, flags);
1221                         target->req_lim += be32_to_cpu(rsp->req_lim_delta);
1222                         spin_unlock_irqrestore(&target->lock, flags);
1223
1224                         return;
1225                 }
1226                 scmnd->result = rsp->status;
1227
1228                 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1229                         memcpy(scmnd->sense_buffer, rsp->data +
1230                                be32_to_cpu(rsp->resp_data_len),
1231                                min_t(int, be32_to_cpu(rsp->sense_data_len),
1232                                      SCSI_SENSE_BUFFERSIZE));
1233                 }
1234
1235                 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
1236                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1237                 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
1238                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1239
1240                 srp_free_req(target, req, scmnd,
1241                              be32_to_cpu(rsp->req_lim_delta));
1242
1243                 scmnd->host_scribble = NULL;
1244                 scmnd->scsi_done(scmnd);
1245         }
1246 }
1247
1248 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
1249                                void *rsp, int len)
1250 {
1251         struct ib_device *dev = target->srp_host->srp_dev->dev;
1252         unsigned long flags;
1253         struct srp_iu *iu;
1254         int err;
1255
1256         spin_lock_irqsave(&target->lock, flags);
1257         target->req_lim += req_delta;
1258         iu = __srp_get_tx_iu(target, SRP_IU_RSP);
1259         spin_unlock_irqrestore(&target->lock, flags);
1260
1261         if (!iu) {
1262                 shost_printk(KERN_ERR, target->scsi_host, PFX
1263                              "no IU available to send response\n");
1264                 return 1;
1265         }
1266
1267         ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1268         memcpy(iu->buf, rsp, len);
1269         ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1270
1271         err = srp_post_send(target, iu, len);
1272         if (err) {
1273                 shost_printk(KERN_ERR, target->scsi_host, PFX
1274                              "unable to post response: %d\n", err);
1275                 srp_put_tx_iu(target, iu, SRP_IU_RSP);
1276         }
1277
1278         return err;
1279 }
1280
1281 static void srp_process_cred_req(struct srp_target_port *target,
1282                                  struct srp_cred_req *req)
1283 {
1284         struct srp_cred_rsp rsp = {
1285                 .opcode = SRP_CRED_RSP,
1286                 .tag = req->tag,
1287         };
1288         s32 delta = be32_to_cpu(req->req_lim_delta);
1289
1290         if (srp_response_common(target, delta, &rsp, sizeof rsp))
1291                 shost_printk(KERN_ERR, target->scsi_host, PFX
1292                              "problems processing SRP_CRED_REQ\n");
1293 }
1294
1295 static void srp_process_aer_req(struct srp_target_port *target,
1296                                 struct srp_aer_req *req)
1297 {
1298         struct srp_aer_rsp rsp = {
1299                 .opcode = SRP_AER_RSP,
1300                 .tag = req->tag,
1301         };
1302         s32 delta = be32_to_cpu(req->req_lim_delta);
1303
1304         shost_printk(KERN_ERR, target->scsi_host, PFX
1305                      "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1306
1307         if (srp_response_common(target, delta, &rsp, sizeof rsp))
1308                 shost_printk(KERN_ERR, target->scsi_host, PFX
1309                              "problems processing SRP_AER_REQ\n");
1310 }
1311
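/*
 * Dispatch a received information unit based on its opcode and repost the
 * receive buffer afterwards so the receive queue stays full.
 */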
1312 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1313 {
1314         struct ib_device *dev = target->srp_host->srp_dev->dev;
1315         struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1316         int res;
1317         u8 opcode;
1318
1319         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1320                                    DMA_FROM_DEVICE);
1321
1322         opcode = *(u8 *) iu->buf;
1323
1324         if (0) {
1325                 shost_printk(KERN_ERR, target->scsi_host,
1326                              PFX "recv completion, opcode 0x%02x\n", opcode);
1327                 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1328                                iu->buf, wc->byte_len, true);
1329         }
1330
1331         switch (opcode) {
1332         case SRP_RSP:
1333                 srp_process_rsp(target, iu->buf);
1334                 break;
1335
1336         case SRP_CRED_REQ:
1337                 srp_process_cred_req(target, iu->buf);
1338                 break;
1339
1340         case SRP_AER_REQ:
1341                 srp_process_aer_req(target, iu->buf);
1342                 break;
1343
1344         case SRP_T_LOGOUT:
1345                 /* XXX Handle target logout */
1346                 shost_printk(KERN_WARNING, target->scsi_host,
1347                              PFX "Got target logout request\n");
1348                 break;
1349
1350         default:
1351                 shost_printk(KERN_WARNING, target->scsi_host,
1352                              PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1353                 break;
1354         }
1355
1356         ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1357                                       DMA_FROM_DEVICE);
1358
1359         res = srp_post_recv(target, iu);
1360         if (res != 0)
1361                 shost_printk(KERN_ERR, target->scsi_host,
1362                              PFX "Recv failed with error code %d\n", res);
1363 }
1364
1365 static void srp_handle_qp_err(enum ib_wc_status wc_status,
1366                               enum ib_wc_opcode wc_opcode,
1367                               struct srp_target_port *target)
1368 {
1369         if (target->connected && !target->qp_in_error) {
1370                 shost_printk(KERN_ERR, target->scsi_host,
1371                              PFX "failed %s status %d\n",
1372                              wc_opcode & IB_WC_RECV ? "receive" : "send",
1373                              wc_status);
1374         }
1375         target->qp_in_error = true;
1376 }
1377
1378 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1379 {
1380         struct srp_target_port *target = target_ptr;
1381         struct ib_wc wc;
1382
1383         ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1384         while (ib_poll_cq(cq, 1, &wc) > 0) {
1385                 if (likely(wc.status == IB_WC_SUCCESS)) {
1386                         srp_handle_recv(target, &wc);
1387                 } else {
1388                         srp_handle_qp_err(wc.status, wc.opcode, target);
1389                 }
1390         }
1391 }
1392
1393 static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1394 {
1395         struct srp_target_port *target = target_ptr;
1396         struct ib_wc wc;
1397         struct srp_iu *iu;
1398
1399         while (ib_poll_cq(cq, 1, &wc) > 0) {
1400                 if (likely(wc.status == IB_WC_SUCCESS)) {
1401                         iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1402                         list_add(&iu->list, &target->free_tx);
1403                 } else {
1404                         srp_handle_qp_err(wc.status, wc.opcode, target);
1405                 }
1406         }
1407 }
1408
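/*
 * Queue a SCSI command: take a TX information unit and a free request slot,
 * build the SRP_CMD IU, map the data buffer and post the send.  If mapping
 * or posting fails, the resources are returned and the midlayer is asked to
 * retry via SCSI_MLQUEUE_HOST_BUSY.
 */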
1409 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1410 {
1411         struct srp_target_port *target = host_to_target(shost);
1412         struct srp_request *req;
1413         struct srp_iu *iu;
1414         struct srp_cmd *cmd;
1415         struct ib_device *dev;
1416         unsigned long flags;
1417         int len, result;
1418
1419         result = srp_chkready(target->rport);
1420         if (unlikely(result)) {
1421                 scmnd->result = result;
1422                 scmnd->scsi_done(scmnd);
1423                 return 0;
1424         }
1425
1426         spin_lock_irqsave(&target->lock, flags);
1427         iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1428         if (!iu)
1429                 goto err_unlock;
1430
1431         req = list_first_entry(&target->free_reqs, struct srp_request, list);
1432         list_del(&req->list);
1433         spin_unlock_irqrestore(&target->lock, flags);
1434
1435         dev = target->srp_host->srp_dev->dev;
1436         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
1437                                    DMA_TO_DEVICE);
1438
1439         scmnd->result        = 0;
1440         scmnd->host_scribble = (void *) req;
1441
1442         cmd = iu->buf;
1443         memset(cmd, 0, sizeof *cmd);
1444
1445         cmd->opcode = SRP_CMD;
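        /*
         * SRP carries an 8-byte SCSI LUN; shifting the flat LUN into the
         * two most significant bytes puts it in the first level of that
         * structure (peripheral device addressing for LUNs below 256).
         */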
1446         cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
1447         cmd->tag    = req->index;
1448         memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1449
1450         req->scmnd    = scmnd;
1451         req->cmd      = iu;
1452
1453         len = srp_map_data(scmnd, target, req);
1454         if (len < 0) {
1455                 shost_printk(KERN_ERR, target->scsi_host,
1456                              PFX "Failed to map data\n");
1457                 goto err_iu;
1458         }
1459
1460         ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
1461                                       DMA_TO_DEVICE);
1462
1463         if (srp_post_send(target, iu, len)) {
1464                 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1465                 goto err_unmap;
1466         }
1467
1468         return 0;
1469
1470 err_unmap:
1471         srp_unmap_data(scmnd, target, req);
1472
1473 err_iu:
1474         srp_put_tx_iu(target, iu, SRP_IU_CMD);
1475
1476         spin_lock_irqsave(&target->lock, flags);
1477         list_add(&req->list, &target->free_reqs);
1478
1479 err_unlock:
1480         spin_unlock_irqrestore(&target->lock, flags);
1481
1482         return SCSI_MLQUEUE_HOST_BUSY;
1483 }
1484
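/*
 * Allocate the SRP_RQ_SIZE receive IUs and SRP_SQ_SIZE transmit IUs for this
 * target; transmit IUs are queued on free_tx.  On allocation failure both
 * rings are walked, whatever was allocated is freed and the slots are
 * cleared.
 */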
1485 static int srp_alloc_iu_bufs(struct srp_target_port *target)
1486 {
1487         int i;
1488
1489         for (i = 0; i < SRP_RQ_SIZE; ++i) {
1490                 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1491                                                   target->max_ti_iu_len,
1492                                                   GFP_KERNEL, DMA_FROM_DEVICE);
1493                 if (!target->rx_ring[i])
1494                         goto err;
1495         }
1496
1497         for (i = 0; i < SRP_SQ_SIZE; ++i) {
1498                 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1499                                                   target->max_iu_len,
1500                                                   GFP_KERNEL, DMA_TO_DEVICE);
1501                 if (!target->tx_ring[i])
1502                         goto err;
1503
1504                 list_add(&target->tx_ring[i]->list, &target->free_tx);
1505         }
1506
1507         return 0;
1508
1509 err:
1510         for (i = 0; i < SRP_RQ_SIZE; ++i) {
1511                 srp_free_iu(target->srp_host, target->rx_ring[i]);
1512                 target->rx_ring[i] = NULL;
1513         }
1514
1515         for (i = 0; i < SRP_SQ_SIZE; ++i) {
1516                 srp_free_iu(target->srp_host, target->tx_ring[i]);
1517                 target->tx_ring[i] = NULL;
1518         }
1519
1520         return -ENOMEM;
1521 }
1522
1523 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1524 {
1525         uint64_t T_tr_ns, max_compl_time_ms;
1526         uint32_t rq_tmo_jiffies;
1527
1528         /*
1529          * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1530          * table 91), both the QP timeout and the retry count have to be set
1531          * for RC QP's during the RTR to RTS transition.
1532          * for RC QPs during the RTR to RTS transition.
1533         WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1534                      (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1535
1536         /*
1537          * Return a timeout one second longer than the largest time it can
1538          * take before an error completion is generated; the caller stores
1539          * this in target->rq_tmo_jiffies. See also C9-140..142 in the IBTA
1540          * spec for how to convert the QP Local ACK Timeout to nanoseconds.
1541          */
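        /*
         * Illustrative example (the values are hypothetical, not taken from
         * this code): with qp_attr->timeout == 19 and qp_attr->retry_cnt == 7,
         * T_tr_ns = 4096 * 2^19 ns ~= 2.15 s and max_compl_time_ms ~=
         * 7 * 4 * 2147 ms ~= 60 s, so the returned timeout corresponds to
         * roughly 61 seconds.
         */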
1542         T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1543         max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1544         do_div(max_compl_time_ms, NSEC_PER_MSEC);
1545         rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1546
1547         return rq_tmo_jiffies;
1548 }
1549
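/*
 * Handle a CM REP: record the maximum IU length and initial request limit
 * from the SRP login response, allocate the IU rings on the first login,
 * transition the QP through RTR to RTS, post the initial receives and send
 * the CM RTU.  The outcome is reported through target->status.
 */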
1550 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1551                                struct srp_login_rsp *lrsp,
1552                                struct srp_target_port *target)
1553 {
1554         struct ib_qp_attr *qp_attr = NULL;
1555         int attr_mask = 0;
1556         int ret;
1557         int i;
1558
1559         if (lrsp->opcode == SRP_LOGIN_RSP) {
1560                 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1561                 target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
1562
1563                 /*
1564                  * Reserve credits for task management so we don't
1565                  * bounce requests back to the SCSI mid-layer.
1566                  */
1567                 target->scsi_host->can_queue
1568                         = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1569                               target->scsi_host->can_queue);
1570         } else {
1571                 shost_printk(KERN_WARNING, target->scsi_host,
1572                              PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1573                 ret = -ECONNRESET;
1574                 goto error;
1575         }
1576
1577         if (!target->rx_ring[0]) {
1578                 ret = srp_alloc_iu_bufs(target);
1579                 if (ret)
1580                         goto error;
1581         }
1582
1583         ret = -ENOMEM;
1584         qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1585         if (!qp_attr)
1586                 goto error;
1587
1588         qp_attr->qp_state = IB_QPS_RTR;
1589         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1590         if (ret)
1591                 goto error_free;
1592
1593         ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1594         if (ret)
1595                 goto error_free;
1596
1597         for (i = 0; i < SRP_RQ_SIZE; i++) {
1598                 struct srp_iu *iu = target->rx_ring[i];
1599                 ret = srp_post_recv(target, iu);
1600                 if (ret)
1601                         goto error_free;
1602         }
1603
1604         qp_attr->qp_state = IB_QPS_RTS;
1605         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1606         if (ret)
1607                 goto error_free;
1608
1609         target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1610
1611         ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1612         if (ret)
1613                 goto error_free;
1614
1615         ret = ib_send_cm_rtu(cm_id, NULL, 0);
1616
1617 error_free:
1618         kfree(qp_attr);
1619
1620 error:
1621         target->status = ret;
1622 }
1623
1624 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1625                                struct ib_cm_event *event,
1626                                struct srp_target_port *target)
1627 {
1628         struct Scsi_Host *shost = target->scsi_host;
1629         struct ib_class_port_info *cpi;
1630         int opcode;
1631
1632         switch (event->param.rej_rcvd.reason) {
1633         case IB_CM_REJ_PORT_CM_REDIRECT:
1634                 cpi = event->param.rej_rcvd.ari;
1635                 target->path.dlid = cpi->redirect_lid;
1636                 target->path.pkey = cpi->redirect_pkey;
1637                 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1638                 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1639
1640                 target->status = target->path.dlid ?
1641                         SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1642                 break;
1643
1644         case IB_CM_REJ_PORT_REDIRECT:
1645                 if (srp_target_is_topspin(target)) {
1646                         /*
1647                          * Topspin/Cisco SRP gateways incorrectly send
1648                          * reject reason code 25 when they mean 24
1649                          * (port redirect).
1650                          */
1651                         memcpy(target->path.dgid.raw,
1652                                event->param.rej_rcvd.ari, 16);
1653
1654                         shost_printk(KERN_DEBUG, shost,
1655                                      PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1656                                      (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1657                                      (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1658
1659                         target->status = SRP_PORT_REDIRECT;
1660                 } else {
1661                         shost_printk(KERN_WARNING, shost,
1662                                      "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1663                         target->status = -ECONNRESET;
1664                 }
1665                 break;
1666
1667         case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1668                 shost_printk(KERN_WARNING, shost,
1669                             "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1670                 target->status = -ECONNRESET;
1671                 break;
1672
1673         case IB_CM_REJ_CONSUMER_DEFINED:
1674                 opcode = *(u8 *) event->private_data;
1675                 if (opcode == SRP_LOGIN_REJ) {
1676                         struct srp_login_rej *rej = event->private_data;
1677                         u32 reason = be32_to_cpu(rej->reason);
1678
1679                         if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
1680                                 shost_printk(KERN_WARNING, shost,
1681                                              PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1682                         else
1683                                 shost_printk(KERN_WARNING, shost,
1684                                             PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1685                 } else
1686                         shost_printk(KERN_WARNING, shost,
1687                                      "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1688                                      " opcode 0x%02x\n", opcode);
1689                 target->status = -ECONNRESET;
1690                 break;
1691
1692         case IB_CM_REJ_STALE_CONN:
1693                 shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
1694                 target->status = SRP_STALE_CONN;
1695                 break;
1696
1697         default:
1698                 shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
1699                              event->param.rej_rcvd.reason);
1700                 target->status = -ECONNRESET;
1701         }
1702 }
1703
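/*
 * Connection manager callback.  REQ errors and REP/REJ events complete
 * target->done so the connect path waiting on it can continue; a DREQ from
 * the target marks the connection closed and is acknowledged with a DREP.
 */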
1704 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1705 {
1706         struct srp_target_port *target = cm_id->context;
1707         int comp = 0;
1708
1709         switch (event->event) {
1710         case IB_CM_REQ_ERROR:
1711                 shost_printk(KERN_DEBUG, target->scsi_host,
1712                              PFX "Sending CM REQ failed\n");
1713                 comp = 1;
1714                 target->status = -ECONNRESET;
1715                 break;
1716
1717         case IB_CM_REP_RECEIVED:
1718                 comp = 1;
1719                 srp_cm_rep_handler(cm_id, event->private_data, target);
1720                 break;
1721
1722         case IB_CM_REJ_RECEIVED:
1723                 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
1724                 comp = 1;
1725
1726                 srp_cm_rej_handler(cm_id, event, target);
1727                 break;
1728
1729         case IB_CM_DREQ_RECEIVED:
1730                 shost_printk(KERN_WARNING, target->scsi_host,
1731                              PFX "DREQ received - connection closed\n");
1732                 srp_change_conn_state(target, false);
1733                 if (ib_send_cm_drep(cm_id, NULL, 0))
1734                         shost_printk(KERN_ERR, target->scsi_host,
1735                                      PFX "Sending CM DREP failed\n");
1736                 break;
1737
1738         case IB_CM_TIMEWAIT_EXIT:
1739                 shost_printk(KERN_ERR, target->scsi_host,
1740                              PFX "connection closed\n");
1741
1742                 target->status = 0;
1743                 break;
1744
1745         case IB_CM_MRA_RECEIVED:
1746         case IB_CM_DREQ_ERROR:
1747         case IB_CM_DREP_RECEIVED:
1748                 break;
1749
1750         default:
1751                 shost_printk(KERN_WARNING, target->scsi_host,
1752                              PFX "Unhandled CM event %d\n", event->event);
1753                 break;
1754         }
1755
1756         if (comp)
1757                 complete(&target->done);
1758
1759         return 0;
1760 }
1761
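/*
 * Send an SRP task management request (e.g. ABORT TASK or LUN RESET) for the
 * given request tag and LUN and wait up to SRP_ABORT_TIMEOUT_MS for the
 * response handler to complete target->tsk_mgmt_done.  Returns 0 on success
 * and -1 on any failure or timeout.
 */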
1762 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1763                              u64 req_tag, unsigned int lun, u8 func)
1764 {
1765         struct ib_device *dev = target->srp_host->srp_dev->dev;
1766         struct srp_iu *iu;
1767         struct srp_tsk_mgmt *tsk_mgmt;
1768
1769         if (!target->connected || target->qp_in_error)
1770                 return -1;
1771
1772         init_completion(&target->tsk_mgmt_done);
1773
1774         spin_lock_irq(&target->lock);
1775         iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1776         spin_unlock_irq(&target->lock);
1777
1778         if (!iu)
1779                 return -1;
1780
1781         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1782                                    DMA_TO_DEVICE);
1783         tsk_mgmt = iu->buf;
1784         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1785
1786         tsk_mgmt->opcode        = SRP_TSK_MGMT;
1787         tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
1788         tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
1789         tsk_mgmt->tsk_mgmt_func = func;
1790         tsk_mgmt->task_tag      = req_tag;
1791
1792         ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1793                                       DMA_TO_DEVICE);
1794         if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1795                 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1796                 return -1;
1797         }
1798
1799         if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1800                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1801                 return -1;
1802
1803         return 0;
1804 }
1805
1806 static int srp_abort(struct scsi_cmnd *scmnd)
1807 {
1808         struct srp_target_port *target = host_to_target(scmnd->device->host);
1809         struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1810         int ret;
1811
1812         shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1813
1814         if (!req || !srp_claim_req(target, req, scmnd))
1815                 return FAILED;
1816         if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1817                               SRP_TSK_ABORT_TASK) == 0)
1818                 ret = SUCCESS;
1819         else if (target->rport->state == SRP_RPORT_LOST)
1820                 ret = FAST_IO_FAIL;
1821         else
1822                 ret = FAILED;
1823         srp_free_req(target, req, scmnd, 0);
1824         scmnd->result = DID_ABORT << 16;
1825         scmnd->scsi_done(scmnd);
1826
1827         return ret;
1828 }
1829
1830 static int srp_reset_device(struct scsi_cmnd *scmnd)
1831 {
1832         struct srp_target_port *target = host_to_target(scmnd->device->host);
1833         int i;
1834
1835         shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1836
1837         if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1838                               SRP_TSK_LUN_RESET))
1839                 return FAILED;
1840         if (target->tsk_mgmt_status)
1841                 return FAILED;
1842
1843         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1844                 struct srp_request *req = &target->req_ring[i];
1845                 if (req->scmnd && req->scmnd->device == scmnd->device)
1846                         srp_finish_req(target, req, DID_RESET << 16);
1847         }
1848
1849         return SUCCESS;
1850 }
1851
1852 static int srp_reset_host(struct scsi_cmnd *scmnd)
1853 {
1854         struct srp_target_port *target = host_to_target(scmnd->device->host);
1855
1856         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
1857
1858         return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
1859 }
1860
1861 static int srp_slave_configure(struct scsi_device *sdev)
1862 {
1863         struct Scsi_Host *shost = sdev->host;
1864         struct srp_target_port *target = host_to_target(shost);
1865         struct request_queue *q = sdev->request_queue;
1866         unsigned long timeout;
1867
1868         if (sdev->type == TYPE_DISK) {
1869                 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
1870                 blk_queue_rq_timeout(q, timeout);
1871         }
1872
1873         return 0;
1874 }
1875
1876 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
1877                            char *buf)
1878 {
1879         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1880
1881         return sprintf(buf, "0x%016llx\n",
1882                        (unsigned long long) be64_to_cpu(target->id_ext));
1883 }
1884
1885 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
1886                              char *buf)
1887 {
1888         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1889
1890         return sprintf(buf, "0x%016llx\n",
1891                        (unsigned long long) be64_to_cpu(target->ioc_guid));
1892 }
1893
1894 static ssize_t show_service_id(struct device *dev,
1895                                struct device_attribute *attr, char *buf)
1896 {
1897         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1898
1899         return sprintf(buf, "0x%016llx\n",
1900                        (unsigned long long) be64_to_cpu(target->service_id));
1901 }
1902
1903 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
1904                          char *buf)
1905 {
1906         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1907
1908         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1909 }
1910
1911 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
1912                          char *buf)
1913 {
1914         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1915
1916         return sprintf(buf, "%pI6\n", target->path.dgid.raw);
1917 }
1918
1919 static ssize_t show_orig_dgid(struct device *dev,
1920                               struct device_attribute *attr, char *buf)
1921 {
1922         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1923
1924         return sprintf(buf, "%pI6\n", target->orig_dgid);
1925 }
1926
1927 static ssize_t show_req_lim(struct device *dev,
1928                             struct device_attribute *attr, char *buf)
1929 {
1930         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1931
1932         return sprintf(buf, "%d\n", target->req_lim);
1933 }
1934
1935 static ssize_t show_zero_req_lim(struct device *dev,
1936                                  struct device_attribute *attr, char *buf)
1937 {
1938         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1939
1940         return sprintf(buf, "%d\n", target->zero_req_lim);
1941 }
1942
1943 static ssize_t show_local_ib_port(struct device *dev,
1944                                   struct device_attribute *attr, char *buf)
1945 {
1946         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1947
1948         return sprintf(buf, "%d\n", target->srp_host->port);
1949 }
1950
1951 static ssize_t show_local_ib_device(struct device *dev,
1952                                     struct device_attribute *attr, char *buf)
1953 {
1954         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1955
1956         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
1957 }
1958
1959 static ssize_t show_comp_vector(struct device *dev,
1960                                 struct device_attribute *attr, char *buf)
1961 {
1962         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1963
1964         return sprintf(buf, "%d\n", target->comp_vector);
1965 }
1966
1967 static ssize_t show_tl_retry_count(struct device *dev,
1968                                    struct device_attribute *attr, char *buf)
1969 {
1970         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1971
1972         return sprintf(buf, "%d\n", target->tl_retry_count);
1973 }
1974
1975 static ssize_t show_cmd_sg_entries(struct device *dev,
1976                                    struct device_attribute *attr, char *buf)
1977 {
1978         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1979
1980         return sprintf(buf, "%u\n", target->cmd_sg_cnt);
1981 }
1982
1983 static ssize_t show_allow_ext_sg(struct device *dev,
1984                                  struct device_attribute *attr, char *buf)
1985 {
1986         struct srp_target_port *target = host_to_target(class_to_shost(dev));
1987
1988         return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
1989 }
1990
1991 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
1992 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
1993 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
1994 static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
1995 static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
1996 static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
1997 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
1998 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
1999 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2000 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2001 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2002 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2003 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2004 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2005
2006 static struct device_attribute *srp_host_attrs[] = {
2007         &dev_attr_id_ext,
2008         &dev_attr_ioc_guid,
2009         &dev_attr_service_id,
2010         &dev_attr_pkey,
2011         &dev_attr_dgid,
2012         &dev_attr_orig_dgid,
2013         &dev_attr_req_lim,
2014         &dev_attr_zero_req_lim,
2015         &dev_attr_local_ib_port,
2016         &dev_attr_local_ib_device,
2017         &dev_attr_comp_vector,
2018         &dev_attr_tl_retry_count,
2019         &dev_attr_cmd_sg_entries,
2020         &dev_attr_allow_ext_sg,
2021         NULL
2022 };
2023
2024 static struct scsi_host_template srp_template = {
2025         .module                         = THIS_MODULE,
2026         .name                           = "InfiniBand SRP initiator",
2027         .proc_name                      = DRV_NAME,
2028         .slave_configure                = srp_slave_configure,
2029         .info                           = srp_target_info,
2030         .queuecommand                   = srp_queuecommand,
2031         .eh_abort_handler               = srp_abort,
2032         .eh_device_reset_handler        = srp_reset_device,
2033         .eh_host_reset_handler          = srp_reset_host,
2034         .skip_settle_delay              = true,
2035         .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
2036         .can_queue                      = SRP_CMD_SQ_SIZE,
2037         .this_id                        = -1,
2038         .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
2039         .use_clustering                 = ENABLE_CLUSTERING,
2040         .shost_attrs                    = srp_host_attrs
2041 };
2042
2043 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2044 {
2045         struct srp_rport_identifiers ids;
2046         struct srp_rport *rport;
2047
2048         sprintf(target->target_name, "SRP.T10:%016llX",
2049                  (unsigned long long) be64_to_cpu(target->id_ext));
2050
2051         if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2052                 return -ENODEV;
2053
2054         memcpy(ids.port_id, &target->id_ext, 8);
2055         memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2056         ids.roles = SRP_RPORT_ROLE_TARGET;
2057         rport = srp_rport_add(target->scsi_host, &ids);
2058         if (IS_ERR(rport)) {
2059                 scsi_remove_host(target->scsi_host);
2060                 return PTR_ERR(rport);
2061         }
2062
2063         rport->lld_data = target;
2064         target->rport = rport;
2065
2066         spin_lock(&host->target_lock);
2067         list_add_tail(&target->list, &host->target_list);
2068         spin_unlock(&host->target_lock);
2069
2070         target->state = SRP_TARGET_LIVE;
2071
2072         scsi_scan_target(&target->scsi_host->shost_gendev,
2073                          0, target->scsi_id, SCAN_WILD_CARD, 0);
2074
2075         return 0;
2076 }
2077
2078 static void srp_release_dev(struct device *dev)
2079 {
2080         struct srp_host *host =
2081                 container_of(dev, struct srp_host, dev);
2082
2083         complete(&host->released);
2084 }
2085
2086 static struct class srp_class = {
2087         .name    = "infiniband_srp",
2088         .dev_release = srp_release_dev
2089 };
2090
2091 /**
2092  * srp_conn_unique() - check whether the connection to a target is unique
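 * @host:   SRP host.
 * @target: SRP target port.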
2093  */
2094 static bool srp_conn_unique(struct srp_host *host,
2095                             struct srp_target_port *target)
2096 {
2097         struct srp_target_port *t;
2098         bool ret = false;
2099
2100         if (target->state == SRP_TARGET_REMOVED)
2101                 goto out;
2102
2103         ret = true;
2104
2105         spin_lock(&host->target_lock);
2106         list_for_each_entry(t, &host->target_list, list) {
2107                 if (t != target &&
2108                     target->id_ext == t->id_ext &&
2109                     target->ioc_guid == t->ioc_guid &&
2110                     target->initiator_ext == t->initiator_ext) {
2111                         ret = false;
2112                         break;
2113                 }
2114         }
2115         spin_unlock(&host->target_lock);
2116
2117 out:
2118         return ret;
2119 }
2120
2121 /*
2122  * Target ports are added by writing
2123  *
2124  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2125  *     pkey=<P_Key>,service_id=<service ID>
2126  *
2127  * to the add_target sysfs attribute.
2128  */
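/*
 * Example (the GUIDs, GID and device name below are placeholders), written as
 * a single shell command against the srp_host device created further down:
 *
 *     echo "id_ext=200100e08b000000,ioc_guid=0002c90300a00a00,dgid=fe800000000000000002c90300a00a01,pkey=ffff,service_id=0002c90300a00a00" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */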
2129 enum {
2130         SRP_OPT_ERR             = 0,
2131         SRP_OPT_ID_EXT          = 1 << 0,
2132         SRP_OPT_IOC_GUID        = 1 << 1,
2133         SRP_OPT_DGID            = 1 << 2,
2134         SRP_OPT_PKEY            = 1 << 3,
2135         SRP_OPT_SERVICE_ID      = 1 << 4,
2136         SRP_OPT_MAX_SECT        = 1 << 5,
2137         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2138         SRP_OPT_IO_CLASS        = 1 << 7,
2139         SRP_OPT_INITIATOR_EXT   = 1 << 8,
2140         SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
2141         SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
2142         SRP_OPT_SG_TABLESIZE    = 1 << 11,
2143         SRP_OPT_COMP_VECTOR     = 1 << 12,
2144         SRP_OPT_TL_RETRY_COUNT  = 1 << 13,
2145         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
2146                                    SRP_OPT_IOC_GUID     |
2147                                    SRP_OPT_DGID         |
2148                                    SRP_OPT_PKEY         |
2149                                    SRP_OPT_SERVICE_ID),
2150 };
2151
2152 static const match_table_t srp_opt_tokens = {
2153         { SRP_OPT_ID_EXT,               "id_ext=%s"             },
2154         { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
2155         { SRP_OPT_DGID,                 "dgid=%s"               },
2156         { SRP_OPT_PKEY,                 "pkey=%x"               },
2157         { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
2158         { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
2159         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
2160         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
2161         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
2162         { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
2163         { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
2164         { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
2165         { SRP_OPT_COMP_VECTOR,          "comp_vector=%u"        },
2166         { SRP_OPT_TL_RETRY_COUNT,       "tl_retry_count=%u"     },
2167         { SRP_OPT_ERR,                  NULL                    }
2168 };
2169
2170 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2171 {
2172         char *options, *sep_opt;
2173         char *p;
2174         char dgid[3];
2175         substring_t args[MAX_OPT_ARGS];
2176         int opt_mask = 0;
2177         int token;
2178         int ret = -EINVAL;
2179         int i;
2180
2181         options = kstrdup(buf, GFP_KERNEL);
2182         if (!options)
2183                 return -ENOMEM;
2184
2185         sep_opt = options;
2186         while ((p = strsep(&sep_opt, ",")) != NULL) {
2187                 if (!*p)
2188                         continue;
2189
2190                 token = match_token(p, srp_opt_tokens, args);
2191                 opt_mask |= token;
2192
2193                 switch (token) {
2194                 case SRP_OPT_ID_EXT:
2195                         p = match_strdup(args);
2196                         if (!p) {
2197                                 ret = -ENOMEM;
2198                                 goto out;
2199                         }
2200                         target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2201                         kfree(p);
2202                         break;
2203
2204                 case SRP_OPT_IOC_GUID:
2205                         p = match_strdup(args);
2206                         if (!p) {
2207                                 ret = -ENOMEM;
2208                                 goto out;
2209                         }
2210                         target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2211                         kfree(p);
2212                         break;
2213
2214                 case SRP_OPT_DGID:
2215                         p = match_strdup(args);
2216                         if (!p) {
2217                                 ret = -ENOMEM;
2218                                 goto out;
2219                         }
2220                         if (strlen(p) != 32) {
2221                                 pr_warn("bad dest GID parameter '%s'\n", p);
2222                                 kfree(p);
2223                                 goto out;
2224                         }
2225
2226                         for (i = 0; i < 16; ++i) {
2227                                 strlcpy(dgid, p + i * 2, 3);
2228                                 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2229                         }
2230                         kfree(p);
2231                         memcpy(target->orig_dgid, target->path.dgid.raw, 16);
2232                         break;
2233
2234                 case SRP_OPT_PKEY:
2235                         if (match_hex(args, &token)) {
2236                                 pr_warn("bad P_Key parameter '%s'\n", p);
2237                                 goto out;
2238                         }
2239                         target->path.pkey = cpu_to_be16(token);
2240                         break;
2241
2242                 case SRP_OPT_SERVICE_ID:
2243                         p = match_strdup(args);
2244                         if (!p) {
2245                                 ret = -ENOMEM;
2246                                 goto out;
2247                         }
2248                         target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2249                         target->path.service_id = target->service_id;
2250                         kfree(p);
2251                         break;
2252
2253                 case SRP_OPT_MAX_SECT:
2254                         if (match_int(args, &token)) {
2255                                 pr_warn("bad max sect parameter '%s'\n", p);
2256                                 goto out;
2257                         }
2258                         target->scsi_host->max_sectors = token;
2259                         break;
2260
2261                 case SRP_OPT_MAX_CMD_PER_LUN:
2262                         if (match_int(args, &token)) {
2263                                 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2264                                         p);
2265                                 goto out;
2266                         }
2267                         target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
2268                         break;
2269
2270                 case SRP_OPT_IO_CLASS:
2271                         if (match_hex(args, &token)) {
2272                                 pr_warn("bad IO class parameter '%s'\n", p);
2273                                 goto out;
2274                         }
2275                         if (token != SRP_REV10_IB_IO_CLASS &&
2276                             token != SRP_REV16A_IB_IO_CLASS) {
2277                                 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2278                                         token, SRP_REV10_IB_IO_CLASS,
2279                                         SRP_REV16A_IB_IO_CLASS);
2280                                 goto out;
2281                         }
2282                         target->io_class = token;
2283                         break;
2284
2285                 case SRP_OPT_INITIATOR_EXT:
2286                         p = match_strdup(args);
2287                         if (!p) {
2288                                 ret = -ENOMEM;
2289                                 goto out;
2290                         }
2291                         target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2292                         kfree(p);
2293                         break;
2294
2295                 case SRP_OPT_CMD_SG_ENTRIES:
2296                         if (match_int(args, &token) || token < 1 || token > 255) {
2297                                 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2298                                         p);
2299                                 goto out;
2300                         }
2301                         target->cmd_sg_cnt = token;
2302                         break;
2303
2304                 case SRP_OPT_ALLOW_EXT_SG:
2305                         if (match_int(args, &token)) {
2306                                 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
2307                                 goto out;
2308                         }
2309                         target->allow_ext_sg = !!token;
2310                         break;
2311
2312                 case SRP_OPT_SG_TABLESIZE:
2313                         if (match_int(args, &token) || token < 1 ||
2314                                         token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
2315                                 pr_warn("bad max sg_tablesize parameter '%s'\n",
2316                                         p);
2317                                 goto out;
2318                         }
2319                         target->sg_tablesize = token;
2320                         break;
2321
2322                 case SRP_OPT_COMP_VECTOR:
2323                         if (match_int(args, &token) || token < 0) {
2324                                 pr_warn("bad comp_vector parameter '%s'\n", p);
2325                                 goto out;
2326                         }
2327                         target->comp_vector = token;
2328                         break;
2329
2330                 case SRP_OPT_TL_RETRY_COUNT:
2331                         if (match_int(args, &token) || token < 2 || token > 7) {
2332                                 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2333                                         p);
2334                                 goto out;
2335                         }
2336                         target->tl_retry_count = token;
2337                         break;
2338
2339                 default:
2340                         pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2341                                 p);
2342                         goto out;
2343                 }
2344         }
2345
2346         if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2347                 ret = 0;
2348         else
2349                 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2350                         if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2351                             !(srp_opt_tokens[i].token & opt_mask))
2352                                 pr_warn("target creation request is missing parameter '%s'\n",
2353                                         srp_opt_tokens[i].pattern);
2354
2355 out:
2356         kfree(options);
2357         return ret;
2358 }
2359
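/*
 * Store method for the add_target attribute.  Allocates a Scsi_Host and the
 * embedded srp_target_port, parses the option string, checks that the
 * connection would be unique, sets up the request ring and its DMA mappings,
 * creates the IB resources and CM ID, connects to the target and finally
 * registers it with the SCSI mid-layer.  Returns the number of bytes consumed
 * on success or a negative errno on failure.
 */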
2360 static ssize_t srp_create_target(struct device *dev,
2361                                  struct device_attribute *attr,
2362                                  const char *buf, size_t count)
2363 {
2364         struct srp_host *host =
2365                 container_of(dev, struct srp_host, dev);
2366         struct Scsi_Host *target_host;
2367         struct srp_target_port *target;
2368         struct ib_device *ibdev = host->srp_dev->dev;
2369         dma_addr_t dma_addr;
2370         int i, ret;
2371
2372         target_host = scsi_host_alloc(&srp_template,
2373                                       sizeof (struct srp_target_port));
2374         if (!target_host)
2375                 return -ENOMEM;
2376
2377         target_host->transportt  = ib_srp_transport_template;
2378         target_host->max_channel = 0;
2379         target_host->max_id      = 1;
2380         target_host->max_lun     = SRP_MAX_LUN;
2381         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
2382
2383         target = host_to_target(target_host);
2384
2385         target->io_class        = SRP_REV16A_IB_IO_CLASS;
2386         target->scsi_host       = target_host;
2387         target->srp_host        = host;
2388         target->lkey            = host->srp_dev->mr->lkey;
2389         target->rkey            = host->srp_dev->mr->rkey;
2390         target->cmd_sg_cnt      = cmd_sg_entries;
2391         target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
2392         target->allow_ext_sg    = allow_ext_sg;
2393         target->tl_retry_count  = 7;
2394
2395         ret = srp_parse_options(buf, target);
2396         if (ret)
2397                 goto err;
2398
2399         if (!srp_conn_unique(target->srp_host, target)) {
2400                 shost_printk(KERN_INFO, target->scsi_host,
2401                              PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
2402                              be64_to_cpu(target->id_ext),
2403                              be64_to_cpu(target->ioc_guid),
2404                              be64_to_cpu(target->initiator_ext));
2405                 ret = -EEXIST;
2406                 goto err;
2407         }
2408
2409         if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
2410                                 target->cmd_sg_cnt < target->sg_tablesize) {
2411                 pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2412                 target->sg_tablesize = target->cmd_sg_cnt;
2413         }
2414
2415         target_host->sg_tablesize = target->sg_tablesize;
2416         target->indirect_size = target->sg_tablesize *
2417                                 sizeof (struct srp_direct_buf);
2418         target->max_iu_len = sizeof (struct srp_cmd) +
2419                              sizeof (struct srp_indirect_buf) +
2420                              target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
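        /*
         * For example (structure sizes taken from the SRP wire-format
         * definitions in <scsi/srp.h>): with the default cmd_sg_cnt of 12
         * this is 48 + 20 + 12 * 16 = 260 bytes.
         */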
2421
2422         INIT_WORK(&target->remove_work, srp_remove_work);
2423         spin_lock_init(&target->lock);
2424         INIT_LIST_HEAD(&target->free_tx);
2425         INIT_LIST_HEAD(&target->free_reqs);
2426         for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
2427                 struct srp_request *req = &target->req_ring[i];
2428
2429                 req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
2430                                         GFP_KERNEL);
2431                 req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
2432                                         GFP_KERNEL);
2433                 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
2434                 if (!req->fmr_list || !req->map_page || !req->indirect_desc)
2435                         goto err_free_mem;
2436
2437                 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
2438                                              target->indirect_size,
2439                                              DMA_TO_DEVICE);
2440                 if (ib_dma_mapping_error(ibdev, dma_addr))
2441                         goto err_free_mem;
2442
2443                 req->indirect_dma_addr = dma_addr;
2444                 req->index = i;
2445                 list_add_tail(&req->list, &target->free_reqs);
2446         }
2447
2448         ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
2449
2450         shost_printk(KERN_DEBUG, target->scsi_host, PFX
2451                      "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2452                      "service_id %016llx dgid %pI6\n",
2453                (unsigned long long) be64_to_cpu(target->id_ext),
2454                (unsigned long long) be64_to_cpu(target->ioc_guid),
2455                be16_to_cpu(target->path.pkey),
2456                (unsigned long long) be64_to_cpu(target->service_id),
2457                target->path.dgid.raw);
2458
2459         ret = srp_create_target_ib(target);
2460         if (ret)
2461                 goto err_free_mem;
2462
2463         ret = srp_new_cm_id(target);
2464         if (ret)
2465                 goto err_free_ib;
2466
2467         ret = srp_connect_target(target);
2468         if (ret) {
2469                 shost_printk(KERN_ERR, target->scsi_host,
2470                              PFX "Connection failed\n");
2471                 goto err_cm_id;
2472         }
2473
2474         ret = srp_add_target(host, target);
2475         if (ret)
2476                 goto err_disconnect;
2477
2478         return count;
2479
2480 err_disconnect:
2481         srp_disconnect_target(target);
2482
2483 err_cm_id:
2484         ib_destroy_cm_id(target->cm_id);
2485
2486 err_free_ib:
2487         srp_free_target_ib(target);
2488
2489 err_free_mem:
2490         srp_free_req_data(target);
2491
2492 err:
2493         scsi_host_put(target_host);
2494
2495         return ret;
2496 }
2497
2498 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
2499
2500 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
2501                           char *buf)
2502 {
2503         struct srp_host *host = container_of(dev, struct srp_host, dev);
2504
2505         return sprintf(buf, "%s\n", host->srp_dev->dev->name);
2506 }
2507
2508 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
2509
2510 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
2511                          char *buf)
2512 {
2513         struct srp_host *host = container_of(dev, struct srp_host, dev);
2514
2515         return sprintf(buf, "%d\n", host->port);
2516 }
2517
2518 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
2519
2520 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
2521 {
2522         struct srp_host *host;
2523
2524         host = kzalloc(sizeof *host, GFP_KERNEL);
2525         if (!host)
2526                 return NULL;
2527
2528         INIT_LIST_HEAD(&host->target_list);
2529         spin_lock_init(&host->target_lock);
2530         init_completion(&host->released);
2531         host->srp_dev = device;
2532         host->port = port;
2533
2534         host->dev.class = &srp_class;
2535         host->dev.parent = device->dev->dma_device;
2536         dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
2537
2538         if (device_register(&host->dev))
2539                 goto free_host;
2540         if (device_create_file(&host->dev, &dev_attr_add_target))
2541                 goto err_class;
2542         if (device_create_file(&host->dev, &dev_attr_ibdev))
2543                 goto err_class;
2544         if (device_create_file(&host->dev, &dev_attr_port))
2545                 goto err_class;
2546
2547         return host;
2548
2549 err_class:
2550         device_unregister(&host->dev);
2551
2552 free_host:
2553         kfree(host);
2554
2555         return NULL;
2556 }
2557
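/*
 * IB client "add" callback, invoked once per RDMA device.  Queries the device
 * attributes, derives the FMR page size (at least 4096 bytes), allocates a PD
 * and a DMA MR, tries to create an FMR pool with a progressively smaller
 * max_pages_per_fmr, and registers one srp_host per physical port (port 0
 * only for switches).
 */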
2558 static void srp_add_one(struct ib_device *device)
2559 {
2560         struct srp_device *srp_dev;
2561         struct ib_device_attr *dev_attr;
2562         struct ib_fmr_pool_param fmr_param;
2563         struct srp_host *host;
2564         int max_pages_per_fmr, fmr_page_shift, s, e, p;
2565
2566         dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
2567         if (!dev_attr)
2568                 return;
2569
2570         if (ib_query_device(device, dev_attr)) {
2571                 pr_warn("Query device failed for %s\n", device->name);
2572                 goto free_attr;
2573         }
2574
2575         srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
2576         if (!srp_dev)
2577                 goto free_attr;
2578
2579         /*
2580          * Use the smallest page size supported by the HCA, down to a
2581          * minimum of 4096 bytes. We're unlikely to build large sglists
2582          * out of smaller entries.
2583          */
2584         fmr_page_shift          = max(12, ffs(dev_attr->page_size_cap) - 1);
2585         srp_dev->fmr_page_size  = 1 << fmr_page_shift;
2586         srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
2587         srp_dev->fmr_max_size   = srp_dev->fmr_page_size * SRP_FMR_SIZE;
2588
2589         INIT_LIST_HEAD(&srp_dev->dev_list);
2590
2591         srp_dev->dev = device;
2592         srp_dev->pd  = ib_alloc_pd(device);
2593         if (IS_ERR(srp_dev->pd))
2594                 goto free_dev;
2595
2596         srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
2597                                     IB_ACCESS_LOCAL_WRITE |
2598                                     IB_ACCESS_REMOTE_READ |
2599                                     IB_ACCESS_REMOTE_WRITE);
2600         if (IS_ERR(srp_dev->mr))
2601                 goto err_pd;
2602
2603         for (max_pages_per_fmr = SRP_FMR_SIZE;
2604                         max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
2605                         max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
2606                 memset(&fmr_param, 0, sizeof fmr_param);
2607                 fmr_param.pool_size         = SRP_FMR_POOL_SIZE;
2608                 fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
2609                 fmr_param.cache             = 1;
2610                 fmr_param.max_pages_per_fmr = max_pages_per_fmr;
2611                 fmr_param.page_shift        = fmr_page_shift;
2612                 fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
2613                                                IB_ACCESS_REMOTE_WRITE |
2614                                                IB_ACCESS_REMOTE_READ);
2615
2616                 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
2617                 if (!IS_ERR(srp_dev->fmr_pool))
2618                         break;
2619         }
2620
2621         if (IS_ERR(srp_dev->fmr_pool))
2622                 srp_dev->fmr_pool = NULL;
2623
2624         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2625                 s = 0;
2626                 e = 0;
2627         } else {
2628                 s = 1;
2629                 e = device->phys_port_cnt;
2630         }
2631
2632         for (p = s; p <= e; ++p) {
2633                 host = srp_add_port(srp_dev, p);
2634                 if (host)
2635                         list_add_tail(&host->list, &srp_dev->dev_list);
2636         }
2637
2638         ib_set_client_data(device, &srp_client, srp_dev);
2639
2640         goto free_attr;
2641
2642 err_pd:
2643         ib_dealloc_pd(srp_dev->pd);
2644
2645 free_dev:
2646         kfree(srp_dev);
2647
2648 free_attr:
2649         kfree(dev_attr);
2650 }
2651
2652 static void srp_remove_one(struct ib_device *device)
2653 {
2654         struct srp_device *srp_dev;
2655         struct srp_host *host, *tmp_host;
2656         struct srp_target_port *target;
2657
2658         srp_dev = ib_get_client_data(device, &srp_client);
2659         if (!srp_dev)
2660                 return;
2661
2662         list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2663                 device_unregister(&host->dev);
2664                 /*
2665                  * Wait for the sysfs entry to go away, so that no new
2666                  * target ports can be created.
2667                  */
2668                 wait_for_completion(&host->released);
2669
2670                 /*
2671                  * Remove all target ports.
2672                  */
2673                 spin_lock(&host->target_lock);
2674                 list_for_each_entry(target, &host->target_list, list)
2675                         srp_queue_remove_work(target);
2676                 spin_unlock(&host->target_lock);
2677
2678                 /*
2679                  * Wait for target port removal tasks.
2680                  */
2681                 flush_workqueue(system_long_wq);
2682
2683                 kfree(host);
2684         }
2685
2686         if (srp_dev->fmr_pool)
2687                 ib_destroy_fmr_pool(srp_dev->fmr_pool);
2688         ib_dereg_mr(srp_dev->mr);
2689         ib_dealloc_pd(srp_dev->pd);
2690
2691         kfree(srp_dev);
2692 }
2693
2694 static struct srp_function_template ib_srp_transport_functions = {
2695         .has_rport_state         = true,
2696         .reset_timer_if_blocked  = true,
2697         .fast_io_fail_tmo        = &srp_fast_io_fail_tmo,
2698         .dev_loss_tmo            = &srp_dev_loss_tmo,
2699         .reconnect               = srp_rport_reconnect,
2700         .rport_delete            = srp_rport_delete,
2701         .terminate_rport_io      = srp_terminate_io,
2702 };
2703
2704 static int __init srp_init_module(void)
2705 {
2706         int ret;
2707
2708         BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2709
2710         if (srp_sg_tablesize) {
2711                 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
2712                 if (!cmd_sg_entries)
2713                         cmd_sg_entries = srp_sg_tablesize;
2714         }
2715
2716         if (!cmd_sg_entries)
2717                 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
2718
2719         if (cmd_sg_entries > 255) {
2720                 pr_warn("Clamping cmd_sg_entries to 255\n");
2721                 cmd_sg_entries = 255;
2722         }
2723
2724         if (!indirect_sg_entries)
2725                 indirect_sg_entries = cmd_sg_entries;
2726         else if (indirect_sg_entries < cmd_sg_entries) {
2727                 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
2728                         cmd_sg_entries);
2729                 indirect_sg_entries = cmd_sg_entries;
2730         }
2731
2732         ib_srp_transport_template =
2733                 srp_attach_transport(&ib_srp_transport_functions);
2734         if (!ib_srp_transport_template)
2735                 return -ENOMEM;
2736
2737         ret = class_register(&srp_class);
2738         if (ret) {
2739                 pr_err("couldn't register class infiniband_srp\n");
2740                 srp_release_transport(ib_srp_transport_template);
2741                 return ret;
2742         }
2743
2744         ib_sa_register_client(&srp_sa_client);
2745
2746         ret = ib_register_client(&srp_client);
2747         if (ret) {
2748                 pr_err("couldn't register IB client\n");
2749                 srp_release_transport(ib_srp_transport_template);
2750                 ib_sa_unregister_client(&srp_sa_client);
2751                 class_unregister(&srp_class);
2752                 return ret;
2753         }
2754
2755         return 0;
2756 }
2757
2758 static void __exit srp_cleanup_module(void)
2759 {
2760         ib_unregister_client(&srp_client);
2761         ib_sa_unregister_client(&srp_sa_client);
2762         class_unregister(&srp_class);
2763         srp_release_transport(ib_srp_transport_template);
2764 }
2765
2766 module_init(srp_init_module);
2767 module_exit(srp_cleanup_module);