/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN          8
#define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
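/*
 * Sizing note: each CQ is created large enough for the worst case of all
 * ISERT_MAX_CONN connections sharing one completion vector posting their
 * full per-QP receive/send queue depth at once, so a CQ overrun should
 * not be reachable in normal operation.
 */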

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        pr_err("isert_qp_event_callback event: %d\n", e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                pr_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
        pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        int ret, index, min_index = 0;

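        /*
         * Pick the least loaded completion vector: under device_list_mutex,
         * scan the per-device cq_active_qps[] counters and bind this QP to
         * the CQ pair currently serving the fewest active QPs.
         */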
        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
                    device->cq_active_qps[min_index])
                        min_index = index;
        device->cq_active_qps[min_index]++;
        pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = device->dev_tx_cq[min_index];
        attr.recv_cq = device->dev_rx_cq[min_index];
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
        attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;

        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
        pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
                 isert_conn->conn_pd->device);

        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp() failed: %d\n", ret);
                return ret;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

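/*
 * Pre-allocate the connection's receive ring: ISERT_QP_MAX_RECV_DTOS
 * descriptors, each DMA-mapped for ISER_RX_PAYLOAD_SIZE and described by
 * a single ib_sge using the connection-wide DMA MR lkey. On a mapping
 * failure the descriptors mapped so far are unwound before returning.
 */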
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = isert_conn->conn_mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;
        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

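/*
 * Per ib_device resources are created once and shared by every connection
 * on that HCA (see device_list above): the RDMA registration strategy is
 * chosen here (fast registration work requests when the device advertises
 * IB_DEVICE_MEM_MGT_EXTENSIONS, plain DMA mapping otherwise), and one
 * RX/TX CQ pair is created per completion vector in use.
 */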
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
        int ret = 0, i, j;

        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;

        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                device->use_fastreg = 1;
                device->reg_rdma_mem = isert_reg_rdma;
                device->unreg_rdma_mem = isert_unreg_rdma;
        } else {
                device->use_fastreg = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }

        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors, "
                 "Fast registration %d\n",
                 device->cqs_used, device->ib_device->name,
                 device->ib_device->num_comp_vectors, device->use_fastreg);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
                pr_err("Unable to allocate device->cq_desc\n");
                return -ENOMEM;
        }
        cq_desc = device->cq_desc;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;

                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
                        goto out_cq;
                }

                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;

                ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        return 0;

out_cq:
        for (j = 0; j < i; j++) {
                cq_desc = &device->cq_desc[j];

                if (device->dev_rx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_rx_work);
                        ib_destroy_cq(device->dev_rx_cq[j]);
                }
                if (device->dev_tx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_tx_work);
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
        kfree(device->cq_desc);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        struct isert_cq_desc *cq_desc;
        int i;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc = &device->cq_desc[i];

                cancel_work_sync(&cq_desc->cq_rx_work);
                cancel_work_sync(&cq_desc->cq_tx_work);
                ib_destroy_cq(device->dev_rx_cq[i]);
                ib_destroy_cq(device->dev_tx_cq[i]);
                device->dev_rx_cq[i] = NULL;
                device->dev_tx_cq[i] = NULL;
        }

        kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;

        if (list_empty(&isert_conn->conn_fr_pool))
                return;

        pr_debug("Freeing conn %p fastreg pool", isert_conn);

        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_fr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                kfree(fr_desc);
                ++i;
        }

        if (i < isert_conn->conn_fr_pool_size)
                pr_warn("Pool still has %d regions registered\n",
                        isert_conn->conn_fr_pool_size - i);
}

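/*
 * Build the per-connection fast registration pool: one descriptor per
 * possible outstanding command (ISCSI_DEF_XMIT_CMDS_MAX), each pairing a
 * fast_reg MR with a page list of up to ISCSI_ISER_SG_TABLESIZE entries.
 * Descriptors are checked out for an RDMA operation and returned to the
 * pool in isert_unreg_rdma().
 */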
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        int i, ret;

        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
        isert_conn->conn_fr_pool_size = 0;
        for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        pr_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                fr_desc->data_frpl =
                        ib_alloc_fast_reg_page_list(device->ib_device,
                                                    ISCSI_ISER_SG_TABLESIZE);
                if (IS_ERR(fr_desc->data_frpl)) {
                        pr_err("Failed to allocate fr_pg_list err=%ld\n",
                               PTR_ERR(fr_desc->data_frpl));
                        ret = PTR_ERR(fr_desc->data_frpl);
                        kfree(fr_desc);
                        goto err;
                }

                fr_desc->data_mr = ib_alloc_fast_reg_mr(isert_conn->conn_pd,
                                        ISCSI_ISER_SG_TABLESIZE);
                if (IS_ERR(fr_desc->data_mr)) {
                        pr_err("Failed to allocate frmr err=%ld\n",
                               PTR_ERR(fr_desc->data_mr));
                        ret = PTR_ERR(fr_desc->data_mr);
                        ib_free_fast_reg_page_list(fr_desc->data_frpl);
                        kfree(fr_desc);
                        goto err;
                }
                pr_debug("Create fr_desc %p page_list %p\n",
                         fr_desc, fr_desc->data_frpl->page_list);

                fr_desc->valid = true;
                list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                isert_conn->conn_fr_pool_size++;
        }

        pr_debug("Creating conn %p fastreg pool size=%d",
                 isert_conn, isert_conn->conn_fr_pool_size);

        return 0;

err:
        isert_conn_free_fastreg_pool(isert_conn);
        return ret;
}

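/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize the
 * isert_conn, map the login request/response buffers, look up (or create)
 * the shared isert_device, allocate the PD, DMA MR and optional fastreg
 * pool, create the QP, and finally queue the connection on np_accept_list
 * for the iSCSI login thread. Error paths unwind in reverse order.
 */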
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iscsi_np *np = cma_id->context;
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                pr_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_waitqueue_head(&isert_conn->conn_wait);
        init_waitqueue_head(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        mutex_init(&isert_conn->conn_comp_mutex);
        spin_lock_init(&isert_conn->conn_lock);

        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
        pr_debug("Using responder_resources: %u initiator_depth: %u\n",
                 isert_conn->responder_resources, isert_conn->initiator_depth);

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                pr_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
        pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
                ret = PTR_ERR(isert_conn->conn_pd);
                pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_pd;
        }

        isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
                                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(isert_conn->conn_mr)) {
                ret = PTR_ERR(isert_conn->conn_mr);
                pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_mr;
        }

        if (device->use_fastreg) {
                ret = isert_conn_create_fastreg_pool(isert_conn);
                if (ret) {
                        pr_err("Conn: %p failed to create fastreg pool\n",
                               isert_conn);
                        goto out_fastreg;
                }
        }

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
        wake_up(&isert_np->np_accept_wq);
        return 0;

out_conn_dev:
        if (device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);
out_fastreg:
        ib_dereg_mr(isert_conn->conn_mr);
out_mr:
        ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
        int cq_index;

        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        if (device && device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);

        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
                isert_conn->conn_device->cq_active_qps[cq_index]--;

                rdma_destroy_qp(isert_conn->conn_cm_id);
        }

        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);

        ib_dereg_mr(isert_conn->conn_mr);
        ib_dealloc_pd(isert_conn->conn_pd);

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);

        pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        pr_debug("Calling isert_connect_release for final kref %s/%d\n",
                 current->comm, current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
        struct isert_conn *isert_conn = container_of(work,
                                struct isert_conn, conn_logout_work);

        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        mutex_lock(&isert_conn->conn_mutex);
        isert_conn->state = ISER_CONN_DOWN;

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
                mutex_unlock(&isert_conn->conn_mutex);
                goto wake_up;
        }
        if (!isert_conn->conn_cm_id) {
                mutex_unlock(&isert_conn->conn_mutex);
                isert_put_conn(isert_conn);
                return;
        }
        if (!isert_conn->logout_posted) {
                pr_debug("Calling rdma_disconnect for !logout_posted from"
                         " isert_disconnect_work\n");
                rdma_disconnect(isert_conn->conn_cm_id);
                mutex_unlock(&isert_conn->conn_mutex);
                iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                goto wake_up;
        }
        mutex_unlock(&isert_conn->conn_mutex);

wake_up:
        wake_up(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
                isert_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
                pr_err("Unknown RDMA CMA event: %d\n", event->event);
                break;
        }

        if (ret != 0) {
                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
                       event->event, ret);
                dump_stack();
        }

        return ret;
}

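/*
 * Post 'count' receive work requests from the descriptor ring. The chain
 * is built in conn_rx_wr and the head index wraps with a power-of-two
 * mask, so this relies on ISERT_QP_MAX_RECV_DTOS being a power of two.
 * The ring head only advances once ib_post_recv() succeeds.
 */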
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id    = (unsigned long)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
                pr_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next    = NULL;
        send_wr.wr_id   = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&isert_conn->post_send_buf_count);

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret) {
                pr_err("ib_post_send() failed, ret: %d\n", ret);
                atomic_dec(&isert_conn->post_send_buf_count);
        }

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
                pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                pr_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

        pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
                 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
                 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

        return 0;
}

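/*
 * Completion coalescing: only every ISERT_COMP_BATCH_COUNT-th send is
 * posted with IB_SEND_SIGNALED. Unsignaled descriptors are parked on
 * conn_comp_llist and handed to the signaled descriptor via
 * comp_llnode_batch, so the TX completion path can retire the whole
 * batch when the one signaled completion arrives.
 */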
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr, bool coalesce)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        /*
         * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
         * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
         */
        mutex_lock(&isert_conn->conn_comp_mutex);
        if (coalesce &&
            ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
                llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
                mutex_unlock(&isert_conn->conn_comp_mutex);
                return;
        }
        isert_conn->conn_comp_batch = 0;
        tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
        mutex_unlock(&isert_conn->conn_comp_mutex);

        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;

        pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
                sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                pr_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }

        pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        return ret;
}

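/*
 * Send a login response PDU: the iSCSI header is copied into the TX
 * descriptor and any payload is attached as a second SGE pointing at the
 * pre-mapped login_rsp_dma buffer. Once the final login PDU completes,
 * the full receive ring is allocated, ISERT_MIN_POSTED_RX buffers are
 * posted, and the connection transitions to ISER_CONN_UP.
 */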
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        if (!login) {
                pr_err("conn->conn_login is NULL\n");
                dump_stack();
                return;
        }

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min      = login_req->min_version;
                login->version_max      = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag    = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid              = be16_to_cpu(login_req->cid);
                login->tsih             = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        if (login->first_request) {
                complete(&isert_conn->conn_login_comp);
                return;
        }
        schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, gfp);
        if (!cmd) {
                pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsi_cmd = cmd;

        return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);

        return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;

        rc = iscsit_setup_nop_out(conn, cmd, hdr);
        if (rc < 0)
                return rc;
        /*
         * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
         */

        return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in;

        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;

        text_in = kzalloc(payload_length, GFP_KERNEL);
        if (!text_in) {
                pr_err("Unable to allocate text_in of payload_length: %u\n",
                       payload_length);
                return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;

        memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

        return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_session *sess = conn->sess;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        if (sess->sess_ops->SessionType &&
            opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT) {
                pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
                       " ignoring\n", opcode);
                return 0;
        }

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                           rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        case ISCSI_OP_TEXT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

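/*
 * Parse the iSER header in front of the iSCSI PDU: the ISER_RSV and
 * ISER_WSV flags advertise the initiator's remote stag/VA pairs that the
 * target will use for RDMA_READ and RDMA_WRITE of the data-in/data-out
 * payloads, and they are passed down to isert_rx_opcode().
 */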
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
        struct iser_hdr *iser_hdr = &rx_desc->iser_header;
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;
        int rc;

        switch (iser_hdr->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
                        pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
                        pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
                                 write_stag, (unsigned long long)write_va);
1298                 }
1299
1300                 pr_debug("ISER ISCSI_CTRL PDU\n");
1301                 break;
1302         case ISER_HELLO:
1303                 pr_err("iSER Hello message\n");
1304                 break;
1305         default:
1306                 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1307                 break;
1308         }
1309
1310         rc = isert_rx_opcode(isert_conn, rx_desc,
1311                              read_stag, read_va, write_stag, write_va);
1312 }
1313
1314 static void
1315 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1316                     unsigned long xfer_len)
1317 {
1318         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1319         struct iscsi_hdr *hdr;
1320         u64 rx_dma;
1321         int rx_buflen, outstanding;
1322
1323         if ((char *)desc == isert_conn->login_req_buf) {
1324                 rx_dma = isert_conn->login_req_dma;
1325                 rx_buflen = ISER_RX_LOGIN_SIZE;
1326                 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1327                          rx_dma, rx_buflen);
1328         } else {
1329                 rx_dma = desc->dma_addr;
1330                 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1331                 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1332                          rx_dma, rx_buflen);
1333         }
1334
1335         ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1336
1337         hdr = &desc->iscsi_header;
1338         pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1339                  hdr->opcode, hdr->itt, hdr->flags,
1340                  (int)(xfer_len - ISER_HEADERS_LEN));
1341
1342         if ((char *)desc == isert_conn->login_req_buf)
1343                 isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
1344                                    isert_conn);
1345         else
1346                 isert_rx_do_work(desc, isert_conn);
1347
1348         ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1349                                       DMA_FROM_DEVICE);
1350
1351         isert_conn->post_recv_buf_count--;
1352         pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1353                  isert_conn->post_recv_buf_count);
1354
1355         if ((char *)desc == isert_conn->login_req_buf)
1356                 return;
1357
1358         outstanding = isert_conn->post_recv_buf_count;
1359         if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1360                 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1361                                 ISERT_MIN_POSTED_RX);
1362                 err = isert_post_recv(isert_conn, count);
1363                 if (err) {
1364                         pr_err("isert_post_recv() count: %d failed, %d\n",
1365                                count, err);
1366                 }
1367         }
1368 }
1369
1370 static void
1371 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1372 {
1373         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1374         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1375
1376         pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1377         if (wr->sge) {
1378                 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1379                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1380                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1381                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1382                 wr->sge = NULL;
1383         }
1384
1385         if (wr->send_wr) {
1386                 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1387                 kfree(wr->send_wr);
1388                 wr->send_wr = NULL;
1389         }
1390
1391         if (wr->ib_sge) {
1392                 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1393                 kfree(wr->ib_sge);
1394                 wr->ib_sge = NULL;
1395         }
1396 }
1397
1398 static void
1399 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1400 {
1401         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1402         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1403         LIST_HEAD(unmap_list);
1404
1405         pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
1406
1407         if (wr->fr_desc) {
1408                 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1409                          isert_cmd, wr->fr_desc);
1410                 spin_lock_bh(&isert_conn->conn_lock);
1411                 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1412                 spin_unlock_bh(&isert_conn->conn_lock);
1413                 wr->fr_desc = NULL;
1414         }
1415
1416         if (wr->sge) {
1417                 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1418                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1419                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1420                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1421                 wr->sge = NULL;
1422         }
1423
1424         wr->ib_sge = NULL;
1425         wr->send_wr = NULL;
1426 }
1427
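/*
 * Final per-command teardown, dispatched on iscsi_opcode: unhook the
 * command from its connection list, release RDMA resources through the
 * device-specific unreg_rdma_mem() callback where they were set up, and
 * hand the descriptor back to the target core or the iscsit layer.
 */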
1428 static void
1429 isert_put_cmd(struct isert_cmd *isert_cmd)
1430 {
1431         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1432         struct isert_conn *isert_conn = isert_cmd->conn;
1433         struct iscsi_conn *conn = isert_conn->conn;
1434         struct isert_device *device = isert_conn->conn_device;
1435
1436         pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1437
1438         switch (cmd->iscsi_opcode) {
1439         case ISCSI_OP_SCSI_CMD:
1440                 spin_lock_bh(&conn->cmd_lock);
1441                 if (!list_empty(&cmd->i_conn_node))
1442                         list_del(&cmd->i_conn_node);
1443                 spin_unlock_bh(&conn->cmd_lock);
1444
1445                 if (cmd->data_direction == DMA_TO_DEVICE)
1446                         iscsit_stop_dataout_timer(cmd);
1447
1448                 device->unreg_rdma_mem(isert_cmd, isert_conn);
1449                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1450                 break;
1451         case ISCSI_OP_SCSI_TMFUNC:
1452                 spin_lock_bh(&conn->cmd_lock);
1453                 if (!list_empty(&cmd->i_conn_node))
1454                         list_del(&cmd->i_conn_node);
1455                 spin_unlock_bh(&conn->cmd_lock);
1456
1457                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1458                 break;
1459         case ISCSI_OP_REJECT:
1460         case ISCSI_OP_NOOP_OUT:
1461         case ISCSI_OP_TEXT:
1462                 spin_lock_bh(&conn->cmd_lock);
1463                 if (!list_empty(&cmd->i_conn_node))
1464                         list_del(&cmd->i_conn_node);
1465                 spin_unlock_bh(&conn->cmd_lock);
1466
1467                 /*
1468                  * Handle special case for REJECT when iscsi_add_reject*() has
1469                  * overwritten the original iscsi_opcode assignment, and the
1470                  * associated cmd->se_cmd needs to be released.
1471                  */
1472                 if (cmd->se_cmd.se_tfo != NULL) {
1473                         pr_debug("Calling transport_generic_free_cmd from"
1474                                  " isert_put_cmd for 0x%02x\n",
1475                                  cmd->iscsi_opcode);
1476                         transport_generic_free_cmd(&cmd->se_cmd, 0);
1477                         break;
1478                 }
1479                 /*
1480                  * Fall-through
1481                  */
1482         default:
1483                 iscsit_release_cmd(cmd);
1484                 break;
1485         }
1486 }
1487
1488 static void
1489 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1490 {
1491         if (tx_desc->dma_addr != 0) {
1492                 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1493                 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1494                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
1495                 tx_desc->dma_addr = 0;
1496         }
1497 }
1498
1499 static void
1500 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1501                      struct ib_device *ib_dev)
1502 {
1503         if (isert_cmd->pdu_buf_dma != 0) {
1504                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1505                 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1506                                     isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1507                 isert_cmd->pdu_buf_dma = 0;
1508         }
1509
1510         isert_unmap_tx_desc(tx_desc, ib_dev);
1511         isert_put_cmd(isert_cmd);
1512 }
1513
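/*
 * RDMA_READ completion: all solicited Data-Out now sits in the local
 * scatterlist, so release the RDMA registration, record the final
 * write_data_done, and kick the backend via target_execute_cmd().
 */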
1514 static void
1515 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1516                            struct isert_cmd *isert_cmd)
1517 {
1518         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1519         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1520         struct se_cmd *se_cmd = &cmd->se_cmd;
1521         struct isert_conn *isert_conn = isert_cmd->conn;
1522         struct isert_device *device = isert_conn->conn_device;
1523
1524         iscsit_stop_dataout_timer(cmd);
1525         device->unreg_rdma_mem(isert_cmd, isert_conn);
1526         cmd->write_data_done = wr->cur_rdma_length;
1527
1528         pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1529         spin_lock_bh(&cmd->istate_lock);
1530         cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1531         cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1532         spin_unlock_bh(&cmd->istate_lock);
1533
1534         target_execute_cmd(se_cmd);
1535 }
1536
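/*
 * Control PDU completions are bounced to isert_comp_wq because the
 * iscsit post handlers called here may block.  Note that the logout
 * case deliberately skips the post_send_buf_count decrement; it is
 * performed later by isert_free_conn() once logout_posted is set.
 */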
1537 static void
1538 isert_do_control_comp(struct work_struct *work)
1539 {
1540         struct isert_cmd *isert_cmd = container_of(work,
1541                         struct isert_cmd, comp_work);
1542         struct isert_conn *isert_conn = isert_cmd->conn;
1543         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1544         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1545
1546         switch (cmd->i_state) {
1547         case ISTATE_SEND_TASKMGTRSP:
1548                 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1549
1550                 atomic_dec(&isert_conn->post_send_buf_count);
1551                 iscsit_tmr_post_handler(cmd, cmd->conn);
1552
1553                 cmd->i_state = ISTATE_SENT_STATUS;
1554                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1555                 break;
1556         case ISTATE_SEND_REJECT:
1557                 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1558                 atomic_dec(&isert_conn->post_send_buf_count);
1559
1560                 cmd->i_state = ISTATE_SENT_STATUS;
1561                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1562                 break;
1563         case ISTATE_SEND_LOGOUTRSP:
1564                 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1565                 /*
1566                  * Call atomic_dec(&isert_conn->post_send_buf_count)
1567                  * from isert_free_conn()
1568                  */
1569                 isert_conn->logout_posted = true;
1570                 iscsit_logout_post_handler(cmd, cmd->conn);
1571                 break;
1572         case ISTATE_SEND_TEXTRSP:
1573                 atomic_dec(&isert_conn->post_send_buf_count);
1574                 cmd->i_state = ISTATE_SENT_STATUS;
1575                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1576                 break;
1577         default:
1578                 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1579                 dump_stack();
1580                 break;
1581         }
1582 }
1583
1584 static void
1585 isert_response_completion(struct iser_tx_desc *tx_desc,
1586                           struct isert_cmd *isert_cmd,
1587                           struct isert_conn *isert_conn,
1588                           struct ib_device *ib_dev)
1589 {
1590         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1591
1592         if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1593             cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1594             cmd->i_state == ISTATE_SEND_REJECT ||
1595             cmd->i_state == ISTATE_SEND_TEXTRSP) {
1596                 isert_unmap_tx_desc(tx_desc, ib_dev);
1597
1598                 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1599                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1600                 return;
1601         }
1602         atomic_dec(&isert_conn->post_send_buf_count);
1603
1604         cmd->i_state = ISTATE_SENT_STATUS;
1605         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1606 }
1607
1608 static void
1609 __isert_send_completion(struct iser_tx_desc *tx_desc,
1610                         struct isert_conn *isert_conn)
1611 {
1612         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1613         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1614         struct isert_rdma_wr *wr;
1615
1616         if (!isert_cmd) {
1617                 atomic_dec(&isert_conn->post_send_buf_count);
1618                 isert_unmap_tx_desc(tx_desc, ib_dev);
1619                 return;
1620         }
1621         wr = &isert_cmd->rdma_wr;
1622
1623         switch (wr->iser_ib_op) {
1624         case ISER_IB_RECV:
1625                 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1626                 dump_stack();
1627                 break;
1628         case ISER_IB_SEND:
1629                 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1630                 isert_response_completion(tx_desc, isert_cmd,
1631                                           isert_conn, ib_dev);
1632                 break;
1633         case ISER_IB_RDMA_WRITE:
1634                 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1635                 dump_stack();
1636                 break;
1637         case ISER_IB_RDMA_READ:
1638                 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1639
1640                 atomic_dec(&isert_conn->post_send_buf_count);
1641                 isert_completion_rdma_read(tx_desc, isert_cmd);
1642                 break;
1643         default:
1644                 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1645                 dump_stack();
1646                 break;
1647         }
1648 }
1649
1650 static void
1651 isert_send_completion(struct iser_tx_desc *tx_desc,
1652                       struct isert_conn *isert_conn)
1653 {
1654         struct llist_node *llnode = tx_desc->comp_llnode_batch;
1655         struct iser_tx_desc *t;
1656         /*
1657          * Drain coalesced completion llist starting from comp_llnode_batch
1658          * set up in isert_init_send_wr(), then complete the trailing tx_desc.
1659          */
1660         while (llnode) {
1661                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1662                 llnode = llist_next(llnode);
1663                 __isert_send_completion(t, isert_conn);
1664         }
1665         __isert_send_completion(tx_desc, isert_conn);
1666 }
1667
1668 static void
1669 isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1670 {
1671         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1672
1673         if (tx_desc) {
1674                 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1675
1676                 if (!isert_cmd)
1677                         isert_unmap_tx_desc(tx_desc, ib_dev);
1678                 else
1679                         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1680         }
1681
1682         if (isert_conn->post_recv_buf_count == 0 &&
1683             atomic_read(&isert_conn->post_send_buf_count) == 0) {
1684                 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1685                 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1686
1687                 mutex_lock(&isert_conn->conn_mutex);
1688                 if (isert_conn->state != ISER_CONN_DOWN)
1689                         isert_conn->state = ISER_CONN_TERMINATING;
1690                 mutex_unlock(&isert_conn->conn_mutex);
1691
1692                 wake_up(&isert_conn->conn_wait_comp_err);
1693         }
1694 }
1695
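/*
 * CQ processing happens in workqueue context: the interrupt-time
 * callback below only queues this work, which drains the CQ one work
 * completion at a time and then re-arms it with IB_CQ_NEXT_COMP.
 */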
1696 static void
1697 isert_cq_tx_work(struct work_struct *work)
1698 {
1699         struct isert_cq_desc *cq_desc = container_of(work,
1700                                 struct isert_cq_desc, cq_tx_work);
1701         struct isert_device *device = cq_desc->device;
1702         int cq_index = cq_desc->cq_index;
1703         struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1704         struct isert_conn *isert_conn;
1705         struct iser_tx_desc *tx_desc;
1706         struct ib_wc wc;
1707
1708         while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1709                 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1710                 isert_conn = wc.qp->qp_context;
1711
1712                 if (wc.status == IB_WC_SUCCESS) {
1713                         isert_send_completion(tx_desc, isert_conn);
1714                 } else {
1715                         pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1716                         pr_debug("TX wc.status: 0x%08x\n", wc.status);
1717                         pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1718                         atomic_dec(&isert_conn->post_send_buf_count);
1719                         isert_cq_comp_err(tx_desc, isert_conn);
1720                 }
1721         }
1722
1723         ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1724 }
1725
1726 static void
1727 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1728 {
1729         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1730
1731         INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1732         queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1733 }
1734
1735 static void
1736 isert_cq_rx_work(struct work_struct *work)
1737 {
1738         struct isert_cq_desc *cq_desc = container_of(work,
1739                         struct isert_cq_desc, cq_rx_work);
1740         struct isert_device *device = cq_desc->device;
1741         int cq_index = cq_desc->cq_index;
1742         struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1743         struct isert_conn *isert_conn;
1744         struct iser_rx_desc *rx_desc;
1745         struct ib_wc wc;
1746         unsigned long xfer_len;
1747
1748         while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1749                 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1750                 isert_conn = wc.qp->qp_context;
1751
1752                 if (wc.status == IB_WC_SUCCESS) {
1753                         xfer_len = (unsigned long)wc.byte_len;
1754                         isert_rx_completion(rx_desc, isert_conn, xfer_len);
1755                 } else {
1756                         pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1757                         if (wc.status != IB_WC_WR_FLUSH_ERR) {
1758                                 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1759                                 pr_debug("RX wc.vendor_err: 0x%08x\n",
1760                                          wc.vendor_err);
1761                         }
1762                         isert_conn->post_recv_buf_count--;
1763                         isert_cq_comp_err(NULL, isert_conn);
1764                 }
1765         }
1766
1767         ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1768 }
1769
1770 static void
1771 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1772 {
1773         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1774
1775         INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1776         queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1777 }
1778
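/*
 * post_send_buf_count is incremented before ib_post_send() so a fast
 * completion can never observe an unaccounted send; the increment is
 * rolled back if the post fails.
 */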
1779 static int
1780 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1781 {
1782         struct ib_send_wr *wr_failed;
1783         int ret;
1784
1785         atomic_inc(&isert_conn->post_send_buf_count);
1786
1787         ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1788                            &wr_failed);
1789         if (ret) {
1790                 pr_err("ib_post_send failed with %d\n", ret);
1791                 atomic_dec(&isert_conn->post_send_buf_count);
1792                 return ret;
1793         }
1794         return ret;
1795 }
1796
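/*
 * Queue a SCSI Response PDU.  Sense data, when present, is prefixed
 * with its big-endian length, padded to the iSCSI 4-byte boundary
 * (padding = -len & 3), and attached as a second SGE of the send.
 */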
1797 static int
1798 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1799 {
1800         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1801         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1802         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1803         struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1804                                 &isert_cmd->tx_desc.iscsi_header;
1805
1806         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1807         iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1808         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1809         /*
1810          * Attach SENSE DATA payload to iSCSI Response PDU
1811          */
1812         if (cmd->se_cmd.sense_buffer &&
1813             ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1814             (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1815                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1816                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1817                 u32 padding, pdu_len;
1818
1819                 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1820                                    cmd->sense_buffer);
1821                 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1822
1823                 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1824                 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1825                 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1826
1827                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1828                                 (void *)cmd->sense_buffer, pdu_len,
1829                                 DMA_TO_DEVICE);
1830
1831                 isert_cmd->pdu_buf_len = pdu_len;
1832                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1833                 tx_dsg->length  = pdu_len;
1834                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1835                 isert_cmd->tx_desc.num_sge = 2;
1836         }
1837
1838         isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
1839
1840         pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1841
1842         return isert_post_response(isert_conn, isert_cmd);
1843 }
1844
1845 static int
1846 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1847                 bool nopout_response)
1848 {
1849         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1850         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1851         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1852
1853         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1854         iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1855                                &isert_cmd->tx_desc.iscsi_header,
1856                                nopout_response);
1857         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1858         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1859
1860         pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1861
1862         return isert_post_response(isert_conn, isert_cmd);
1863 }
1864
1865 static int
1866 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1867 {
1868         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1869         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1870         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1871
1872         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1873         iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1874                                 &isert_cmd->tx_desc.iscsi_header);
1875         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1876         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1877
1878         pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1879
1880         return isert_post_response(isert_conn, isert_cmd);
1881 }
1882
1883 static int
1884 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1885 {
1886         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1887         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1888         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1889
1890         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1891         iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1892                                   &isert_cmd->tx_desc.iscsi_header);
1893         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1894         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1895
1896         pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1897
1898         return isert_post_response(isert_conn, isert_cmd);
1899 }
1900
1901 static int
1902 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1903 {
1904         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1905         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1906         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1907         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1908         struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1909         struct iscsi_reject *hdr =
1910                 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1911
1912         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1913         iscsit_build_reject(cmd, conn, hdr);
1914         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1915
1916         hton24(hdr->dlength, ISCSI_HDR_LEN);
1917         isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1918                         (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1919                         DMA_TO_DEVICE);
1920         isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1921         tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1922         tx_dsg->length  = ISCSI_HDR_LEN;
1923         tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1924         isert_cmd->tx_desc.num_sge = 2;
1925
1926         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1927
1928         pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1929
1930         return isert_post_response(isert_conn, isert_cmd);
1931 }
1932
1933 static int
1934 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1935 {
1936         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1937         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1938         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1939         struct iscsi_text_rsp *hdr =
1940                 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1941         u32 txt_rsp_len;
1942         int rc;
1943
1944         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1945         rc = iscsit_build_text_rsp(cmd, conn, hdr);
1946         if (rc < 0)
1947                 return rc;
1948
1949         txt_rsp_len = rc;
1950         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1951
1952         if (txt_rsp_len) {
1953                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1954                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1955                 void *txt_rsp_buf = cmd->buf_ptr;
1956
1957                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1958                                 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1959
1960                 isert_cmd->pdu_buf_len = txt_rsp_len;
1961                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1962                 tx_dsg->length  = txt_rsp_len;
1963                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1964                 isert_cmd->tx_desc.num_sge = 2;
1965         }
1966         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1967
1968         pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1969
1970         return isert_post_response(isert_conn, isert_cmd);
1971 }
1972
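/*
 * Fill one RDMA work request with up to max_sge ib_sge entries sliced
 * out of the command's TCM scatterlist, starting at the given byte
 * offset into the transfer.  Returns the number of SGEs consumed so
 * the caller can advance through its ib_sge array.
 */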
1973 static int
1974 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1975                     struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1976                     u32 data_left, u32 offset)
1977 {
1978         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1979         struct scatterlist *sg_start, *tmp_sg;
1980         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1981         u32 sg_off, page_off;
1982         int i = 0, sg_nents;
1983
1984         sg_off = offset / PAGE_SIZE;
1985         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1986         sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
1987         page_off = offset % PAGE_SIZE;
1988
1989         send_wr->sg_list = ib_sge;
1990         send_wr->num_sge = sg_nents;
1991         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1992         /*
1993          * Map TCM scatterlist memory into ib_sge entries using the DMA addresses.
1994          */
1995         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
1996                 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
1997                          (unsigned long long)tmp_sg->dma_address,
1998                          tmp_sg->length, page_off);
1999
2000                 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2001                 ib_sge->length = min_t(u32, data_left,
2002                                 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2003                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2004
2005                 pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2006                          ib_sge->addr, ib_sge->length, ib_sge->lkey);
2007                 page_off = 0;
2008                 data_left -= ib_sge->length;
2009                 ib_sge++;
2010                 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2011         }
2012
2013         pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2014                  send_wr->sg_list, send_wr->num_sge);
2015
2016         return sg_nents;
2017 }
2018
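/*
 * Registration path used when fast registration is not available:
 * DMA-map the scatterlist under the local DMA MR and split the
 * transfer into send_wr_num RDMA work requests covering at most
 * max_sge pages each.  For ISER_IB_RDMA_WRITE the last WR chains the
 * iSCSI response send_wr behind it.
 */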
2019 static int
2020 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2021                struct isert_rdma_wr *wr)
2022 {
2023         struct se_cmd *se_cmd = &cmd->se_cmd;
2024         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2025         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2026         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2027         struct ib_send_wr *send_wr;
2028         struct ib_sge *ib_sge;
2029         struct scatterlist *sg_start;
2030         u32 sg_off = 0, sg_nents;
2031         u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2032         int ret = 0, count, i, ib_sge_cnt;
2033
2034         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2035                 data_left = se_cmd->data_length;
2036         } else {
2037                 sg_off = cmd->write_data_done / PAGE_SIZE;
2038                 data_left = se_cmd->data_length - cmd->write_data_done;
2039                 offset = cmd->write_data_done;
2040                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2041         }
2042
2043         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2044         sg_nents = se_cmd->t_data_nents - sg_off;
2045
2046         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2047                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2048                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2049         if (unlikely(!count)) {
2050                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2051                 return -EINVAL;
2052         }
2053         wr->sge = sg_start;
2054         wr->num_sge = sg_nents;
2055         wr->cur_rdma_length = data_left;
2056         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2057                  isert_cmd, count, sg_start, sg_nents, data_left);
2058
2059         ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2060         if (!ib_sge) {
2061                 pr_warn("Unable to allocate ib_sge\n");
2062                 ret = -ENOMEM;
2063                 goto unmap_sg;
2064         }
2065         wr->ib_sge = ib_sge;
2066
2067         wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2068         wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2069                                 GFP_KERNEL);
2070         if (!wr->send_wr) {
2071                 pr_debug("Unable to allocate wr->send_wr\n");
2072                 ret = -ENOMEM;
2073                 goto unmap_sg;
2074         }
2075
2076         wr->isert_cmd = isert_cmd;
2077         rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2078
2079         for (i = 0; i < wr->send_wr_num; i++) {
2080                 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2081                 data_len = min(data_left, rdma_write_max);
2082
2083                 send_wr->send_flags = 0;
2084                 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2085                         send_wr->opcode = IB_WR_RDMA_WRITE;
2086                         send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2087                         send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2088                         if (i + 1 == wr->send_wr_num)
2089                                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2090                         else
2091                                 send_wr->next = &wr->send_wr[i + 1];
2092                 } else {
2093                         send_wr->opcode = IB_WR_RDMA_READ;
2094                         send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2095                         send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2096                         if (i + 1 == wr->send_wr_num)
2097                                 send_wr->send_flags = IB_SEND_SIGNALED;
2098                         else
2099                                 send_wr->next = &wr->send_wr[i + 1];
2100                 }
2101
2102                 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2103                                         send_wr, data_len, offset);
2104                 ib_sge += ib_sge_cnt;
2105
2106                 offset += data_len;
2107                 va_offset += data_len;
2108                 data_left -= data_len;
2109         }
2110
2111         return 0;
2112 unmap_sg:
2113         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2114                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2115                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2116         return ret;
2117 }
2118
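/*
 * Flatten the DMA-mapped scatterlist into the page list consumed by
 * IB_WR_FAST_REG_MR: physically contiguous entries are merged into
 * chunks, and each chunk is emitted one PAGE_SIZE frame at a time.
 * Returns the number of pages written to fr_pl.
 */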
2119 static int
2120 isert_map_fr_pagelist(struct ib_device *ib_dev,
2121                       struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2122 {
2123         u64 start_addr, end_addr, page, chunk_start = 0;
2124         struct scatterlist *tmp_sg;
2125         int i = 0, new_chunk, last_ent, n_pages;
2126
2127         n_pages = 0;
2128         new_chunk = 1;
2129         last_ent = sg_nents - 1;
2130         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2131                 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2132                 if (new_chunk)
2133                         chunk_start = start_addr;
2134                 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2135
2136                 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2137                          i, (unsigned long long)tmp_sg->dma_address,
2138                          tmp_sg->length);
2139
2140                 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2141                         new_chunk = 0;
2142                         continue;
2143                 }
2144                 new_chunk = 1;
2145
2146                 page = chunk_start & PAGE_MASK;
2147                 do {
2148                         fr_pl[n_pages++] = page;
2149                         pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2150                                  n_pages - 1, page);
2151                         page += PAGE_SIZE;
2152                 } while (page < end_addr);
2153         }
2154
2155         return n_pages;
2156 }
2157
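/*
 * Register the data buffer through a pooled fast_reg_descriptor.  If
 * the descriptor has been used before (!valid), an IB_WR_LOCAL_INV is
 * chained ahead of the IB_WR_FAST_REG_MR and the low byte of the rkey
 * is bumped so stale remote references cannot match the new mapping.
 */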
2158 static int
2159 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2160                   struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
2161                   struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
2162 {
2163         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2164         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2165         struct scatterlist *sg_start;
2166         u32 sg_off, page_off;
2167         struct ib_send_wr fr_wr, inv_wr;
2168         struct ib_send_wr *bad_wr, *wr = NULL;
2169         u8 key;
2170         int ret, sg_nents, pagelist_len;
2171
2172         sg_off = offset / PAGE_SIZE;
2173         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2174         sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
2175                          ISCSI_ISER_SG_TABLESIZE);
2176         page_off = offset % PAGE_SIZE;
2177
2178         pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
2179                  isert_cmd, fr_desc, sg_nents, sg_off, offset);
2180
2181         pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2182                                              &fr_desc->data_frpl->page_list[0]);
2183
2184         if (!fr_desc->valid) {
2185                 memset(&inv_wr, 0, sizeof(inv_wr));
2186                 inv_wr.opcode = IB_WR_LOCAL_INV;
2187                 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2188                 wr = &inv_wr;
2189                 /* Bump the key */
2190                 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2191                 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2192         }
2193
2194         /* Prepare FASTREG WR */
2195         memset(&fr_wr, 0, sizeof(fr_wr));
2196         fr_wr.opcode = IB_WR_FAST_REG_MR;
2197         fr_wr.wr.fast_reg.iova_start =
2198                 fr_desc->data_frpl->page_list[0] + page_off;
2199         fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2200         fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2201         fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2202         fr_wr.wr.fast_reg.length = data_len;
2203         fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2204         fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2205
2206         if (!wr)
2207                 wr = &fr_wr;
2208         else
2209                 wr->next = &fr_wr;
2210
2211         ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2212         if (ret) {
2213                 pr_err("fast registration failed, ret:%d\n", ret);
2214                 return ret;
2215         }
2216         fr_desc->valid = false;
2217
2218         ib_sge->lkey = fr_desc->data_mr->lkey;
2219         ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2220         ib_sge->length = data_len;
2221
2222         pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2223                  ib_sge->addr, ib_sge->length, ib_sge->lkey);
2224
2225         return ret;
2226 }
2227
2228 static int
2229 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
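/*
 * Fast registration variant of reg_rdma_mem: the whole transfer is
 * described by the single embedded s_ib_sge/s_send_wr pair.  A
 * one-entry DMA mapping short-circuits to the local DMA MR; otherwise
 * a descriptor is pulled from conn_fr_pool and programmed via
 * isert_fast_reg_mr().
 */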
2230                struct isert_rdma_wr *wr)
2231 {
2232         struct se_cmd *se_cmd = &cmd->se_cmd;
2233         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2234         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2235         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2236         struct ib_send_wr *send_wr;
2237         struct ib_sge *ib_sge;
2238         struct scatterlist *sg_start;
2239         struct fast_reg_descriptor *fr_desc;
2240         u32 sg_off = 0, sg_nents;
2241         u32 offset = 0, data_len, data_left, rdma_write_max;
2242         int ret = 0, count;
2243         unsigned long flags;
2244
2245         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2246                 data_left = se_cmd->data_length;
2247         } else {
2248                 sg_off = cmd->write_data_done / PAGE_SIZE;
2249                 data_left = se_cmd->data_length - cmd->write_data_done;
2250                 offset = cmd->write_data_done;
2251                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2252         }
2253
2254         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2255         sg_nents = se_cmd->t_data_nents - sg_off;
2256
2257         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2258                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2259                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2260         if (unlikely(!count)) {
2261                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2262                 return -EINVAL;
2263         }
2264         wr->sge = sg_start;
2265         wr->num_sge = sg_nents;
2266         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2267                  isert_cmd, count, sg_start, sg_nents, data_left);
2268
2269         memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
2270         ib_sge = &wr->s_ib_sge;
2271         wr->ib_sge = ib_sge;
2272
2273         wr->send_wr_num = 1;
2274         memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2275         wr->send_wr = &wr->s_send_wr;
2276
2277         wr->isert_cmd = isert_cmd;
2278         rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2279
2280         send_wr = &isert_cmd->rdma_wr.s_send_wr;
2281         send_wr->sg_list = ib_sge;
2282         send_wr->num_sge = 1;
2283         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2284         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2285                 send_wr->opcode = IB_WR_RDMA_WRITE;
2286                 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2287                 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2288                 send_wr->send_flags = 0;
2289                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2290         } else {
2291                 send_wr->opcode = IB_WR_RDMA_READ;
2292                 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2293                 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2294                 send_wr->send_flags = IB_SEND_SIGNALED;
2295         }
2296
2297         data_len = min(data_left, rdma_write_max);
2298         wr->cur_rdma_length = data_len;
2299
2300         /* If there is a single DMA entry, the local DMA MR is sufficient */
2301         if (count == 1) {
2302                 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2303                 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2304                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2305                 wr->fr_desc = NULL;
2306         } else {
2307                 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2308                 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2309                                            struct fast_reg_descriptor, list);
2310                 list_del(&fr_desc->list);
2311                 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2312                 wr->fr_desc = fr_desc;
2313
2314                 ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
2315                                   ib_sge, offset, data_len);
2316                 if (ret) {
2317                         list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2318                         goto unmap_sg;
2319                 }
2320         }
2321
2322         return 0;
2323
2324 unmap_sg:
2325         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2326                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2327                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2328         return ret;
2329 }
2330
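/*
 * queue_data_in backend: program the RDMA_WRITE work request chain for
 * the Data-In payload, with the SCSI response send_wr linked at its
 * tail, and post the whole chain with a single ib_post_send().
 */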
2331 static int
2332 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2333 {
2334         struct se_cmd *se_cmd = &cmd->se_cmd;
2335         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2336         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2337         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2338         struct isert_device *device = isert_conn->conn_device;
2339         struct ib_send_wr *wr_failed;
2340         int rc;
2341
2342         pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2343                  isert_cmd, se_cmd->data_length);
2344         wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2345         rc = device->reg_rdma_mem(conn, cmd, wr);
2346         if (rc) {
2347                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2348                 return rc;
2349         }
2350
2351         /*
2352          * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it to the WR chain.
2353          */
2354         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2355         iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2356                              &isert_cmd->tx_desc.iscsi_header);
2357         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2358         isert_init_send_wr(isert_conn, isert_cmd,
2359                            &isert_cmd->tx_desc.send_wr, true);
2360
2361         atomic_inc(&isert_conn->post_send_buf_count);
2362
2363         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2364         if (rc) {
2365                 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2366                 atomic_dec(&isert_conn->post_send_buf_count);
2367         }
2368         pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2369                  isert_cmd);
2370
2371         return 1;
2372 }
2373
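/*
 * get_dataout backend: pull outstanding Data-Out from the initiator
 * with an RDMA_READ into the command's scatterlist; completion is
 * handled in isert_completion_rdma_read().
 */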
2374 static int
2375 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2376 {
2377         struct se_cmd *se_cmd = &cmd->se_cmd;
2378         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2379         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2380         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2381         struct isert_device *device = isert_conn->conn_device;
2382         struct ib_send_wr *wr_failed;
2383         int rc;
2384
2385         pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2386                  isert_cmd, se_cmd->data_length, cmd->write_data_done);
2387         wr->iser_ib_op = ISER_IB_RDMA_READ;
2388         rc = device->reg_rdma_mem(conn, cmd, wr);
2389         if (rc) {
2390                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2391                 return rc;
2392         }
2393
2394         atomic_inc(&isert_conn->post_send_buf_count);
2395
2396         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2397         if (rc) {
2398                 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2399                 atomic_dec(&isert_conn->post_send_buf_count);
2400         }
2401         pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2402                  isert_cmd);
2403
2404         return 0;
2405 }
2406
2407 static int
2408 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2409 {
2410         int ret;
2411
2412         switch (state) {
2413         case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2414                 ret = isert_put_nopin(cmd, conn, false);
2415                 break;
2416         default:
2417                 pr_err("Unknown immediate state: 0x%02x\n", state);
2418                 ret = -EINVAL;
2419                 break;
2420         }
2421
2422         return ret;
2423 }
2424
2425 static int
2426 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2427 {
2428         int ret;
2429
2430         switch (state) {
2431         case ISTATE_SEND_LOGOUTRSP:
2432                 ret = isert_put_logout_rsp(cmd, conn);
2433                 if (!ret) {
2434                         pr_debug("Returning iSER Logout -EAGAIN\n");
2435                         ret = -EAGAIN;
2436                 }
2437                 break;
2438         case ISTATE_SEND_NOPIN:
2439                 ret = isert_put_nopin(cmd, conn, true);
2440                 break;
2441         case ISTATE_SEND_TASKMGTRSP:
2442                 ret = isert_put_tm_rsp(cmd, conn);
2443                 break;
2444         case ISTATE_SEND_REJECT:
2445                 ret = isert_put_reject(cmd, conn);
2446                 break;
2447         case ISTATE_SEND_TEXTRSP:
2448                 ret = isert_put_text_rsp(cmd, conn);
2449                 break;
2450         case ISTATE_SEND_STATUS:
2451                 /*
2452          * Special case for sending non-GOOD SCSI status from TX thread
2453          * context during pre-se_cmd execution failure.
2454                  */
2455                 ret = isert_put_response(conn, cmd);
2456                 break;
2457         default:
2458                 pr_err("Unknown response state: 0x%02x\n", state);
2459                 ret = -EINVAL;
2460                 break;
2461         }
2462
2463         return ret;
2464 }
2465
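/*
 * Bring up the RDMA CM listener for a network portal: allocate the
 * isert_np accept context, bind the passed sockaddr, and listen with
 * an ISERT_RDMA_LISTEN_BACKLOG deep connect backlog.
 */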
2466 static int
2467 isert_setup_np(struct iscsi_np *np,
2468                struct __kernel_sockaddr_storage *ksockaddr)
2469 {
2470         struct isert_np *isert_np;
2471         struct rdma_cm_id *isert_lid;
2472         struct sockaddr *sa;
2473         int ret;
2474
2475         isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2476         if (!isert_np) {
2477                 pr_err("Unable to allocate struct isert_np\n");
2478                 return -ENOMEM;
2479         }
2480         init_waitqueue_head(&isert_np->np_accept_wq);
2481         mutex_init(&isert_np->np_accept_mutex);
2482         INIT_LIST_HEAD(&isert_np->np_accept_list);
2483         init_completion(&isert_np->np_login_comp);
2484
2485         sa = (struct sockaddr *)ksockaddr;
2486         pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
2487         /*
2488          * Set up np->np_sockaddr from the sockaddr passed in from the
2489          * iscsi_target_configfs.c code.
2490          */
2491         memcpy(&np->np_sockaddr, ksockaddr,
2492                sizeof(struct __kernel_sockaddr_storage));
2493
2494         isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
2495                                 IB_QPT_RC);
2496         if (IS_ERR(isert_lid)) {
2497                 pr_err("rdma_create_id() for isert_cma_handler failed: %ld\n",
2498                        PTR_ERR(isert_lid));
2499                 ret = PTR_ERR(isert_lid);
2500                 goto out;
2501         }
2502
2503         ret = rdma_bind_addr(isert_lid, sa);
2504         if (ret) {
2505                 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2506                 goto out_lid;
2507         }
2508
2509         ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2510         if (ret) {
2511                 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2512                 goto out_lid;
2513         }
2514
2515         isert_np->np_cm_id = isert_lid;
2516         np->np_context = isert_np;
2517         pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2518
2519         return 0;
2520
2521 out_lid:
2522         rdma_destroy_id(isert_lid);
2523 out:
2524         kfree(isert_np);
2525         return ret;
2526 }
2527
2528 static int
2529 isert_check_accept_queue(struct isert_np *isert_np)
2530 {
2531         int empty;
2532
2533         mutex_lock(&isert_np->np_accept_mutex);
2534         empty = list_empty(&isert_np->np_accept_list);
2535         mutex_unlock(&isert_np->np_accept_mutex);
2536
2537         return empty;
2538 }
2539
2540 static int
2541 isert_rdma_accept(struct isert_conn *isert_conn)
2542 {
2543         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2544         struct rdma_conn_param cp;
2545         int ret;
2546
2547         memset(&cp, 0, sizeof(struct rdma_conn_param));
2548         cp.responder_resources = isert_conn->responder_resources;
2549         cp.initiator_depth = isert_conn->initiator_depth;
2550         cp.retry_count = 7;
2551         cp.rnr_retry_count = 7;
2552
2553         pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2554
2555         ret = rdma_accept(cm_id, &cp);
2556         if (ret) {
2557                 pr_err("rdma_accept() failed with: %d\n", ret);
2558                 return ret;
2559         }
2560
2561         pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2562
2563         return 0;
2564 }
2565
2566 static int
2567 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2568 {
2569         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2570         int ret;
2571
2572         pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2573         /*
2574          * For login requests after the first PDU, isert_rx_login_req() will
2575          * kick schedule_delayed_work(&conn->login_work) as the packet is
2576          * received, which turns this callback from iscsi_target_do_login_rx()
2577          * into a NOP.
2578          */
2579         if (!login->first_request)
2580                 return 0;
2581
2582         ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2583         if (ret)
2584                 return ret;
2585
2586         pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2587         return 0;
2588 }
2589
2590 static void
2591 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2592                     struct isert_conn *isert_conn)
2593 {
2594         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2595         struct rdma_route *cm_route = &cm_id->route;
2596         struct sockaddr_in *sock_in;
2597         struct sockaddr_in6 *sock_in6;
2598
2599         conn->login_family = np->np_sockaddr.ss_family;
2600
2601         if (np->np_sockaddr.ss_family == AF_INET6) {
2602                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2603                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2604                          &sock_in6->sin6_addr.in6_u);
2605                 conn->login_port = ntohs(sock_in6->sin6_port);
2606
2607                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2608                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2609                          &sock_in6->sin6_addr.in6_u);
2610                 conn->local_port = ntohs(sock_in6->sin6_port);
2611         } else {
2612                 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2613                 sprintf(conn->login_ip, "%pI4",
2614                         &sock_in->sin_addr.s_addr);
2615                 conn->login_port = ntohs(sock_in->sin_port);
2616
2617                 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2618                 sprintf(conn->local_ip, "%pI4",
2619                         &sock_in->sin_addr.s_addr);
2620                 conn->local_port = ntohs(sock_in->sin_port);
2621         }
2622 }
2623
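/*
 * iscsit accept callback: sleep until the CM event handler registered
 * in isert_setup_np() queues a new isert_conn on np_accept_list (or
 * the np thread is reset), then post the initial login recv via
 * isert_rdma_post_recvl() and complete the rdma_accept() handshake.
 * The max_accept counter bounds spurious wakeups on an empty list.
 */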
2624 static int
2625 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2626 {
2627         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2628         struct isert_conn *isert_conn;
2629         int max_accept = 0, ret;
2630
2631 accept_wait:
2632         ret = wait_event_interruptible(isert_np->np_accept_wq,
2633                         !isert_check_accept_queue(isert_np) ||
2634                         np->np_thread_state == ISCSI_NP_THREAD_RESET);
2635         if (max_accept > 5)
2636                 return -ENODEV;
2637
2638         spin_lock_bh(&np->np_thread_lock);
2639         if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2640                 spin_unlock_bh(&np->np_thread_lock);
2641                 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2642                 return -ENODEV;
2643         }
2644         spin_unlock_bh(&np->np_thread_lock);
2645
2646         mutex_lock(&isert_np->np_accept_mutex);
2647         if (list_empty(&isert_np->np_accept_list)) {
2648                 mutex_unlock(&isert_np->np_accept_mutex);
2649                 max_accept++;
2650                 goto accept_wait;
2651         }
2652         isert_conn = list_first_entry(&isert_np->np_accept_list,
2653                         struct isert_conn, conn_accept_node);
2654         list_del_init(&isert_conn->conn_accept_node);
2655         mutex_unlock(&isert_np->np_accept_mutex);
2656
2657         conn->context = isert_conn;
2658         isert_conn->conn = conn;
2659         max_accept = 0;
2660
2661         ret = isert_rdma_post_recvl(isert_conn);
2662         if (ret)
2663                 return ret;
2664
2665         ret = isert_rdma_accept(isert_conn);
2666         if (ret)
2667                 return ret;
2668
2669         isert_set_conn_info(np, conn, isert_conn);
2670
2671         pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2672         return 0;
2673 }
2674
2675 static void
2676 isert_free_np(struct iscsi_np *np)
2677 {
2678         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2679
2680         rdma_destroy_id(isert_np->np_cm_id);
2681
2682         np->np_context = NULL;
2683         kfree(isert_np);
2684 }
2685
2686 static int isert_check_state(struct isert_conn *isert_conn, int state)
2687 {
2688         int ret;
2689
2690         mutex_lock(&isert_conn->conn_mutex);
2691         ret = (isert_conn->state == state);
2692         mutex_unlock(&isert_conn->conn_mutex);
2693
2694         return ret;
2695 }
2696
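/*
 * Connection teardown from the iscsit side: undo the deferred logout
 * send accounting, disconnect the CM id if the connection is still up,
 * and wait out the per-state events before dropping the final
 * reference with isert_put_conn().
 */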
2697 static void isert_free_conn(struct iscsi_conn *conn)
2698 {
2699         struct isert_conn *isert_conn = conn->context;
2700
2701         pr_debug("isert_free_conn: Starting\n");
2702         /*
2703          * Decrement post_send_buf_count for special case when called
2704          * from isert_do_control_comp() -> iscsit_logout_post_handler()
2705          */
2706         mutex_lock(&isert_conn->conn_mutex);
2707         if (isert_conn->logout_posted)
2708                 atomic_dec(&isert_conn->post_send_buf_count);
2709
2710         if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2711                 pr_debug("Calling rdma_disconnect from isert_free_conn\n");
2712                 rdma_disconnect(isert_conn->conn_cm_id);
2713         }
2714         /*
2715          * Only wait for conn_wait_comp_err if the isert_conn made it
2716          * into full feature phase.
2717          */
2718         if (isert_conn->state == ISER_CONN_UP) {
2719                 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2720                          isert_conn->state);
2721                 mutex_unlock(&isert_conn->conn_mutex);
2722
2723                 wait_event(isert_conn->conn_wait_comp_err,
2724                           (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
2725
2726                 wait_event(isert_conn->conn_wait,
2727                           (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2728
2729                 isert_put_conn(isert_conn);
2730                 return;
2731         }
2732         if (isert_conn->state == ISER_CONN_INIT) {
2733                 mutex_unlock(&isert_conn->conn_mutex);
2734                 isert_put_conn(isert_conn);
2735                 return;
2736         }
2737         pr_debug("isert_free_conn: wait_event conn_wait %d\n",
2738                  isert_conn->state);
2739         mutex_unlock(&isert_conn->conn_mutex);
2740
2741         wait_event(isert_conn->conn_wait,
2742                   (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2743
2744         isert_put_conn(isert_conn);
2745 }
2746
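/*
 * Hook the verbs implementation above into the generic iSCSI target
 * core; priv_size reserves room for an isert_cmd alongside each
 * iscsi_cmd allocation, which iscsit_priv_cmd() above retrieves.
 */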
2747 static struct iscsit_transport iser_target_transport = {
2748         .name                   = "IB/iSER",
2749         .transport_type         = ISCSI_INFINIBAND,
2750         .priv_size              = sizeof(struct isert_cmd),
2751         .owner                  = THIS_MODULE,
2752         .iscsit_setup_np        = isert_setup_np,
2753         .iscsit_accept_np       = isert_accept_np,
2754         .iscsit_free_np         = isert_free_np,
2755         .iscsit_free_conn       = isert_free_conn,
2756         .iscsit_get_login_rx    = isert_get_login_rx,
2757         .iscsit_put_login_tx    = isert_put_login_tx,
2758         .iscsit_immediate_queue = isert_immediate_queue,
2759         .iscsit_response_queue  = isert_response_queue,
2760         .iscsit_get_dataout     = isert_get_dataout,
2761         .iscsit_queue_data_in   = isert_put_datain,
2762         .iscsit_queue_status    = isert_put_response,
2763 };
2764
2765 static int __init isert_init(void)
2766 {
2767         int ret;
2768
2769         isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2770         if (!isert_rx_wq) {
2771                 pr_err("Unable to allocate isert_rx_wq\n");
2772                 return -ENOMEM;
2773         }
2774
2775         isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2776         if (!isert_comp_wq) {
2777                 pr_err("Unable to allocate isert_comp_wq\n");
2778                 ret = -ENOMEM;
2779                 goto destroy_rx_wq;
2780         }
2781
2782         iscsit_register_transport(&iser_target_transport);
2783         pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2784         return 0;
2785
2786 destroy_rx_wq:
2787         destroy_workqueue(isert_rx_wq);
2788         return ret;
2789 }
2790
2791 static void __exit isert_exit(void)
2792 {
2793         destroy_workqueue(isert_comp_wq);
2794         destroy_workqueue(isert_rx_wq);
2795         iscsit_unregister_transport(&iser_target_transport);
2796         pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2797 }
2798
2799 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2800 MODULE_VERSION("0.1");
2801 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2802 MODULE_LICENSE("GPL");
2803
2804 module_init(isert_init);
2805 module_exit(isert_exit);