/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN          8
#define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        pr_err("isert_qp_event_callback event: %d\n", e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                pr_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
        pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

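/*
 * Set up the RC queue pair for a new connection.  The TX/RX completion
 * queue pair is chosen under device_list_mutex by scanning
 * cq_active_qps[] for the index with the fewest active QPs, spreading
 * connections across the completion vectors created in
 * isert_create_device_ib_res().
 */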
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        int ret, index, min_index = 0;

        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
                    device->cq_active_qps[min_index])
                        min_index = index;
        device->cq_active_qps[min_index]++;
        pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = device->dev_tx_cq[min_index];
        attr.recv_cq = device->dev_rx_cq[min_index];
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
        attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;

        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
        pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
                 isert_conn->conn_pd->device);

        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp() failed: %d\n", ret);
                return ret;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

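/*
 * Allocate and DMA-map the ISERT_QP_MAX_RECV_DTOS receive descriptors
 * for a connection.  Each descriptor is mapped once for DMA_FROM_DEVICE
 * and its ib_sge is pre-filled with the connection's local DMA lkey, so
 * isert_post_recv() only has to chain work requests.  On a mapping
 * failure, the descriptors mapped so far are unwound before returning
 * -ENOMEM.
 */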
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = isert_conn->conn_mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;
        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

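/*
 * One-time setup of per-ib_device resources: query the device
 * attributes, pick fast registration (FRMR) vs. plain DMA mapping for
 * the RDMA callbacks, and create an RX/TX completion queue pair per
 * vector, capped by both num_online_cpus() and ISERT_MAX_CQ.
 */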
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
        int ret = 0, i, j;

        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;

        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                device->use_fastreg = 1;
                device->reg_rdma_mem = isert_reg_rdma;
                device->unreg_rdma_mem = isert_unreg_rdma;
        } else {
                device->use_fastreg = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }

        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors, "
                 "fast registration: %d\n",
                 device->cqs_used, device->ib_device->name,
                 device->ib_device->num_comp_vectors, device->use_fastreg);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
                pr_err("Unable to allocate device->cq_desc\n");
                return -ENOMEM;
        }
        cq_desc = device->cq_desc;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;

                INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
                        goto out_cq;
                }

                INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;

                ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        return 0;

out_cq:
        /* also walk index i, whose CQs may be partially created */
        for (j = 0; j <= i; j++) {
                cq_desc = &device->cq_desc[j];

                if (device->dev_rx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_rx_work);
                        ib_destroy_cq(device->dev_rx_cq[j]);
                }
                if (device->dev_tx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_tx_work);
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
        kfree(device->cq_desc);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        struct isert_cq_desc *cq_desc;
        int i;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc = &device->cq_desc[i];

                cancel_work_sync(&cq_desc->cq_rx_work);
                cancel_work_sync(&cq_desc->cq_tx_work);
                ib_destroy_cq(device->dev_rx_cq[i]);
                ib_destroy_cq(device->dev_tx_cq[i]);
                device->dev_rx_cq[i] = NULL;
                device->dev_tx_cq[i] = NULL;
        }

        kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

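/*
 * Find the isert_device backing a new CM id, or create one on first
 * use.  Devices are cached on device_list keyed by node_guid and
 * reference counted; isert_device_try_release() drops the reference
 * and frees the IB resources once the last connection is gone.
 */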
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;

        if (list_empty(&isert_conn->conn_fr_pool))
                return;

        pr_debug("Freeing conn %p fastreg pool\n", isert_conn);

        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_fr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                kfree(fr_desc);
                ++i;
        }

        if (i < isert_conn->conn_fr_pool_size)
                pr_warn("Pool still has %d regions registered\n",
                        isert_conn->conn_fr_pool_size - i);
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
                     struct fast_reg_descriptor *fr_desc)
{
        fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                         ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_frpl)) {
                pr_err("Failed to allocate data frpl err=%ld\n",
                       PTR_ERR(fr_desc->data_frpl));
                return PTR_ERR(fr_desc->data_frpl);
        }

        fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_mr)) {
                pr_err("Failed to allocate data frmr err=%ld\n",
                       PTR_ERR(fr_desc->data_mr));
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                return PTR_ERR(fr_desc->data_mr);
        }
        pr_debug("Create fr_desc %p page_list %p\n",
                 fr_desc, fr_desc->data_frpl->page_list);

        fr_desc->valid = true;

        return 0;
}

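/*
 * Pre-allocate the per-connection pool of fast registration
 * descriptors.  The pool is sized from the session tag count,
 * (tag_num * 2) + ISCSIT_EXTRA_TAGS, so that a descriptor is available
 * for every outstanding RDMA operation without allocating in the I/O
 * path.
 */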
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        struct se_session *se_sess = isert_conn->conn->sess->se_sess;
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
        int i, ret, tag_num;
        /*
         * Setup the number of FRMRs based upon the number of tags
         * available to session in iscsi_target_locate_portal().
         */
        tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
        tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

        isert_conn->conn_fr_pool_size = 0;
        for (i = 0; i < tag_num; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        pr_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                ret = isert_create_fr_desc(device->ib_device,
                                           isert_conn->conn_pd, fr_desc);
                if (ret) {
                        pr_err("Failed to create fastreg descriptor err=%d\n",
                               ret);
                        kfree(fr_desc);
                        goto err;
                }

                list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                isert_conn->conn_fr_pool_size++;
        }

        pr_debug("Created conn %p fastreg pool size=%d\n",
                 isert_conn, isert_conn->conn_fr_pool_size);

        return 0;

err:
        isert_conn_free_fastreg_pool(isert_conn);
        return ret;
}

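/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate the isert_conn,
 * DMA-map the login request/response buffers, take a reference on the
 * isert_device, allocate the PD + DMA MR, create the QP, and finally
 * queue the connection on np_accept_list for the iSCSI login thread to
 * pick up.
 */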
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iscsi_np *np = cma_id->context;
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                pr_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
        pr_debug("Using responder_resources: %u initiator_depth: %u\n",
                 isert_conn->responder_resources, isert_conn->initiator_depth);

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                pr_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
        pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
                ret = PTR_ERR(isert_conn->conn_pd);
                pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_pd;
        }

        isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
                                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(isert_conn->conn_mr)) {
                ret = PTR_ERR(isert_conn->conn_mr);
                pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_mr;
        }

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
        wake_up(&isert_np->np_accept_wq);
        return 0;

out_conn_dev:
        ib_dereg_mr(isert_conn->conn_mr);
out_mr:
        ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
        int cq_index;

        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        if (device && device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);

        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
                isert_conn->conn_device->cq_active_qps[cq_index]--;

                rdma_destroy_qp(isert_conn->conn_cm_id);
        }

        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);

        ib_dereg_mr(isert_conn->conn_mr);
        ib_dealloc_pd(isert_conn->conn_pd);

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);

        pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        pr_debug("Calling isert_connect_release for final kref %s/%d\n",
                 current->comm, current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
        struct isert_conn *isert_conn = container_of(work,
                                struct isert_conn, conn_logout_work);

        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        mutex_lock(&isert_conn->conn_mutex);
        if (isert_conn->state == ISER_CONN_UP)
                isert_conn->state = ISER_CONN_TERMINATING;

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                mutex_unlock(&isert_conn->conn_mutex);
                goto wake_up;
        }
        if (!isert_conn->conn_cm_id) {
                mutex_unlock(&isert_conn->conn_mutex);
                isert_put_conn(isert_conn);
                return;
        }
        if (!isert_conn->logout_posted) {
                pr_debug("Calling rdma_disconnect for !logout_posted from"
                         " isert_disconnect_work\n");
                rdma_disconnect(isert_conn->conn_cm_id);
                mutex_unlock(&isert_conn->conn_mutex);
                iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                goto wake_up;
        }
        mutex_unlock(&isert_conn->conn_mutex);

wake_up:
        complete(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
                isert_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
                pr_err("Unknown RDMA CMA event: %d\n", event->event);
                break;
        }

        if (ret != 0) {
                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
                       event->event, ret);
                dump_stack();
        }

        return ret;
}

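/*
 * Post 'count' receive work requests, chained into a single
 * ib_post_recv() call.  conn_rx_desc_head is advanced with a
 * power-of-two mask, so this relies on ISERT_QP_MAX_RECV_DTOS being a
 * power of two; the new head is only committed once the post succeeds.
 */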
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id    = (unsigned long)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
                pr_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next    = NULL;
        send_wr.wr_id   = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&isert_conn->post_send_buf_count);

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret) {
                pr_err("ib_post_send() failed, ret: %d\n", ret);
                atomic_dec(&isert_conn->post_send_buf_count);
        }

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
                pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                pr_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

        pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
                 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
                 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

        return 0;
}

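/*
 * Initialize a SEND work request for a response PDU.  To cut down TX
 * completion interrupts, unsignaled descriptors are parked on
 * conn_comp_llist until ISERT_COMP_BATCH_COUNT is reached; the batch
 * is then handed to the next signaled send via comp_llnode_batch so a
 * single completion can retire the whole chain.
 */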
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr, bool coalesce)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        /*
         * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
         * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
         */
        mutex_lock(&isert_conn->conn_mutex);
        if (coalesce && isert_conn->state == ISER_CONN_UP &&
            ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
                tx_desc->llnode_active = true;
                llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
                mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
        isert_conn->conn_comp_batch = 0;
        tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
        mutex_unlock(&isert_conn->conn_mutex);

        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;

        pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
                sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                pr_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }

        pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        return ret;
}

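/*
 * Send a login response PDU.  When the response carries
 * login_complete, the full-feature phase resources are set up first:
 * the FRMR pool (if the device supports it), the RX descriptor ring,
 * and an initial batch of ISERT_MIN_POSTED_RX receives, before the
 * connection is marked ISER_CONN_UP.
 */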
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        if (isert_conn->conn_device->use_fastreg) {
                                ret = isert_conn_create_fastreg_pool(isert_conn);
                                if (ret) {
                                        pr_err("Conn: %p failed to create"
                                               " fastreg pool\n", isert_conn);
                                        return ret;
                                }
                        }

                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        if (!login) {
                pr_err("conn->conn_login is NULL\n");
                dump_stack();
                return;
        }

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min      = login_req->min_version;
                login->version_max      = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag    = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid              = be16_to_cpu(login_req->cid);
                login->tsih             = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        if (login->first_request) {
                complete(&isert_conn->conn_login_comp);
                return;
        }
        schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
                pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsi_cmd = cmd;

        return cmd;
}

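/*
 * Handle an iSCSI SCSI command PDU.  Any immediate data that arrived
 * in the same receive is copied from rx_desc into the se_cmd
 * scatterlist with sg_copy_from_buffer(); solicited data is fetched
 * later by RDMA READ using the stag/va pair the initiator advertised
 * in the iSER header.
 */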
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);

        return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;

        rc = iscsit_setup_nop_out(conn, cmd, hdr);
        if (rc < 0)
                return rc;
        /*
         * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
         */

        return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in;

        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;

        text_in = kzalloc(payload_length, GFP_KERNEL);
        if (!text_in) {
                pr_err("Unable to allocate text_in of payload_length: %u\n",
                       payload_length);
                return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;

        memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

        return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_session *sess = conn->sess;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        if (sess->sess_ops->SessionType &&
            opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT) {
                pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
                       " ignoring\n", opcode);
                return 0;
        }

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                           rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        case ISCSI_OP_TEXT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

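/*
 * Parse the iSER header of a received PDU.  ISER_RSV/ISER_WSV flag
 * the remote read/write stag + va pairs the initiator advertised for
 * RDMA; they are byte-swapped here and passed down to
 * isert_rx_opcode() along with the iSCSI header.
 */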
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
        struct iser_hdr *iser_hdr = &rx_desc->iser_header;
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;
        int rc;

        switch (iser_hdr->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
                        pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
                        pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
1327                                  write_stag, (unsigned long long)write_va);
1328                 }
1329
1330                 pr_debug("ISER ISCSI_CTRL PDU\n");
1331                 break;
1332         case ISER_HELLO:
1333                 pr_err("iSER Hello message\n");
1334                 break;
1335         default:
1336                 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1337                 break;
1338         }
1339
1340         rc = isert_rx_opcode(isert_conn, rx_desc,
1341                              read_stag, read_va, write_stag, write_va);
1342 }
1343
1344 static void
1345 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1346                     unsigned long xfer_len)
1347 {
1348         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1349         struct iscsi_hdr *hdr;
1350         u64 rx_dma;
1351         int rx_buflen, outstanding;
1352
1353         if ((char *)desc == isert_conn->login_req_buf) {
1354                 rx_dma = isert_conn->login_req_dma;
1355                 rx_buflen = ISER_RX_LOGIN_SIZE;
1356                 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1357                          rx_dma, rx_buflen);
1358         } else {
1359                 rx_dma = desc->dma_addr;
1360                 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1361                 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1362                          rx_dma, rx_buflen);
1363         }
1364
1365         ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1366
1367         hdr = &desc->iscsi_header;
1368         pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1369                  hdr->opcode, hdr->itt, hdr->flags,
1370                  (int)(xfer_len - ISER_HEADERS_LEN));
1371
1372         if ((char *)desc == isert_conn->login_req_buf)
1373                 isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
1374                                    isert_conn);
1375         else
1376                 isert_rx_do_work(desc, isert_conn);
1377
1378         ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1379                                       DMA_FROM_DEVICE);
1380
1381         isert_conn->post_recv_buf_count--;
1382         pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1383                  isert_conn->post_recv_buf_count);
1384
1385         if ((char *)desc == isert_conn->login_req_buf)
1386                 return;
1387
1388         outstanding = isert_conn->post_recv_buf_count;
1389         if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1390                 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1391                                 ISERT_MIN_POSTED_RX);
1392                 err = isert_post_recv(isert_conn, count);
1393                 if (err) {
1394                         pr_err("isert_post_recv() count: %d failed, %d\n",
1395                                count, err);
1396                 }
1397         }
1398 }
1399
1400 static void
1401 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1402 {
1403         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1404         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1405
1406         pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1407         if (wr->sge) {
1408                 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1409                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1410                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1411                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1412                 wr->sge = NULL;
1413         }
1414
1415         if (wr->send_wr) {
1416                 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1417                 kfree(wr->send_wr);
1418                 wr->send_wr = NULL;
1419         }
1420
1421         if (wr->ib_sge) {
1422                 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1423                 kfree(wr->ib_sge);
1424                 wr->ib_sge = NULL;
1425         }
1426 }
1427
1428 static void
1429 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1430 {
1431         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1432         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1433         LIST_HEAD(unmap_list);
1434
1435         pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
1436
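             /*
              * Fast registration descriptors are pooled per connection:
              * rather than destroying the MR, return the descriptor to
              * conn_fr_pool so a later isert_reg_rdma() call can reuse it.
              */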
1437         if (wr->fr_desc) {
1438                 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1439                          isert_cmd, wr->fr_desc);
1440                 spin_lock_bh(&isert_conn->conn_lock);
1441                 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1442                 spin_unlock_bh(&isert_conn->conn_lock);
1443                 wr->fr_desc = NULL;
1444         }
1445
1446         if (wr->sge) {
1447                 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1448                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1449                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1450                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1451                 wr->sge = NULL;
1452         }
1453
1454         wr->ib_sge = NULL;
1455         wr->send_wr = NULL;
1456 }
1457
1458 static void
1459 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1460 {
1461         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1462         struct isert_conn *isert_conn = isert_cmd->conn;
1463         struct iscsi_conn *conn = isert_conn->conn;
1464         struct isert_device *device = isert_conn->conn_device;
1465
1466         pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1467
1468         switch (cmd->iscsi_opcode) {
1469         case ISCSI_OP_SCSI_CMD:
1470                 spin_lock_bh(&conn->cmd_lock);
1471                 if (!list_empty(&cmd->i_conn_node))
1472                         list_del_init(&cmd->i_conn_node);
1473                 spin_unlock_bh(&conn->cmd_lock);
1474
1475                 if (cmd->data_direction == DMA_TO_DEVICE) {
1476                         iscsit_stop_dataout_timer(cmd);
1477                         /*
1478                          * Check for special case during comp_err where
1479                          * WRITE_PENDING has been handed off from core,
1480                          * but requires an extra target_put_sess_cmd()
1481                          * before transport_generic_free_cmd() below.
1482                          */
1483                         if (comp_err &&
1484                             cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1485                                 struct se_cmd *se_cmd = &cmd->se_cmd;
1486
1487                                 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1488                         }
1489                 }
1490
1491                 device->unreg_rdma_mem(isert_cmd, isert_conn);
1492                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1493                 break;
1494         case ISCSI_OP_SCSI_TMFUNC:
1495                 spin_lock_bh(&conn->cmd_lock);
1496                 if (!list_empty(&cmd->i_conn_node))
1497                         list_del_init(&cmd->i_conn_node);
1498                 spin_unlock_bh(&conn->cmd_lock);
1499
1500                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1501                 break;
1502         case ISCSI_OP_REJECT:
1503         case ISCSI_OP_NOOP_OUT:
1504         case ISCSI_OP_TEXT:
1505                 spin_lock_bh(&conn->cmd_lock);
1506                 if (!list_empty(&cmd->i_conn_node))
1507                         list_del_init(&cmd->i_conn_node);
1508                 spin_unlock_bh(&conn->cmd_lock);
1509
1510                 /*
1511                  * Handle special case for REJECT when iscsi_add_reject*() has
1512                  * overwritten the original iscsi_opcode assignment, and the
1513                  * associated cmd->se_cmd needs to be released.
1514                  */
1515                 if (cmd->se_cmd.se_tfo != NULL) {
1516                         pr_debug("Calling transport_generic_free_cmd from"
1517                                  " isert_put_cmd for 0x%02x\n",
1518                                  cmd->iscsi_opcode);
1519                         transport_generic_free_cmd(&cmd->se_cmd, 0);
1520                         break;
1521                 }
1522                 /*
1523                  * Fall-through
1524                  */
1525         default:
1526                 iscsit_release_cmd(cmd);
1527                 break;
1528         }
1529 }
1530
1531 static void
1532 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1533 {
1534         if (tx_desc->dma_addr != 0) {
1535                 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1536                 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1537                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
1538                 tx_desc->dma_addr = 0;
1539         }
1540 }
1541
1542 static void
1543 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1544                      struct ib_device *ib_dev, bool comp_err)
1545 {
1546         if (isert_cmd->pdu_buf_dma != 0) {
1547                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1548                 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1549                                     isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1550                 isert_cmd->pdu_buf_dma = 0;
1551         }
1552
1553         isert_unmap_tx_desc(tx_desc, ib_dev);
1554         isert_put_cmd(isert_cmd, comp_err);
1555 }
1556
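     /*
      * RDMA_READ completion: the entire WRITE payload has now been pulled
      * in from the initiator, so release the RDMA resources, mark the last
      * DataOut as received, and hand the command to the target core for
      * execution.
      */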
1557 static void
1558 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1559                            struct isert_cmd *isert_cmd)
1560 {
1561         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1562         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1563         struct se_cmd *se_cmd = &cmd->se_cmd;
1564         struct isert_conn *isert_conn = isert_cmd->conn;
1565         struct isert_device *device = isert_conn->conn_device;
1566
1567         iscsit_stop_dataout_timer(cmd);
1568         device->unreg_rdma_mem(isert_cmd, isert_conn);
1569         cmd->write_data_done = wr->cur_rdma_length;
1570         wr->send_wr_num = 0;
1571
1572         pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1573         spin_lock_bh(&cmd->istate_lock);
1574         cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1575         cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1576         spin_unlock_bh(&cmd->istate_lock);
1577
1578         target_execute_cmd(se_cmd);
1579 }
1580
1581 static void
1582 isert_do_control_comp(struct work_struct *work)
1583 {
1584         struct isert_cmd *isert_cmd = container_of(work,
1585                         struct isert_cmd, comp_work);
1586         struct isert_conn *isert_conn = isert_cmd->conn;
1587         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1588         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1589
1590         switch (cmd->i_state) {
1591         case ISTATE_SEND_TASKMGTRSP:
1592                 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1593
1594                 atomic_dec(&isert_conn->post_send_buf_count);
1595                 iscsit_tmr_post_handler(cmd, cmd->conn);
1596
1597                 cmd->i_state = ISTATE_SENT_STATUS;
1598                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1599                 break;
1600         case ISTATE_SEND_REJECT:
1601                 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1602                 atomic_dec(&isert_conn->post_send_buf_count);
1603
1604                 cmd->i_state = ISTATE_SENT_STATUS;
1605                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1606                 break;
1607         case ISTATE_SEND_LOGOUTRSP:
1608                 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1609                 /*
1610                  * Call atomic_dec(&isert_conn->post_send_buf_count)
1611                  * from isert_wait_conn()
1612                  */
1613                 isert_conn->logout_posted = true;
1614                 iscsit_logout_post_handler(cmd, cmd->conn);
1615                 break;
1616         case ISTATE_SEND_TEXTRSP:
1617                 atomic_dec(&isert_conn->post_send_buf_count);
1618                 cmd->i_state = ISTATE_SENT_STATUS;
1619                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1620                 break;
1621         default:
1622                 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1623                 dump_stack();
1624                 break;
1625         }
1626 }
1627
1628 static void
1629 isert_response_completion(struct iser_tx_desc *tx_desc,
1630                           struct isert_cmd *isert_cmd,
1631                           struct isert_conn *isert_conn,
1632                           struct ib_device *ib_dev)
1633 {
1634         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1635         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1636
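             /*
              * Control-type responses (TMR, logout, reject, text) are
              * handed off to isert_comp_wq so that their iscsit post
              * handlers run from dedicated process context rather than
              * inline in the CQ drain path.
              */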
1637         if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1638             cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1639             cmd->i_state == ISTATE_SEND_REJECT ||
1640             cmd->i_state == ISTATE_SEND_TEXTRSP) {
1641                 isert_unmap_tx_desc(tx_desc, ib_dev);
1642
1643                 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1644                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1645                 return;
1646         }
1647         atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1648
1649         cmd->i_state = ISTATE_SENT_STATUS;
1650         isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1651 }
1652
1653 static void
1654 __isert_send_completion(struct iser_tx_desc *tx_desc,
1655                         struct isert_conn *isert_conn)
1656 {
1657         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1658         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1659         struct isert_rdma_wr *wr;
1660
1661         if (!isert_cmd) {
1662                 atomic_dec(&isert_conn->post_send_buf_count);
1663                 isert_unmap_tx_desc(tx_desc, ib_dev);
1664                 return;
1665         }
1666         wr = &isert_cmd->rdma_wr;
1667
1668         switch (wr->iser_ib_op) {
1669         case ISER_IB_RECV:
1670                 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1671                 dump_stack();
1672                 break;
1673         case ISER_IB_SEND:
1674                 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1675                 isert_response_completion(tx_desc, isert_cmd,
1676                                           isert_conn, ib_dev);
1677                 break;
1678         case ISER_IB_RDMA_WRITE:
1679                 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1680                 dump_stack();
1681                 break;
1682         case ISER_IB_RDMA_READ:
1683                 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1684
1685                 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1686                 isert_completion_rdma_read(tx_desc, isert_cmd);
1687                 break;
1688         default:
1689                 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1690                 dump_stack();
1691                 break;
1692         }
1693 }
1694
1695 static void
1696 isert_send_completion(struct iser_tx_desc *tx_desc,
1697                       struct isert_conn *isert_conn)
1698 {
1699         struct llist_node *llnode = tx_desc->comp_llnode_batch;
1700         struct iser_tx_desc *t;
1701         /*
1702          * Drain coalesced completion llist starting from comp_llnode_batch
1703          * Drain the coalesced completion llist starting from comp_llnode_batch,
1704          * set up in isert_init_send_wr(), then complete the trailing tx_desc.
1705         while (llnode) {
1706                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1707                 llnode = llist_next(llnode);
1708                 __isert_send_completion(t, isert_conn);
1709         }
1710         __isert_send_completion(tx_desc, isert_conn);
1711 }
1712
1713 static void
1714 isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
1715 {
1716         struct llist_node *llnode;
1717         struct isert_rdma_wr *wr;
1718         struct iser_tx_desc *t;
1719
1720         mutex_lock(&isert_conn->conn_mutex);
1721         llnode = llist_del_all(&isert_conn->conn_comp_llist);
1722         isert_conn->conn_comp_batch = 0;
1723         mutex_unlock(&isert_conn->conn_mutex);
1724
1725         while (llnode) {
1726                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1727                 llnode = llist_next(llnode);
1728                 wr = &t->isert_cmd->rdma_wr;
1729
1730                 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1731                 isert_completion_put(t, t->isert_cmd, ib_dev, true);
1732         }
1733 }
1734
1735 static void
1736 isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1737 {
1738         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1739         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1740         struct llist_node *llnode = tx_desc->comp_llnode_batch;
1741         struct isert_rdma_wr *wr;
1742         struct iser_tx_desc *t;
1743
1744         while (llnode) {
1745                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1746                 llnode = llist_next(llnode);
1747                 wr = &t->isert_cmd->rdma_wr;
1748
1749                 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1750                 isert_completion_put(t, t->isert_cmd, ib_dev, true);
1751         }
1752         tx_desc->comp_llnode_batch = NULL;
1753
1754         if (!isert_cmd)
1755                 isert_unmap_tx_desc(tx_desc, ib_dev);
1756         else
1757                 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1758 }
1759
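     /*
      * Error/flush teardown for the RX path.  This is deferred until the
      * last posted receive has drained (post_recv_buf_count == 0); it then
      * flushes the coalesced TX completion list, waits for outstanding
      * se_cmd references and posted sends, and finally marks the connection
      * ISER_CONN_DOWN and completes conn_wait_comp_err for isert_wait_conn().
      */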
1760 static void
1761 isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1762 {
1763         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1764         struct iscsi_conn *conn = isert_conn->conn;
1765
1766         if (isert_conn->post_recv_buf_count)
1767                 return;
1768
1769         isert_cq_drain_comp_llist(isert_conn, ib_dev);
1770
1771         if (conn->sess) {
1772                 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1773                 target_wait_for_sess_cmds(conn->sess->se_sess);
1774         }
1775
1776         while (atomic_read(&isert_conn->post_send_buf_count))
1777                 msleep(3000);
1778
1779         mutex_lock(&isert_conn->conn_mutex);
1780         isert_conn->state = ISER_CONN_DOWN;
1781         mutex_unlock(&isert_conn->conn_mutex);
1782
1783         complete(&isert_conn->conn_wait_comp_err);
1784 }
1785
1786 static void
1787 isert_cq_tx_work(struct work_struct *work)
1788 {
1789         struct isert_cq_desc *cq_desc = container_of(work,
1790                                 struct isert_cq_desc, cq_tx_work);
1791         struct isert_device *device = cq_desc->device;
1792         int cq_index = cq_desc->cq_index;
1793         struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1794         struct isert_conn *isert_conn;
1795         struct iser_tx_desc *tx_desc;
1796         struct ib_wc wc;
1797
1798         while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1799                 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1800                 isert_conn = wc.qp->qp_context;
1801
1802                 if (wc.status == IB_WC_SUCCESS) {
1803                         isert_send_completion(tx_desc, isert_conn);
1804                 } else {
1805                         pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1806                         pr_debug("TX wc.status: 0x%08x\n", wc.status);
1807                         pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1808
1809                         if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1810                                 if (tx_desc->llnode_active)
1811                                         continue;
1812
1813                                 atomic_dec(&isert_conn->post_send_buf_count);
1814                                 isert_cq_tx_comp_err(tx_desc, isert_conn);
1815                         }
1816                 }
1817         }
1818
1819         ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1820 }
1821
1822 static void
1823 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1824 {
1825         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1826
1827         queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1828 }
1829
1830 static void
1831 isert_cq_rx_work(struct work_struct *work)
1832 {
1833         struct isert_cq_desc *cq_desc = container_of(work,
1834                         struct isert_cq_desc, cq_rx_work);
1835         struct isert_device *device = cq_desc->device;
1836         int cq_index = cq_desc->cq_index;
1837         struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1838         struct isert_conn *isert_conn;
1839         struct iser_rx_desc *rx_desc;
1840         struct ib_wc wc;
1841         unsigned long xfer_len;
1842
1843         while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1844                 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1845                 isert_conn = wc.qp->qp_context;
1846
1847                 if (wc.status == IB_WC_SUCCESS) {
1848                         xfer_len = (unsigned long)wc.byte_len;
1849                         isert_rx_completion(rx_desc, isert_conn, xfer_len);
1850                 } else {
1851                         pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1852                         if (wc.status != IB_WC_WR_FLUSH_ERR) {
1853                                 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1854                                 pr_debug("RX wc.vendor_err: 0x%08x\n",
1855                                          wc.vendor_err);
1856                         }
1857                         isert_conn->post_recv_buf_count--;
1858                         isert_cq_rx_comp_err(isert_conn);
1859                 }
1860         }
1861
1862         ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1863 }
1864
1865 static void
1866 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1867 {
1868         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1869
1870         queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1871 }
1872
1873 static int
1874 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1875 {
1876         struct ib_send_wr *wr_failed;
1877         int ret;
1878
1879         atomic_inc(&isert_conn->post_send_buf_count);
1880
1881         ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1882                            &wr_failed);
1883         if (ret) {
1884                 pr_err("ib_post_send failed with %d\n", ret);
1885                 atomic_dec(&isert_conn->post_send_buf_count);
1886                 return ret;
1887         }
1888         return ret;
1889 }
1890
1891 static int
1892 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1893 {
1894         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1895         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1896         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1897         struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1898                                 &isert_cmd->tx_desc.iscsi_header;
1899
1900         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1901         iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1902         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1903         /*
1904          * Attach SENSE DATA payload to iSCSI Response PDU
1905          */
1906         if (cmd->se_cmd.sense_buffer &&
1907             ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1908             (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1909                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1910                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1911                 u32 padding, pdu_len;
1912
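                     /*
                      * Per RFC 3720, the sense payload in a SCSI Response
                      * PDU is prefixed with a two-byte SenseLength field,
                      * and the resulting DataSegment is padded out to a
                      * 4-byte boundary.
                      */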
1913                 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1914                                    cmd->sense_buffer);
1915                 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1916
1917                 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1918                 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1919                 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1920
1921                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1922                                 (void *)cmd->sense_buffer, pdu_len,
1923                                 DMA_TO_DEVICE);
1924
1925                 isert_cmd->pdu_buf_len = pdu_len;
1926                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1927                 tx_dsg->length  = pdu_len;
1928                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1929                 isert_cmd->tx_desc.num_sge = 2;
1930         }
1931
1932         isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
1933
1934         pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1935
1936         return isert_post_response(isert_conn, isert_cmd);
1937 }
1938
1939 static int
1940 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1941                 bool nopout_response)
1942 {
1943         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1944         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1945         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1946
1947         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1948         iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1949                                &isert_cmd->tx_desc.iscsi_header,
1950                                nopout_response);
1951         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1952         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1953
1954         pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1955
1956         return isert_post_response(isert_conn, isert_cmd);
1957 }
1958
1959 static int
1960 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1961 {
1962         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1963         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1964         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1965
1966         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1967         iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1968                                 &isert_cmd->tx_desc.iscsi_header);
1969         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1970         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1971
1972         pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1973
1974         return isert_post_response(isert_conn, isert_cmd);
1975 }
1976
1977 static int
1978 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1979 {
1980         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1981         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1982         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1983
1984         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1985         iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1986                                   &isert_cmd->tx_desc.iscsi_header);
1987         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1988         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1989
1990         pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1991
1992         return isert_post_response(isert_conn, isert_cmd);
1993 }
1994
1995 static int
1996 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1997 {
1998         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1999         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2000         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2001         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2002         struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2003         struct iscsi_reject *hdr =
2004                 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2005
2006         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2007         iscsit_build_reject(cmd, conn, hdr);
2008         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2009
2010         hton24(hdr->dlength, ISCSI_HDR_LEN);
2011         isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2012                         (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2013                         DMA_TO_DEVICE);
2014         isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2015         tx_dsg->addr    = isert_cmd->pdu_buf_dma;
2016         tx_dsg->length  = ISCSI_HDR_LEN;
2017         tx_dsg->lkey    = isert_conn->conn_mr->lkey;
2018         isert_cmd->tx_desc.num_sge = 2;
2019
2020         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2021
2022         pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2023
2024         return isert_post_response(isert_conn, isert_cmd);
2025 }
2026
2027 static int
2028 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2029 {
2030         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2031         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2032         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2033         struct iscsi_text_rsp *hdr =
2034                 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2035         u32 txt_rsp_len;
2036         int rc;
2037
2038         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2039         rc = iscsit_build_text_rsp(cmd, conn, hdr);
2040         if (rc < 0)
2041                 return rc;
2042
2043         txt_rsp_len = rc;
2044         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2045
2046         if (txt_rsp_len) {
2047                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2048                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2049                 void *txt_rsp_buf = cmd->buf_ptr;
2050
2051                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2052                                 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2053
2054                 isert_cmd->pdu_buf_len = txt_rsp_len;
2055                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
2056                 tx_dsg->length  = txt_rsp_len;
2057                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
2058                 isert_cmd->tx_desc.num_sge = 2;
2059         }
2060         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2061
2062         pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2063
2064         return isert_post_response(isert_conn, isert_cmd);
2065 }
2066
2067 static int
2068 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2069                     struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2070                     u32 data_left, u32 offset)
2071 {
2072         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2073         struct scatterlist *sg_start, *tmp_sg;
2074         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2075         u32 sg_off, page_off;
2076         int i = 0, sg_nents;
2077
2078         sg_off = offset / PAGE_SIZE;
2079         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2080         sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2081         page_off = offset % PAGE_SIZE;
2082
2083         send_wr->sg_list = ib_sge;
2084         send_wr->num_sge = sg_nents;
2085         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2086         /*
2087          * Map each TCM scatterlist entry into an ib_sge dma_addr/length pair.
2088          */
2089         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2090                 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2091                          (unsigned long long)tmp_sg->dma_address,
2092                          tmp_sg->length, page_off);
2093
2094                 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2095                 ib_sge->length = min_t(u32, data_left,
2096                                 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2097                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2098
2099                 pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2100                          ib_sge->addr, ib_sge->length, ib_sge->lkey);
2101                 page_off = 0;
2102                 data_left -= ib_sge->length;
2103                 ib_sge++;
2104                 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2105         }
2106
2107         pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2108                  send_wr->sg_list, send_wr->num_sge);
2109
2110         return sg_nents;
2111 }
2112
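     /*
      * Map the command's scatterlist with the DMA MR and split the payload
      * into a chain of RDMA work requests of at most max_sge entries each.
      * For RDMA_WRITE the final WR links to the response tx_desc.send_wr so
      * payload and status are posted as a single chain; for RDMA_READ only
      * the final WR is marked IB_SEND_SIGNALED, so a single completion
      * covers the whole chain.
      */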
2113 static int
2114 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2115                struct isert_rdma_wr *wr)
2116 {
2117         struct se_cmd *se_cmd = &cmd->se_cmd;
2118         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2119         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2120         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2121         struct ib_send_wr *send_wr;
2122         struct ib_sge *ib_sge;
2123         struct scatterlist *sg_start;
2124         u32 sg_off = 0, sg_nents;
2125         u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2126         int ret = 0, count, i, ib_sge_cnt;
2127
2128         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2129                 data_left = se_cmd->data_length;
2130         } else {
2131                 sg_off = cmd->write_data_done / PAGE_SIZE;
2132                 data_left = se_cmd->data_length - cmd->write_data_done;
2133                 offset = cmd->write_data_done;
2134                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2135         }
2136
2137         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2138         sg_nents = se_cmd->t_data_nents - sg_off;
2139
2140         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2141                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2142                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2143         if (unlikely(!count)) {
2144                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2145                 return -EINVAL;
2146         }
2147         wr->sge = sg_start;
2148         wr->num_sge = sg_nents;
2149         wr->cur_rdma_length = data_left;
2150         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2151                  isert_cmd, count, sg_start, sg_nents, data_left);
2152
2153         ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2154         if (!ib_sge) {
2155                 pr_warn("Unable to allocate ib_sge\n");
2156                 ret = -ENOMEM;
2157                 goto unmap_sg;
2158         }
2159         wr->ib_sge = ib_sge;
2160
2161         wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2162         wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2163                                 GFP_KERNEL);
2164         if (!wr->send_wr) {
2165                 pr_debug("Unable to allocate wr->send_wr\n");
2166                 ret = -ENOMEM;
2167                 goto unmap_sg;
2168         }
2169
2170         wr->isert_cmd = isert_cmd;
2171         rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2172
2173         for (i = 0; i < wr->send_wr_num; i++) {
2174                 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2175                 data_len = min(data_left, rdma_write_max);
2176
2177                 send_wr->send_flags = 0;
2178                 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2179                         send_wr->opcode = IB_WR_RDMA_WRITE;
2180                         send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2181                         send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2182                         if (i + 1 == wr->send_wr_num)
2183                                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2184                         else
2185                                 send_wr->next = &wr->send_wr[i + 1];
2186                 } else {
2187                         send_wr->opcode = IB_WR_RDMA_READ;
2188                         send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2189                         send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2190                         if (i + 1 == wr->send_wr_num)
2191                                 send_wr->send_flags = IB_SEND_SIGNALED;
2192                         else
2193                                 send_wr->next = &wr->send_wr[i + 1];
2194                 }
2195
2196                 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2197                                         send_wr, data_len, offset);
2198                 ib_sge += ib_sge_cnt;
2199
2200                 offset += data_len;
2201                 va_offset += data_len;
2202                 data_left -= data_len;
2203         }
2204
2205         return 0;
2206 unmap_sg:
             /*
              * ib_sge is assigned before both failure paths (NULL when its
              * own allocation failed), so free it here to avoid leaking it
              * when the send_wr allocation fails.
              */
             kfree(ib_sge);
             wr->ib_sge = NULL;
2207         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2208                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2209                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2210         return ret;
2211 }
2212
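     /*
      * Flatten the DMA-mapped scatterlist into a fast-registration page
      * list: entries are accumulated into a chunk until one ends on a page
      * boundary (or the list ends), then the chunk is emitted one
      * PAGE_SIZE-aligned address at a time.  Returns the number of pages.
      */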
2213 static int
2214 isert_map_fr_pagelist(struct ib_device *ib_dev,
2215                       struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2216 {
2217         u64 start_addr, end_addr, page, chunk_start = 0;
2218         struct scatterlist *tmp_sg;
2219         int i = 0, new_chunk, last_ent, n_pages;
2220
2221         n_pages = 0;
2222         new_chunk = 1;
2223         last_ent = sg_nents - 1;
2224         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2225                 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2226                 if (new_chunk)
2227                         chunk_start = start_addr;
2228                 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2229
2230                 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2231                          i, (unsigned long long)tmp_sg->dma_address,
2232                          tmp_sg->length);
2233
2234                 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2235                         new_chunk = 0;
2236                         continue;
2237                 }
2238                 new_chunk = 1;
2239
2240                 page = chunk_start & PAGE_MASK;
2241                 do {
2242                         fr_pl[n_pages++] = page;
2243                         pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2244                                  n_pages - 1, page);
2245                         page += PAGE_SIZE;
2246                 } while (page < end_addr);
2247         }
2248
2249         return n_pages;
2250 }
2251
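     /*
      * Register sg_start via a pooled fast-registration MR.  If the
      * descriptor still carries a live registration (!valid), an
      * IB_WR_LOCAL_INV is chained ahead of the IB_WR_FAST_REG_MR and the
      * low byte of the rkey is bumped so stale keys cannot match.  Both
      * WRs carry ISER_FASTREG_LI_WRID, which the TX completion path uses
      * to skip the normal send accounting.
      */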
2252 static int
2253 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2254                   struct isert_conn *isert_conn, struct scatterlist *sg_start,
2255                   struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
2256                   unsigned int data_len)
2257 {
2258         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2259         struct ib_send_wr fr_wr, inv_wr;
2260         struct ib_send_wr *bad_wr, *wr = NULL;
2261         int ret, pagelist_len;
2262         u32 page_off;
2263         u8 key;
2264
2265         sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
2266         page_off = offset % PAGE_SIZE;
2267
2268         pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2269                  fr_desc, sg_nents, offset);
2270
2271         pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2272                                              &fr_desc->data_frpl->page_list[0]);
2273
2274         if (!fr_desc->valid) {
2275                 memset(&inv_wr, 0, sizeof(inv_wr));
2276                 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2277                 inv_wr.opcode = IB_WR_LOCAL_INV;
2278                 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2279                 wr = &inv_wr;
2280                 /* Bump the key */
2281                 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2282                 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2283         }
2284
2285         /* Prepare FASTREG WR */
2286         memset(&fr_wr, 0, sizeof(fr_wr));
2287         fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2288         fr_wr.opcode = IB_WR_FAST_REG_MR;
2289         fr_wr.wr.fast_reg.iova_start =
2290                 fr_desc->data_frpl->page_list[0] + page_off;
2291         fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2292         fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2293         fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2294         fr_wr.wr.fast_reg.length = data_len;
2295         fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2296         fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2297
2298         if (!wr)
2299                 wr = &fr_wr;
2300         else
2301                 wr->next = &fr_wr;
2302
2303         ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2304         if (ret) {
2305                 pr_err("fast registration failed, ret:%d\n", ret);
2306                 return ret;
2307         }
2308         fr_desc->valid = false;
2309
2310         ib_sge->lkey = fr_desc->data_mr->lkey;
2311         ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2312         ib_sge->length = data_len;
2313
2314         pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2315                  ib_sge->addr, ib_sge->length, ib_sge->lkey);
2316
2317         return ret;
2318 }
2319
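     /*
      * Fastreg variant of the RDMA setup path: the whole scatterlist is
      * collapsed into a single ib_sge/send_wr pair, using the DMA MR lkey
      * directly when only one DMA entry exists, or a fast-registration
      * descriptor taken from conn_fr_pool otherwise.
      */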
2320 static int
2321 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2322                struct isert_rdma_wr *wr)
2323 {
2324         struct se_cmd *se_cmd = &cmd->se_cmd;
2325         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2326         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2327         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2328         struct ib_send_wr *send_wr;
2329         struct ib_sge *ib_sge;
2330         struct scatterlist *sg_start;
2331         struct fast_reg_descriptor *fr_desc;
2332         u32 sg_off = 0, sg_nents;
2333         u32 offset = 0, data_len, data_left, rdma_write_max;
2334         int ret = 0, count;
2335         unsigned long flags;
2336
2337         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2338                 data_left = se_cmd->data_length;
2339         } else {
2340                 offset = cmd->write_data_done;
2341                 sg_off = offset / PAGE_SIZE;
2342                 data_left = se_cmd->data_length - cmd->write_data_done;
2343                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2344         }
2345
2346         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2347         sg_nents = se_cmd->t_data_nents - sg_off;
2348
2349         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2350                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2351                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2352         if (unlikely(!count)) {
2353                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2354                 return -EINVAL;
2355         }
2356         wr->sge = sg_start;
2357         wr->num_sge = sg_nents;
2358         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2359                  isert_cmd, count, sg_start, sg_nents, data_left);
2360
2361         memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
2362         ib_sge = &wr->s_ib_sge;
2363         wr->ib_sge = ib_sge;
2364
2365         wr->send_wr_num = 1;
2366         memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2367         wr->send_wr = &wr->s_send_wr;
2368
2369         wr->isert_cmd = isert_cmd;
2370         rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2371
2372         send_wr = &isert_cmd->rdma_wr.s_send_wr;
2373         send_wr->sg_list = ib_sge;
2374         send_wr->num_sge = 1;
2375         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2376         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2377                 send_wr->opcode = IB_WR_RDMA_WRITE;
2378                 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2379                 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2380                 send_wr->send_flags = 0;
2381                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2382         } else {
2383                 send_wr->opcode = IB_WR_RDMA_READ;
2384                 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2385                 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2386                 send_wr->send_flags = IB_SEND_SIGNALED;
2387         }
2388
2389         data_len = min(data_left, rdma_write_max);
2390         wr->cur_rdma_length = data_len;
2391
2392         /* If there is a single DMA entry, the DMA MR (conn_mr) is sufficient. */
2393         if (count == 1) {
2394                 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2395                 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2396                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2397                 wr->fr_desc = NULL;
2398         } else {
2399                 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2400                 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2401                                            struct fast_reg_descriptor, list);
2402                 list_del(&fr_desc->list);
2403                 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2404                 wr->fr_desc = fr_desc;
2405
2406                 ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
2407                                         ib_sge, sg_nents, offset, data_len);
2408                 if (ret) {
2409                         list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2410                         goto unmap_sg;
2411                 }
2412         }
2413
2414         return 0;
2415
2416 unmap_sg:
2417         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2418                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2419                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2420         return ret;
2421 }
2422
2423 static int
2424 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2425 {
2426         struct se_cmd *se_cmd = &cmd->se_cmd;
2427         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2428         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2429         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2430         struct isert_device *device = isert_conn->conn_device;
2431         struct ib_send_wr *wr_failed;
2432         int rc;
2433
2434         pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2435                  isert_cmd, se_cmd->data_length);
2436         wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2437         rc = device->reg_rdma_mem(conn, cmd, wr);
2438         if (rc) {
2439                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2440                 return rc;
2441         }
2442
2443         /*
2444          * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it after the final RDMA_WRITE WR.
2445          */
2446         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2447         iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2448                              &isert_cmd->tx_desc.iscsi_header);
2449         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2450         isert_init_send_wr(isert_conn, isert_cmd,
2451                            &isert_cmd->tx_desc.send_wr, true);
2452
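             /*
              * Account for the RDMA_WRITE chain plus the trailing response
              * SEND; isert_response_completion() subtracts the same
              * send_wr_num + 1 on completion.
              */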
2453         atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2454
2455         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2456         if (rc) {
2457                 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2458                 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2459         }
2460         pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2461                  isert_cmd);
2462
2463         return 1;
2464 }
2465
2466 static int
2467 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2468 {
2469         struct se_cmd *se_cmd = &cmd->se_cmd;
2470         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2471         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2472         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2473         struct isert_device *device = isert_conn->conn_device;
2474         struct ib_send_wr *wr_failed;
2475         int rc;
2476
2477         pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2478                  isert_cmd, se_cmd->data_length, cmd->write_data_done);
2479         wr->iser_ib_op = ISER_IB_RDMA_READ;
2480         rc = device->reg_rdma_mem(conn, cmd, wr);
2481         if (rc) {
2482                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2483                 return rc;
2484         }
2485
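             /*
              * Only the RDMA_READ chain is accounted here (no +1 for a
              * response); the matching atomic_sub() happens in
              * __isert_send_completion() for the ISER_IB_RDMA_READ case.
              */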
2486         atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2487
2488         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2489         if (rc) {
2490                 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2491                 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2492         }
2493         pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2494                  isert_cmd);
2495
2496         return 0;
2497 }
2498
2499 static int
2500 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2501 {
2502         int ret;
2503
2504         switch (state) {
2505         case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2506                 ret = isert_put_nopin(cmd, conn, false);
2507                 break;
2508         default:
2509                 pr_err("Unknown immediate state: 0x%02x\n", state);
2510                 ret = -EINVAL;
2511                 break;
2512         }
2513
2514         return ret;
2515 }
2516
2517 static int
2518 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2519 {
2520         int ret;
2521
2522         switch (state) {
2523         case ISTATE_SEND_LOGOUTRSP:
2524                 ret = isert_put_logout_rsp(cmd, conn);
2525                 if (!ret) {
2526                         pr_debug("Returning iSER Logout -EAGAIN\n");
2527                         ret = -EAGAIN;
2528                 }
2529                 break;
2530         case ISTATE_SEND_NOPIN:
2531                 ret = isert_put_nopin(cmd, conn, true);
2532                 break;
2533         case ISTATE_SEND_TASKMGTRSP:
2534                 ret = isert_put_tm_rsp(cmd, conn);
2535                 break;
2536         case ISTATE_SEND_REJECT:
2537                 ret = isert_put_reject(cmd, conn);
2538                 break;
2539         case ISTATE_SEND_TEXTRSP:
2540                 ret = isert_put_text_rsp(cmd, conn);
2541                 break;
2542         case ISTATE_SEND_STATUS:
2543                 /*
2544          * Special case for sending a non-GOOD SCSI status from TX thread
2545          * context during a pre se_cmd execution failure.
2546                  */
2547                 ret = isert_put_response(conn, cmd);
2548                 break;
2549         default:
2550                 pr_err("Unknown response state: 0x%02x\n", state);
2551                 ret = -EINVAL;
2552                 break;
2553         }
2554
2555         return ret;
2556 }
2557
2558 static int
2559 isert_setup_np(struct iscsi_np *np,
2560                struct __kernel_sockaddr_storage *ksockaddr)
2561 {
2562         struct isert_np *isert_np;
2563         struct rdma_cm_id *isert_lid;
2564         struct sockaddr *sa;
2565         int ret;
2566
2567         isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2568         if (!isert_np) {
2569                 pr_err("Unable to allocate struct isert_np\n");
2570                 return -ENOMEM;
2571         }
2572         init_waitqueue_head(&isert_np->np_accept_wq);
2573         mutex_init(&isert_np->np_accept_mutex);
2574         INIT_LIST_HEAD(&isert_np->np_accept_list);
2575         init_completion(&isert_np->np_login_comp);
2576
2577         sa = (struct sockaddr *)ksockaddr;
2578         pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
2579         /*
2580          * Set up np->np_sockaddr from the sockaddr passed in by the
2581          * iscsi_target_configfs.c code.
2582          */
2583         memcpy(&np->np_sockaddr, ksockaddr,
2584                sizeof(struct __kernel_sockaddr_storage));
2585
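             /*
              * RDMA_PS_TCP places the listening cm_id in the TCP port
              * space, so the iSER portal claims the same port number
              * (e.g. 3260) that an iSCSI/TCP portal would use.
              */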
2586         isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
2587                                 IB_QPT_RC);
2588         if (IS_ERR(isert_lid)) {
2589                 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
2590                        PTR_ERR(isert_lid));
2591                 ret = PTR_ERR(isert_lid);
2592                 goto out;
2593         }
2594
2595         ret = rdma_bind_addr(isert_lid, sa);
2596         if (ret) {
2597                 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2598                 goto out_lid;
2599         }
2600
2601         ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2602         if (ret) {
2603                 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2604                 goto out_lid;
2605         }
2606
2607         isert_np->np_cm_id = isert_lid;
2608         np->np_context = isert_np;
2609         pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2610
2611         return 0;
2612
2613 out_lid:
2614         rdma_destroy_id(isert_lid);
2615 out:
2616         kfree(isert_np);
2617         return ret;
2618 }
2619
2620 static int
2621 isert_check_accept_queue(struct isert_np *isert_np)
2622 {
2623         int empty;
2624
2625         mutex_lock(&isert_np->np_accept_mutex);
2626         empty = list_empty(&isert_np->np_accept_list);
2627         mutex_unlock(&isert_np->np_accept_mutex);
2628
2629         return empty;
2630 }
2631
2632 static int
2633 isert_rdma_accept(struct isert_conn *isert_conn)
2634 {
2635         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2636         struct rdma_conn_param cp;
2637         int ret;
2638
2639         memset(&cp, 0, sizeof(struct rdma_conn_param));
2640         cp.responder_resources = isert_conn->responder_resources;
2641         cp.initiator_depth = isert_conn->initiator_depth;
2642         cp.retry_count = 7;
2643         cp.rnr_retry_count = 7;
2644
2645         pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2646
2647         ret = rdma_accept(cm_id, &cp);
2648         if (ret) {
2649                 pr_err("rdma_accept() failed with: %d\n", ret);
2650                 return ret;
2651         }
2652
2653         pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2654
2655         return 0;
2656 }
2657
2658 static int
2659 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2660 {
2661         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2662         int ret;
2663
2664         pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2665         /*
2666          * For login requests after the first PDU, isert_rx_login_req() will
2667          * kick schedule_delayed_work(&conn->login_work) as the packet is
2668          * received, which turns this callback from iscsi_target_do_login_rx()
2669          * into a NOP.
2670          */
2671         if (!login->first_request)
2672                 return 0;
2673
2674         ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2675         if (ret)
2676                 return ret;
2677
2678         pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2679         return 0;
2680 }
2681
2682 static void
2683 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2684                     struct isert_conn *isert_conn)
2685 {
2686         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2687         struct rdma_route *cm_route = &cm_id->route;
2688         struct sockaddr_in *sock_in;
2689         struct sockaddr_in6 *sock_in6;
2690
2691         conn->login_family = np->np_sockaddr.ss_family;
2692
2693         if (np->np_sockaddr.ss_family == AF_INET6) {
2694                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2695                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2696                          &sock_in6->sin6_addr.in6_u);
2697                 conn->login_port = ntohs(sock_in6->sin6_port);
2698
2699                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2700                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2701                          &sock_in6->sin6_addr.in6_u);
2702                 conn->local_port = ntohs(sock_in6->sin6_port);
2703         } else {
2704                 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2705                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
2706                          &sock_in->sin_addr.s_addr);
2707                 conn->login_port = ntohs(sock_in->sin_port);
2708
2709                 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2710                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
2711                          &sock_in->sin_addr.s_addr);
2712                 conn->local_port = ntohs(sock_in->sin_port);
2713         }
2714 }
2715
2716 static int
2717 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2718 {
2719         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2720         struct isert_conn *isert_conn;
2721         int max_accept = 0, ret;
2722
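     /*
      * Sleep until a new connection lands on np_accept_list or the NP
      * thread is being reset; the max_accept guard below gives up after
      * the list is found empty more than five times in a row.
      */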
2723 accept_wait:
2724         ret = wait_event_interruptible(isert_np->np_accept_wq,
2725                         !isert_check_accept_queue(isert_np) ||
2726                         np->np_thread_state == ISCSI_NP_THREAD_RESET);
2727         if (max_accept > 5)
2728                 return -ENODEV;
2729
2730         spin_lock_bh(&np->np_thread_lock);
2731         if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2732                 spin_unlock_bh(&np->np_thread_lock);
2733                 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2734                 return -ENODEV;
2735         }
2736         spin_unlock_bh(&np->np_thread_lock);
2737
2738         mutex_lock(&isert_np->np_accept_mutex);
2739         if (list_empty(&isert_np->np_accept_list)) {
2740                 mutex_unlock(&isert_np->np_accept_mutex);
2741                 max_accept++;
2742                 goto accept_wait;
2743         }
2744         isert_conn = list_first_entry(&isert_np->np_accept_list,
2745                         struct isert_conn, conn_accept_node);
2746         list_del_init(&isert_conn->conn_accept_node);
2747         mutex_unlock(&isert_np->np_accept_mutex);
2748
2749         conn->context = isert_conn;
2750         isert_conn->conn = conn;
2751         max_accept = 0;
2752
2753         ret = isert_rdma_post_recvl(isert_conn);
2754         if (ret)
2755                 return ret;
2756
2757         ret = isert_rdma_accept(isert_conn);
2758         if (ret)
2759                 return ret;
2760
2761         isert_set_conn_info(np, conn, isert_conn);
2762
2763         pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2764         return 0;
2765 }
2766
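/*
 * iscsit_free_np() callback: destroy the listening RDMA CM ID and free
 * the isert_np context.
 */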
2767 static void
2768 isert_free_np(struct iscsi_np *np)
2769 {
2770         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2771
2772         rdma_destroy_id(isert_np->np_cm_id);
2773
2774         np->np_context = NULL;
2775         kfree(isert_np);
2776 }
2777
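/*
 * iscsit_wait_conn() callback: initiate RDMA disconnect if the connection
 * is still up, then wait for the completion-error and final connection
 * completions once full feature phase has been reached.
 */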
2778 static void isert_wait_conn(struct iscsi_conn *conn)
2779 {
2780         struct isert_conn *isert_conn = conn->context;
2781
2782         pr_debug("isert_wait_conn: Starting\n");
2783         /*
2784          * Decrement post_send_buf_count for special case when called
2785          * from isert_do_control_comp() -> iscsit_logout_post_handler()
2786          */
2787         mutex_lock(&isert_conn->conn_mutex);
2788         if (isert_conn->logout_posted)
2789                 atomic_dec(&isert_conn->post_send_buf_count);
2790
2791         if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2792                 pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
2793                 rdma_disconnect(isert_conn->conn_cm_id);
2794         }
2795         /*
2796          * Only wait for conn_wait_comp_err if the isert_conn made it
2797          * into full feature phase.
2798          */
2799         if (isert_conn->state == ISER_CONN_INIT) {
2800                 mutex_unlock(&isert_conn->conn_mutex);
2801                 return;
2802         }
2803         if (isert_conn->state == ISER_CONN_UP)
2804                 isert_conn->state = ISER_CONN_TERMINATING;
2805         mutex_unlock(&isert_conn->conn_mutex);
2806
2807         wait_for_completion(&isert_conn->conn_wait_comp_err);
2808
2809         wait_for_completion(&isert_conn->conn_wait);
2810 }
2811
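/* iscsit_free_conn() callback: drop the isert_conn reference. */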
2812 static void isert_free_conn(struct iscsi_conn *conn)
2813 {
2814         struct isert_conn *isert_conn = conn->context;
2815
2816         isert_put_conn(isert_conn);
2817 }
2818
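/*
 * iscsit_transport ops vector registering the iSER callbacks above with
 * the generic iscsi-target core.
 */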
2819 static struct iscsit_transport iser_target_transport = {
2820         .name                   = "IB/iSER",
2821         .transport_type         = ISCSI_INFINIBAND,
2822         .priv_size              = sizeof(struct isert_cmd),
2823         .owner                  = THIS_MODULE,
2824         .iscsit_setup_np        = isert_setup_np,
2825         .iscsit_accept_np       = isert_accept_np,
2826         .iscsit_free_np         = isert_free_np,
2827         .iscsit_wait_conn       = isert_wait_conn,
2828         .iscsit_free_conn       = isert_free_conn,
2829         .iscsit_get_login_rx    = isert_get_login_rx,
2830         .iscsit_put_login_tx    = isert_put_login_tx,
2831         .iscsit_immediate_queue = isert_immediate_queue,
2832         .iscsit_response_queue  = isert_response_queue,
2833         .iscsit_get_dataout     = isert_get_dataout,
2834         .iscsit_queue_data_in   = isert_put_datain,
2835         .iscsit_queue_status    = isert_put_response,
2836 };
2837
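/*
 * Module load: allocate the RX and completion workqueues, then register
 * iser_target_transport with the iscsi-target core.
 */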
2838 static int __init isert_init(void)
2839 {
2840         int ret;
2841
2842         isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2843         if (!isert_rx_wq) {
2844                 pr_err("Unable to allocate isert_rx_wq\n");
2845                 return -ENOMEM;
2846         }
2847
2848         isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2849         if (!isert_comp_wq) {
2850                 pr_err("Unable to allocate isert_comp_wq\n");
2851                 ret = -ENOMEM;
2852                 goto destroy_rx_wq;
2853         }
2854
2855         iscsit_register_transport(&iser_target_transport);
2856         pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2857         return 0;
2858
2859 destroy_rx_wq:
2860         destroy_workqueue(isert_rx_wq);
2861         return ret;
2862 }
2863
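/*
 * Module unload: destroy both workqueues and unregister the transport.
 */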
2864 static void __exit isert_exit(void)
2865 {
2866         destroy_workqueue(isert_comp_wq);
2867         destroy_workqueue(isert_rx_wq);
2868         iscsit_unregister_transport(&iser_target_transport);
2869         pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2870 }
2871
2872 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2873 MODULE_VERSION("0.1");
2874 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2875 MODULE_LICENSE("GPL");
2876
2877 module_init(isert_init);
2878 module_exit(isert_exit);