drivers/infiniband/ulp/iser/iser_initiator.c
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len; protection size
 * is stored in task->prot[ISER_DIR_IN].data_len.
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;

        err = iser_dma_map_task_data(iser_task,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                goto out_err;
        }
        mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(mem_reg->rkey);
        hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, mem_reg->rkey,
                 (unsigned long long)mem_reg->sge.addr);

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
        return err;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len; protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len.
 */
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
                                  unsigned int unsol_sz, unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
        struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

        err = iser_dma_map_task_data(iser_task,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
                                   buf_out->data_len == imm_sz);
        if (err) {
                iser_err("Failed to register write cmd RDMA mem\n");
                goto out_err;
        }

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
                if (buf_out->data_len > imm_sz) {
                        hdr->write_stag = cpu_to_be32(mem_reg->rkey);
                        hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
                }

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
                         task->itt, mem_reg->rkey,
                         (unsigned long long)mem_reg->sge.addr, unsol_sz);
        }

        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                tx_dsg->addr = mem_reg->sge.addr;
                tx_dsg->length = imm_sz;
                tx_dsg->lkey = mem_reg->sge.lkey;
                iser_task->desc.num_sge = 2;
        }

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
        return err;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
                                  struct iser_tx_desc *tx_desc)
{
        struct iser_device *device = iser_conn->ib_conn.device;

        ib_dma_sync_single_for_cpu(device->ib_device,
                tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
        tx_desc->iser_header.flags = ISER_VER;
        tx_desc->num_sge = 1;
}

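/* Unmap and free the login request/response buffers; safe to call twice. */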
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        if (!desc->req)
                return;

        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        kfree(desc->req);
        kfree(desc->rsp);

        /* make sure we never redo any unmapping */
        desc->req = NULL;
        desc->rsp = NULL;
}

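/* Allocate and DMA-map the login request/response buffers. */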
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
        if (!desc->req)
                return -ENOMEM;

        desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
                                          ISCSI_DEF_MAX_RECV_SEG_LEN,
                                          DMA_TO_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                 desc->req_dma))
                goto free_req;

        desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!desc->rsp)
                goto unmap_req;

        desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
                                          ISER_RX_LOGIN_SIZE,
                                          DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(device->ib_device,
                                 desc->rsp_dma))
                goto free_rsp;

        return 0;

free_rsp:
        kfree(desc->rsp);
unmap_req:
        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN,
                            DMA_TO_DEVICE);
free_req:
        kfree(desc->req);

        return -ENOMEM;
}

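/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the connection RX descriptors
 * @iser_conn: iSER connection context
 * @session: iSCSI session
 */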
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
                              struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge       *rx_sg;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_conn->qp_max_recv_dtos = session->cmds_max;

        if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
                                    iser_conn->pages_per_mr))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(iser_conn))
                goto alloc_login_buf_fail;

        iser_conn->num_rx_descs = session->cmds_max;
        iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
                                            sizeof(struct iser_rx_desc),
                                            GFP_KERNEL);
        if (!iser_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = iser_conn->rx_descs;

        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;
                rx_desc->cqe.done = iser_task_rsp;
                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->pd->local_dma_lkey;
        }

        return 0;

rx_desc_dma_map_failed:
        rx_desc = iser_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
        iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}

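/**
 * iser_free_rx_descriptors - unmap and free the connection RX descriptors
 * @iser_conn: iSER connection context
 */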
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
        int i;
        struct iser_rx_desc *rx_desc;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_free_fastreg_pool(ib_conn);

        rx_desc = iser_conn->rx_descs;
        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;

        iser_free_login_buf(iser_conn);
}

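/* Post the initial batch of receive buffers once the last login response
 * indicates the connection is moving to full feature phase.
 */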
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_session *session = conn->session;
        int err = 0;
        int i;

        iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
        /* check if this is the last login - going to full feature phase */
        if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
                goto out;

        if (session->discovery_sess) {
                iser_info("Discovery session, re-using login RX buffer\n");
                goto out;
        }

        iser_info("Normal session, posting batch of RX %d buffers\n",
                  iser_conn->qp_max_recv_dtos - 1);

        /*
         * Initial post receive buffers.
         * There is one already posted recv buffer (for the last login
         * response). Therefore, the first recv buffer is skipped here.
         */
        for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
                err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
                if (err)
                        goto out;
        }
out:
        return err;
}

/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        unsigned long edtl;
        int err;
        struct iser_data_buf *data_buf, *prot_buf;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        struct scsi_cmnd *sc = task->sc;
        struct iser_tx_desc *tx_desc = &iser_task->desc;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        tx_desc->type = ISCSI_TX_SCSI_COMMAND;
        tx_desc->cqe.done = iser_cmd_comp;
        iser_create_send_desc(iser_conn, tx_desc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                data_buf = &iser_task->data[ISER_DIR_IN];
                prot_buf = &iser_task->prot[ISER_DIR_IN];
        } else {
                data_buf = &iser_task->data[ISER_DIR_OUT];
                prot_buf = &iser_task->prot[ISER_DIR_OUT];
        }

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->sg = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }
        data_buf->data_len = scsi_bufflen(sc);

        if (scsi_prot_sg_count(sc)) {
                prot_buf->sg = scsi_prot_sglist(sc);
                prot_buf->size = scsi_prot_sg_count(sc);
                prot_buf->data_len = (data_buf->data_len >>
                                     ilog2(sc->device->sector_size)) * 8;
        }

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_command_error:
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *tx_desc;
        struct iser_mem_reg *mem_reg;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err;
        struct ib_sge *tx_dsg;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset   = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
        if (!tx_desc)
                return -ENOMEM;

        tx_desc->type = ISCSI_TX_DATAOUT;
        tx_desc->cqe.done = iser_dataout_comp;
        tx_desc->iser_header.flags = ISER_VER;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc */
        err = iser_initialize_task_headers(task, tx_desc);
        if (err)
                goto send_data_out_error;

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
        tx_dsg = &tx_desc->tx_sg[1];
        tx_dsg->addr = mem_reg->sge.addr + buf_offset;
        tx_dsg->length = data_seg_len;
        tx_dsg->lkey = mem_reg->sge.lkey;
        tx_desc->num_sge = 2;

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_data_out_error:
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

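/**
 * iser_send_control - send control PDU
 * @conn: link to matching iscsi connection
 * @task: iSCSI control task
 */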
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *mdesc = &iser_task->desc;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_device *device;

        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        mdesc->cqe.done = iser_ctrl_comp;
        iser_create_send_desc(iser_conn, mdesc);

        device = iser_conn->ib_conn.device;

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                struct iser_login_desc *desc = &iser_conn->login_desc;
                struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

                if (task != conn->login_task) {
                        iser_err("data present on non login task!!!\n");
                        goto send_control_error;
                }

                ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
                                           task->data_count, DMA_TO_DEVICE);

                memcpy(desc->req, task->data, task->data_count);

                ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
                                              task->data_count, DMA_TO_DEVICE);

                tx_dsg->addr = desc->req_dma;
                tx_dsg->length = task->data_count;
                tx_dsg->lkey = device->pd->local_dma_lkey;
                mdesc->num_sge = 2;
        }

        if (task == conn->login_task) {
                iser_dbg("op %x dsl %lx, posting login rx buffer\n",
                         task->hdr->opcode, data_seg_len);
                err = iser_post_recvl(iser_conn);
                if (err)
                        goto send_control_error;
                err = iser_post_rx_bufs(conn, task->hdr);
                if (err)
                        goto send_control_error;
        }

        err = iser_post_send(&iser_conn->ib_conn, mdesc);
        if (!err)
                return 0;

send_control_error:
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

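/* Receive completion handler for the login response buffer. */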
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_login_desc *desc = iser_login(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        char *data;
        int length;
        bool full_feature_phase;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "login_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = desc->rsp + sizeof(struct iser_ctrl);
        data = desc->rsp + ISER_HEADERS_LEN;
        length = wc->byte_len - ISER_HEADERS_LEN;
        full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
                              ISCSI_FULL_FEATURE_PHASE) &&
                             (hdr->flags & ISCSI_FLAG_CMD_FINAL);

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                      DMA_FROM_DEVICE);

        if (!full_feature_phase ||
            iser_conn->iscsi_conn->session->discovery_sess)
                return;

        /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
        iser_post_recvm(iser_conn, iser_conn->rx_descs);
}

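/* Check that a remote invalidation targeted the expected rkey and mark
 * the memory region as invalidated.
 */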
static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
        if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
                     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
                iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
                return -EINVAL;
        }

        desc->rsc.mr_valid = 0;

        return 0;
}

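/* Handle remote invalidation reported in a receive completion, if any. */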
static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
                                 struct iscsi_hdr *hdr)
{
        if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
                struct iscsi_task *task;
                u32 rkey = wc->ex.invalidate_rkey;

                iser_dbg("conn %p: remote invalidation for rkey %#x\n",
                         iser_conn, rkey);

                if (unlikely(!iser_conn->snd_w_inv)) {
                        iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
                                 iser_conn);
                        return -EPROTO;
                }

                task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
                if (likely(task)) {
                        struct iscsi_iser_task *iser_task = task->dd_data;
                        struct iser_fr_desc *desc;

                        if (iser_task->dir[ISER_DIR_IN]) {
                                desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }

                        if (iser_task->dir[ISER_DIR_OUT]) {
                                desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }
                } else {
                        iser_err("failed to get task for itt=%d\n", hdr->itt);
                        return -EINVAL;
                }
        }

        return 0;
}

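/* Receive completion handler for task responses. */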
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        int length, err;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "task_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        length = wc->byte_len - ISER_HEADERS_LEN;

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        if (iser_check_remote_inv(iser_conn, wc, hdr)) {
                iscsi_conn_failure(iser_conn->iscsi_conn,
                                   ISCSI_ERR_CONN_FAILED);
                return;
        }

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                      DMA_FROM_DEVICE);

        err = iser_post_recvm(iser_conn, desc);
        if (err)
                iser_err("posting rx buffer err %d\n", err);
}

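/* Send completion handler for SCSI command PDUs. */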
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "command");
}

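/* Send completion handler for control PDUs. */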
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct iscsi_task *task;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "control");
                return;
        }

        /* this arithmetic is legal because libiscsi places dd_data
         * (which holds the tx descriptor) right after struct iscsi_task
         */
        task = (void *)desc - sizeof(struct iscsi_task);
        if (task->hdr->itt == RESERVED_ITT)
                iscsi_put_task(task);
}

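/* Send completion handler for Data-Out PDUs; frees the tx descriptor. */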
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_device *device = ib_conn->device;

        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "dataout");

        ib_dma_unmap_single(device->ib_device, desc->dma_addr,
                            ISER_HEADERS_LEN, DMA_TO_DEVICE);
        kmem_cache_free(ig.desc_cache, desc);
}

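/* Reset per-task RDMA state before command processing starts. */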
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len  = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].data_len  = 0;
        iser_task->prot[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].dma_nents = 0;
        iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

        memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
               sizeof(struct iser_mem_reg));
        memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
               sizeof(struct iser_mem_reg));
}

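/* Unregister memory and unmap DMA for any directions the task used. */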
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        if (iser_task->dir[ISER_DIR_IN]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
                                         DMA_FROM_DEVICE);
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
                                         DMA_TO_DEVICE);
        }
}