drivers/nvme/target/rdma.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics RDMA target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/atomic.h>
8 #include <linux/ctype.h>
9 #include <linux/delay.h>
10 #include <linux/err.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/nvme.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16 #include <linux/wait.h>
17 #include <linux/inet.h>
18 #include <asm/unaligned.h>
19
20 #include <rdma/ib_verbs.h>
21 #include <rdma/rdma_cm.h>
22 #include <rdma/rw.h>
23
24 #include <linux/nvme-rdma.h>
25 #include "nvmet.h"
26
27 /*
28  * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
29  */
30 #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE     PAGE_SIZE
31 #define NVMET_RDMA_MAX_INLINE_SGE               4
32 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE         max_t(int, SZ_16K, PAGE_SIZE)
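/*
 * Example, assuming 4 KiB pages: the default inline data size is one page
 * (4 KiB) and the maximum is 16 KiB, carried in up to four one-page recv
 * SGEs; nvmet_rdma_find_get_device() reduces the configured size further
 * if the device cannot provide enough recv SGEs.
 */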
33
34 struct nvmet_rdma_cmd {
35         struct ib_sge           sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
36         struct ib_cqe           cqe;
37         struct ib_recv_wr       wr;
38         struct scatterlist      inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
39         struct nvme_command     *nvme_cmd;
40         struct nvmet_rdma_queue *queue;
41 };
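/*
 * SGE layout of a command: sge[0] receives the 64-byte NVMe command
 * capsule, sge[1..NVMET_RDMA_MAX_INLINE_SGE] each map one page of
 * in-capsule (inline) data backed by inline_sg (I/O queues only; admin
 * queues post a single SGE).
 */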
42
43 enum {
44         NVMET_RDMA_REQ_INLINE_DATA      = (1 << 0),
45         NVMET_RDMA_REQ_INVALIDATE_RKEY  = (1 << 1),
46 };
47
48 struct nvmet_rdma_rsp {
49         struct ib_sge           send_sge;
50         struct ib_cqe           send_cqe;
51         struct ib_send_wr       send_wr;
52
53         struct nvmet_rdma_cmd   *cmd;
54         struct nvmet_rdma_queue *queue;
55
56         struct ib_cqe           read_cqe;
57         struct rdma_rw_ctx      rw;
58
59         struct nvmet_req        req;
60
61         bool                    allocated;
62         u8                      n_rdma;
63         u32                     flags;
64         u32                     invalidate_rkey;
65
66         struct list_head        wait_list;
67         struct list_head        free_list;
68 };
69
70 enum nvmet_rdma_queue_state {
71         NVMET_RDMA_Q_CONNECTING,
72         NVMET_RDMA_Q_LIVE,
73         NVMET_RDMA_Q_DISCONNECTING,
74 };
75
76 struct nvmet_rdma_queue {
77         struct rdma_cm_id       *cm_id;
78         struct nvmet_port       *port;
79         struct ib_cq            *cq;
80         atomic_t                sq_wr_avail;
81         struct nvmet_rdma_device *dev;
82         spinlock_t              state_lock;
83         enum nvmet_rdma_queue_state state;
84         struct nvmet_cq         nvme_cq;
85         struct nvmet_sq         nvme_sq;
86
87         struct nvmet_rdma_rsp   *rsps;
88         struct list_head        free_rsps;
89         spinlock_t              rsps_lock;
90         struct nvmet_rdma_cmd   *cmds;
91
92         struct work_struct      release_work;
93         struct list_head        rsp_wait_list;
94         struct list_head        rsp_wr_wait_list;
95         spinlock_t              rsp_wr_wait_lock;
96
97         int                     idx;
98         int                     host_qid;
99         int                     recv_queue_size;
100         int                     send_queue_size;
101
102         struct list_head        queue_list;
103 };
104
105 struct nvmet_rdma_device {
106         struct ib_device        *device;
107         struct ib_pd            *pd;
108         struct ib_srq           *srq;
109         struct nvmet_rdma_cmd   *srq_cmds;
110         size_t                  srq_size;
111         struct kref             ref;
112         struct list_head        entry;
113         int                     inline_data_size;
114         int                     inline_page_count;
115 };
116
117 static bool nvmet_rdma_use_srq;
118 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
119 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
120
121 static DEFINE_IDA(nvmet_rdma_queue_ida);
122 static LIST_HEAD(nvmet_rdma_queue_list);
123 static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
124
125 static LIST_HEAD(device_list);
126 static DEFINE_MUTEX(device_list_mutex);
127
128 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
129 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
130 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
131 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
132 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
133 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
134 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
135                                 struct nvmet_rdma_rsp *r);
136 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
137                                 struct nvmet_rdma_rsp *r);
138
139 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
140
141 static int num_pages(int len)
142 {
143         return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
144 }
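/*
 * For example, with 4 KiB pages: num_pages(1) == num_pages(4096) == 1,
 * num_pages(4097) == 2.
 */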
145
146 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
147 {
148         return nvme_is_write(rsp->req.cmd) &&
149                 rsp->req.transfer_len &&
150                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
151 }
152
153 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
154 {
155         return !nvme_is_write(rsp->req.cmd) &&
156                 rsp->req.transfer_len &&
157                 !rsp->req.cqe->status &&
158                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
159 }
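/*
 * "Data in" here means host-to-controller payload that was not sent
 * inline; it is fetched with RDMA READ (completed in
 * nvmet_rdma_read_data_done()).  "Data out" is controller-to-host
 * payload, pushed with RDMA WRITE work requests chained in front of
 * the completion SEND (see nvmet_rdma_queue_response()).
 */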
160
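/*
 * Responses normally come from the pool allocated in
 * nvmet_rdma_alloc_rsps() (2 * recv_queue_size entries).  If the pool
 * is temporarily empty (e.g. SEND completions lagging under heavy
 * load), a response is allocated on demand and flagged "allocated" so
 * that nvmet_rdma_put_rsp() frees it instead of returning it to the
 * pool.
 */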
161 static inline struct nvmet_rdma_rsp *
162 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
163 {
164         struct nvmet_rdma_rsp *rsp;
165         unsigned long flags;
166
167         spin_lock_irqsave(&queue->rsps_lock, flags);
168         rsp = list_first_entry_or_null(&queue->free_rsps,
169                                 struct nvmet_rdma_rsp, free_list);
170         if (likely(rsp))
171                 list_del(&rsp->free_list);
172         spin_unlock_irqrestore(&queue->rsps_lock, flags);
173
174         if (unlikely(!rsp)) {
175                 int ret;
176
177                 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
178                 if (unlikely(!rsp))
179                         return NULL;
180                 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
181                 if (unlikely(ret)) {
182                         kfree(rsp);
183                         return NULL;
184                 }
185
186                 rsp->allocated = true;
187         }
188
189         return rsp;
190 }
191
192 static inline void
193 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
194 {
195         unsigned long flags;
196
197         if (unlikely(rsp->allocated)) {
198                 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
199                 kfree(rsp);
200                 return;
201         }
202
203         spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
204         list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
205         spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
206 }
207
208 static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
209                                 struct nvmet_rdma_cmd *c)
210 {
211         struct scatterlist *sg;
212         struct ib_sge *sge;
213         int i;
214
215         if (!ndev->inline_data_size)
216                 return;
217
218         sg = c->inline_sg;
219         sge = &c->sge[1];
220
221         for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
222                 if (sge->length)
223                         ib_dma_unmap_page(ndev->device, sge->addr,
224                                         sge->length, DMA_FROM_DEVICE);
225                 if (sg_page(sg))
226                         __free_page(sg_page(sg));
227         }
228 }
229
230 static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
231                                 struct nvmet_rdma_cmd *c)
232 {
233         struct scatterlist *sg;
234         struct ib_sge *sge;
235         struct page *pg;
236         int len;
237         int i;
238
239         if (!ndev->inline_data_size)
240                 return 0;
241
242         sg = c->inline_sg;
243         sg_init_table(sg, ndev->inline_page_count);
244         sge = &c->sge[1];
245         len = ndev->inline_data_size;
246
247         for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
248                 pg = alloc_page(GFP_KERNEL);
249                 if (!pg)
250                         goto out_err;
251                 sg_assign_page(sg, pg);
252                 sge->addr = ib_dma_map_page(ndev->device,
253                         pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
254                 if (ib_dma_mapping_error(ndev->device, sge->addr))
255                         goto out_err;
256                 sge->length = min_t(int, len, PAGE_SIZE);
257                 sge->lkey = ndev->pd->local_dma_lkey;
258                 len -= sge->length;
259         }
260
261         return 0;
262 out_err:
263         for (; i >= 0; i--, sg--, sge--) {
264                 if (sge->length)
265                         ib_dma_unmap_page(ndev->device, sge->addr,
266                                         sge->length, DMA_FROM_DEVICE);
267                 if (sg_page(sg))
268                         __free_page(sg_page(sg));
269         }
270         return -ENOMEM;
271 }
272
273 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
274                         struct nvmet_rdma_cmd *c, bool admin)
275 {
276         /* NVMe command / RDMA RECV */
277         c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
278         if (!c->nvme_cmd)
279                 goto out;
280
281         c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
282                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
283         if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
284                 goto out_free_cmd;
285
286         c->sge[0].length = sizeof(*c->nvme_cmd);
287         c->sge[0].lkey = ndev->pd->local_dma_lkey;
288
289         if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
290                 goto out_unmap_cmd;
291
292         c->cqe.done = nvmet_rdma_recv_done;
293
294         c->wr.wr_cqe = &c->cqe;
295         c->wr.sg_list = c->sge;
296         c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
297
298         return 0;
299
300 out_unmap_cmd:
301         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
302                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
303 out_free_cmd:
304         kfree(c->nvme_cmd);
305
306 out:
307         return -ENOMEM;
308 }
309
310 static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
311                 struct nvmet_rdma_cmd *c, bool admin)
312 {
313         if (!admin)
314                 nvmet_rdma_free_inline_pages(ndev, c);
315         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
316                                 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
317         kfree(c->nvme_cmd);
318 }
319
320 static struct nvmet_rdma_cmd *
321 nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
322                 int nr_cmds, bool admin)
323 {
324         struct nvmet_rdma_cmd *cmds;
325         int ret = -EINVAL, i;
326
327         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
328         if (!cmds)
329                 goto out;
330
331         for (i = 0; i < nr_cmds; i++) {
332                 ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
333                 if (ret)
334                         goto out_free;
335         }
336
337         return cmds;
338
339 out_free:
340         while (--i >= 0)
341                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
342         kfree(cmds);
343 out:
344         return ERR_PTR(ret);
345 }
346
347 static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
348                 struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
349 {
350         int i;
351
352         for (i = 0; i < nr_cmds; i++)
353                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
354         kfree(cmds);
355 }
356
357 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
358                 struct nvmet_rdma_rsp *r)
359 {
360         /* NVMe CQE / RDMA SEND */
361         r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
362         if (!r->req.cqe)
363                 goto out;
364
365         r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
366                         sizeof(*r->req.cqe), DMA_TO_DEVICE);
367         if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
368                 goto out_free_rsp;
369
370         r->req.p2p_client = &ndev->device->dev;
371         r->send_sge.length = sizeof(*r->req.cqe);
372         r->send_sge.lkey = ndev->pd->local_dma_lkey;
373
374         r->send_cqe.done = nvmet_rdma_send_done;
375
376         r->send_wr.wr_cqe = &r->send_cqe;
377         r->send_wr.sg_list = &r->send_sge;
378         r->send_wr.num_sge = 1;
379         r->send_wr.send_flags = IB_SEND_SIGNALED;
380
381         /* Data In / RDMA READ */
382         r->read_cqe.done = nvmet_rdma_read_data_done;
383         return 0;
384
385 out_free_rsp:
386         kfree(r->req.cqe);
387 out:
388         return -ENOMEM;
389 }
390
391 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
392                 struct nvmet_rdma_rsp *r)
393 {
394         ib_dma_unmap_single(ndev->device, r->send_sge.addr,
395                                 sizeof(*r->req.cqe), DMA_TO_DEVICE);
396         kfree(r->req.cqe);
397 }
398
399 static int
400 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
401 {
402         struct nvmet_rdma_device *ndev = queue->dev;
403         int nr_rsps = queue->recv_queue_size * 2;
404         int ret = -EINVAL, i;
405
406         queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
407                         GFP_KERNEL);
408         if (!queue->rsps)
409                 goto out;
410
411         for (i = 0; i < nr_rsps; i++) {
412                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
413
414                 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
415                 if (ret)
416                         goto out_free;
417
418                 list_add_tail(&rsp->free_list, &queue->free_rsps);
419         }
420
421         return 0;
422
423 out_free:
424         while (--i >= 0) {
425                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
426
427                 list_del(&rsp->free_list);
428                 nvmet_rdma_free_rsp(ndev, rsp);
429         }
430         kfree(queue->rsps);
431 out:
432         return ret;
433 }
434
435 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
436 {
437         struct nvmet_rdma_device *ndev = queue->dev;
438         int i, nr_rsps = queue->recv_queue_size * 2;
439
440         for (i = 0; i < nr_rsps; i++) {
441                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
442
443                 list_del(&rsp->free_list);
444                 nvmet_rdma_free_rsp(ndev, rsp);
445         }
446         kfree(queue->rsps);
447 }
448
449 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
450                 struct nvmet_rdma_cmd *cmd)
451 {
452         int ret;
453
454         ib_dma_sync_single_for_device(ndev->device,
455                 cmd->sge[0].addr, cmd->sge[0].length,
456                 DMA_FROM_DEVICE);
457
458         if (ndev->srq)
459                 ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
460         else
461                 ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
462
463         if (unlikely(ret))
464                 pr_err("post_recv cmd failed\n");
465
466         return ret;
467 }
468
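/*
 * Commands that could not start because the send queue ran out of
 * work-request credits (see nvmet_rdma_execute_command()) wait on
 * rsp_wr_wait_list; they are retried here once a completed response
 * returns its credits in nvmet_rdma_release_rsp().
 */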
469 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
470 {
471         spin_lock(&queue->rsp_wr_wait_lock);
472         while (!list_empty(&queue->rsp_wr_wait_list)) {
473                 struct nvmet_rdma_rsp *rsp;
474                 bool ret;
475
476                 rsp = list_entry(queue->rsp_wr_wait_list.next,
477                                 struct nvmet_rdma_rsp, wait_list);
478                 list_del(&rsp->wait_list);
479
480                 spin_unlock(&queue->rsp_wr_wait_lock);
481                 ret = nvmet_rdma_execute_command(rsp);
482                 spin_lock(&queue->rsp_wr_wait_lock);
483
484                 if (!ret) {
485                         list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
486                         break;
487                 }
488         }
489         spin_unlock(&queue->rsp_wr_wait_lock);
490 }
491
492
493 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
494 {
495         struct nvmet_rdma_queue *queue = rsp->queue;
496
497         atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
498
499         if (rsp->n_rdma) {
500                 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
501                                 queue->cm_id->port_num, rsp->req.sg,
502                                 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
503         }
504
505         if (rsp->req.sg != rsp->cmd->inline_sg)
506                 nvmet_req_free_sgl(&rsp->req);
507
508         if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
509                 nvmet_rdma_process_wr_wait_list(queue);
510
511         nvmet_rdma_put_rsp(rsp);
512 }
513
514 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
515 {
516         if (queue->nvme_sq.ctrl) {
517                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
518         } else {
519                 /*
520                  * We didn't set up the controller yet in case
521                  * of an admin connect error, so just disconnect
522                  * and clean up the queue.
523                  */
524                 nvmet_rdma_queue_disconnect(queue);
525         }
526 }
527
528 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
529 {
530         struct nvmet_rdma_rsp *rsp =
531                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
532         struct nvmet_rdma_queue *queue = cq->cq_context;
533
534         nvmet_rdma_release_rsp(rsp);
535
536         if (unlikely(wc->status != IB_WC_SUCCESS &&
537                      wc->status != IB_WC_WR_FLUSH_ERR)) {
538                 pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
539                         wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
540                 nvmet_rdma_error_comp(queue);
541         }
542 }
543
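/*
 * Queue the completion for a request: chain any RDMA WRITE work
 * requests for data-out ahead of the SEND (or SEND_WITH_INV when the
 * host asked for remote invalidation), re-post the command's RECV
 * buffer, and post the whole chain.
 */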
544 static void nvmet_rdma_queue_response(struct nvmet_req *req)
545 {
546         struct nvmet_rdma_rsp *rsp =
547                 container_of(req, struct nvmet_rdma_rsp, req);
548         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
549         struct ib_send_wr *first_wr;
550
551         if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
552                 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
553                 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
554         } else {
555                 rsp->send_wr.opcode = IB_WR_SEND;
556         }
557
558         if (nvmet_rdma_need_data_out(rsp))
559                 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
560                                 cm_id->port_num, NULL, &rsp->send_wr);
561         else
562                 first_wr = &rsp->send_wr;
563
564         nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
565
566         ib_dma_sync_single_for_device(rsp->queue->dev->device,
567                 rsp->send_sge.addr, rsp->send_sge.length,
568                 DMA_TO_DEVICE);
569
570         if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
571                 pr_err("sending cmd response failed\n");
572                 nvmet_rdma_release_rsp(rsp);
573         }
574 }
575
576 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
577 {
578         struct nvmet_rdma_rsp *rsp =
579                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
580         struct nvmet_rdma_queue *queue = cq->cq_context;
581
582         WARN_ON(rsp->n_rdma <= 0);
583         atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
584         rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
585                         queue->cm_id->port_num, rsp->req.sg,
586                         rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
587         rsp->n_rdma = 0;
588
589         if (unlikely(wc->status != IB_WC_SUCCESS)) {
590                 nvmet_req_uninit(&rsp->req);
591                 nvmet_rdma_release_rsp(rsp);
592                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
593                         pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
594                                 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
595                         nvmet_rdma_error_comp(queue);
596                 }
597                 return;
598         }
599
600         rsp->req.execute(&rsp->req);
601 }
602
603 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
604                 u64 off)
605 {
606         int sg_count = num_pages(len);
607         struct scatterlist *sg;
608         int i;
609
610         sg = rsp->cmd->inline_sg;
611         for (i = 0; i < sg_count; i++, sg++) {
612                 if (i < sg_count - 1)
613                         sg_unmark_end(sg);
614                 else
615                         sg_mark_end(sg);
616                 sg->offset = off;
617                 sg->length = min_t(int, len, PAGE_SIZE - off);
618                 len -= sg->length;
619                 if (!i)
620                         off = 0;
621         }
622
623         rsp->req.sg = rsp->cmd->inline_sg;
624         rsp->req.sg_cnt = sg_count;
625 }
626
627 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
628 {
629         struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
630         u64 off = le64_to_cpu(sgl->addr);
631         u32 len = le32_to_cpu(sgl->length);
632
633         if (!nvme_is_write(rsp->req.cmd)) {
634                 rsp->req.error_loc =
635                         offsetof(struct nvme_common_command, opcode);
636                 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
637         }
638
639         if (off + len > rsp->queue->dev->inline_data_size) {
640                 pr_err("invalid inline data offset!\n");
641                 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
642         }
643
644         /* no data command? */
645         if (!len)
646                 return 0;
647
648         nvmet_rdma_use_inline_sg(rsp, len, off);
649         rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
650         rsp->req.transfer_len += len;
651         return 0;
652 }
653
654 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
655                 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
656 {
657         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
658         u64 addr = le64_to_cpu(sgl->addr);
659         u32 key = get_unaligned_le32(sgl->key);
660         int ret;
661
662         rsp->req.transfer_len = get_unaligned_le24(sgl->length);
663
664         /* no data command? */
665         if (!rsp->req.transfer_len)
666                 return 0;
667
668         ret = nvmet_req_alloc_sgl(&rsp->req);
669         if (unlikely(ret < 0))
670                 goto error_out;
671
672         ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
673                         rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
674                         nvmet_data_dir(&rsp->req));
675         if (unlikely(ret < 0))
676                 goto error_out;
677         rsp->n_rdma += ret;
678
679         if (invalidate) {
680                 rsp->invalidate_rkey = key;
681                 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
682         }
683
684         return 0;
685
686 error_out:
687         rsp->req.transfer_len = 0;
688         return NVME_SC_INTERNAL;
689 }
690
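/*
 * Dispatch on the SGL descriptor type: an offset (inline) data block
 * means the payload arrived in the capsule we just received, while a
 * keyed data block names host memory to be transferred with RDMA
 * READ/WRITE, optionally invalidating the remote key on completion.
 */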
691 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
692 {
693         struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
694
695         switch (sgl->type >> 4) {
696         case NVME_SGL_FMT_DATA_DESC:
697                 switch (sgl->type & 0xf) {
698                 case NVME_SGL_FMT_OFFSET:
699                         return nvmet_rdma_map_sgl_inline(rsp);
700                 default:
701                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
702                         rsp->req.error_loc =
703                                 offsetof(struct nvme_common_command, dptr);
704                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
705                 }
706         case NVME_KEY_SGL_FMT_DATA_DESC:
707                 switch (sgl->type & 0xf) {
708                 case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
709                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
710                 case NVME_SGL_FMT_ADDRESS:
711                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
712                 default:
713                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
714                         rsp->req.error_loc =
715                                 offsetof(struct nvme_common_command, dptr);
716                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
717                 }
718         default:
719                 pr_err("invalid SGL type: %#x\n", sgl->type);
720                 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
721                 return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
722         }
723 }
724
725 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
726 {
727         struct nvmet_rdma_queue *queue = rsp->queue;
728
729         if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
730                         &queue->sq_wr_avail) < 0)) {
731                 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
732                                 1 + rsp->n_rdma, queue->idx,
733                                 queue->nvme_sq.ctrl->cntlid);
734                 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
735                 return false;
736         }
737
738         if (nvmet_rdma_need_data_in(rsp)) {
739                 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
740                                 queue->cm_id->port_num, &rsp->read_cqe, NULL))
741                         nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
742         } else {
743                 rsp->req.execute(&rsp->req);
744         }
745
746         return true;
747 }
748
749 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
750                 struct nvmet_rdma_rsp *cmd)
751 {
752         u16 status;
753
754         ib_dma_sync_single_for_cpu(queue->dev->device,
755                 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
756                 DMA_FROM_DEVICE);
757         ib_dma_sync_single_for_cpu(queue->dev->device,
758                 cmd->send_sge.addr, cmd->send_sge.length,
759                 DMA_TO_DEVICE);
760
761         if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
762                         &queue->nvme_sq, &nvmet_rdma_ops))
763                 return;
764
765         status = nvmet_rdma_map_sgl(cmd);
766         if (status)
767                 goto out_err;
768
769         if (unlikely(!nvmet_rdma_execute_command(cmd))) {
770                 spin_lock(&queue->rsp_wr_wait_lock);
771                 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
772                 spin_unlock(&queue->rsp_wr_wait_lock);
773         }
774
775         return;
776
777 out_err:
778         nvmet_req_complete(&cmd->req, status);
779 }
780
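/*
 * RECV completion path: each completed RECV carries one command
 * capsule.  It is paired with a response context and handed to
 * nvmet_rdma_handle_command(), except while the queue is still
 * CONNECTING, in which case it is parked on rsp_wait_list until the
 * ESTABLISHED event (see nvmet_rdma_queue_established()).
 */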
781 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
782 {
783         struct nvmet_rdma_cmd *cmd =
784                 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
785         struct nvmet_rdma_queue *queue = cq->cq_context;
786         struct nvmet_rdma_rsp *rsp;
787
788         if (unlikely(wc->status != IB_WC_SUCCESS)) {
789                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
790                         pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
791                                 wc->wr_cqe, ib_wc_status_msg(wc->status),
792                                 wc->status);
793                         nvmet_rdma_error_comp(queue);
794                 }
795                 return;
796         }
797
798         if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
799                 pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
800                 nvmet_rdma_error_comp(queue);
801                 return;
802         }
803
804         cmd->queue = queue;
805         rsp = nvmet_rdma_get_rsp(queue);
806         if (unlikely(!rsp)) {
807                 /*
808                  * We get here only under memory pressure:
809                  * silently drop the command and have the host
810                  * retry, as we cannot even fail it.
811                  */
812                 nvmet_rdma_post_recv(queue->dev, cmd);
813                 return;
814         }
815         rsp->queue = queue;
816         rsp->cmd = cmd;
817         rsp->flags = 0;
818         rsp->req.cmd = cmd->nvme_cmd;
819         rsp->req.port = queue->port;
820         rsp->n_rdma = 0;
821
822         if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
823                 unsigned long flags;
824
825                 spin_lock_irqsave(&queue->state_lock, flags);
826                 if (queue->state == NVMET_RDMA_Q_CONNECTING)
827                         list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
828                 else
829                         nvmet_rdma_put_rsp(rsp);
830                 spin_unlock_irqrestore(&queue->state_lock, flags);
831                 return;
832         }
833
834         nvmet_rdma_handle_command(queue, rsp);
835 }
836
837 static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
838 {
839         if (!ndev->srq)
840                 return;
841
842         nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
843         ib_destroy_srq(ndev->srq);
844 }
845
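/*
 * With the use_srq module parameter, one shared receive queue (and one
 * set of command buffers) per device replaces the per-queue RECV rings
 * otherwise allocated in nvmet_rdma_alloc_queue() and posted in
 * nvmet_rdma_create_queue_ib().
 */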
846 static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
847 {
848         struct ib_srq_init_attr srq_attr = { NULL, };
849         struct ib_srq *srq;
850         size_t srq_size;
851         int ret, i;
852
853         srq_size = 4095;        /* XXX: tune */
854
855         srq_attr.attr.max_wr = srq_size;
856         srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
857         srq_attr.attr.srq_limit = 0;
858         srq_attr.srq_type = IB_SRQT_BASIC;
859         srq = ib_create_srq(ndev->pd, &srq_attr);
860         if (IS_ERR(srq)) {
861                 /*
862                  * If SRQs aren't supported we just go ahead and use normal
863                  * non-shared receive queues.
864                  */
865                 pr_info("SRQ requested but not supported.\n");
866                 return 0;
867         }
868
869         ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
870         if (IS_ERR(ndev->srq_cmds)) {
871                 ret = PTR_ERR(ndev->srq_cmds);
872                 goto out_destroy_srq;
873         }
874
875         ndev->srq = srq;
876         ndev->srq_size = srq_size;
877
878         for (i = 0; i < srq_size; i++) {
879                 ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
880                 if (ret)
881                         goto out_free_cmds;
882         }
883
884         return 0;
885
886 out_free_cmds:
887         nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
888 out_destroy_srq:
889         ib_destroy_srq(srq);
890         return ret;
891 }
892
893 static void nvmet_rdma_free_dev(struct kref *ref)
894 {
895         struct nvmet_rdma_device *ndev =
896                 container_of(ref, struct nvmet_rdma_device, ref);
897
898         mutex_lock(&device_list_mutex);
899         list_del(&ndev->entry);
900         mutex_unlock(&device_list_mutex);
901
902         nvmet_rdma_destroy_srq(ndev);
903         ib_dealloc_pd(ndev->pd);
904
905         kfree(ndev);
906 }
907
908 static struct nvmet_rdma_device *
909 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
910 {
911         struct nvmet_port *port = cm_id->context;
912         struct nvmet_rdma_device *ndev;
913         int inline_page_count;
914         int inline_sge_count;
915         int ret;
916
917         mutex_lock(&device_list_mutex);
918         list_for_each_entry(ndev, &device_list, entry) {
919                 if (ndev->device->node_guid == cm_id->device->node_guid &&
920                     kref_get_unless_zero(&ndev->ref))
921                         goto out_unlock;
922         }
923
924         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
925         if (!ndev)
926                 goto out_err;
927
928         inline_page_count = num_pages(port->inline_data_size);
929         inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
930                                 cm_id->device->attrs.max_recv_sge) - 1;
931         if (inline_page_count > inline_sge_count) {
932                 pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
933                         port->inline_data_size, cm_id->device->name,
934                         inline_sge_count * PAGE_SIZE);
935                 port->inline_data_size = inline_sge_count * PAGE_SIZE;
936                 inline_page_count = inline_sge_count;
937         }
938         ndev->inline_data_size = port->inline_data_size;
939         ndev->inline_page_count = inline_page_count;
940         ndev->device = cm_id->device;
941         kref_init(&ndev->ref);
942
943         ndev->pd = ib_alloc_pd(ndev->device, 0);
944         if (IS_ERR(ndev->pd))
945                 goto out_free_dev;
946
947         if (nvmet_rdma_use_srq) {
948                 ret = nvmet_rdma_init_srq(ndev);
949                 if (ret)
950                         goto out_free_pd;
951         }
952
953         list_add(&ndev->entry, &device_list);
954 out_unlock:
955         mutex_unlock(&device_list_mutex);
956         pr_debug("added %s.\n", ndev->device->name);
957         return ndev;
958
959 out_free_pd:
960         ib_dealloc_pd(ndev->pd);
961 out_free_dev:
962         kfree(ndev);
963 out_err:
964         mutex_unlock(&device_list_mutex);
965         return NULL;
966 }
967
968 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
969 {
970         struct ib_qp_init_attr qp_attr;
971         struct nvmet_rdma_device *ndev = queue->dev;
972         int comp_vector, nr_cqe, ret, i;
973
974         /*
975          * Spread the I/O queues across completion vectors,
976          * but keep all admin queues on vector 0.
977          */
978         comp_vector = !queue->host_qid ? 0 :
979                 queue->idx % ndev->device->num_comp_vectors;
980
981         /*
982          * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
983          */
984         nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
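        /*
         * Illustrative numbers only: recv_queue_size = 128 and
         * send_queue_size = 128 give nr_cqe = 128 + 2 * 128 = 384,
         * and the CQ below is sized to nr_cqe + 1.
         */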
985
986         queue->cq = ib_alloc_cq(ndev->device, queue,
987                         nr_cqe + 1, comp_vector,
988                         IB_POLL_WORKQUEUE);
989         if (IS_ERR(queue->cq)) {
990                 ret = PTR_ERR(queue->cq);
991                 pr_err("failed to create CQ cqe= %d ret= %d\n",
992                        nr_cqe + 1, ret);
993                 goto out;
994         }
995
996         memset(&qp_attr, 0, sizeof(qp_attr));
997         qp_attr.qp_context = queue;
998         qp_attr.event_handler = nvmet_rdma_qp_event;
999         qp_attr.send_cq = queue->cq;
1000         qp_attr.recv_cq = queue->cq;
1001         qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
1002         qp_attr.qp_type = IB_QPT_RC;
1003         /* +1 for drain */
1004         qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
1005         qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
1006         qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
1007                                         ndev->device->attrs.max_send_sge);
1008
1009         if (ndev->srq) {
1010                 qp_attr.srq = ndev->srq;
1011         } else {
1012                 /* +1 for drain */
1013                 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1014                 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1015         }
1016
1017         ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1018         if (ret) {
1019                 pr_err("failed to create_qp ret= %d\n", ret);
1020                 goto err_destroy_cq;
1021         }
1022
1023         atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1024
1025         pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1026                  __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1027                  qp_attr.cap.max_send_wr, queue->cm_id);
1028
1029         if (!ndev->srq) {
1030                 for (i = 0; i < queue->recv_queue_size; i++) {
1031                         queue->cmds[i].queue = queue;
1032                         ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1033                         if (ret)
1034                                 goto err_destroy_qp;
1035                 }
1036         }
1037
1038 out:
1039         return ret;
1040
1041 err_destroy_qp:
1042         rdma_destroy_qp(queue->cm_id);
1043 err_destroy_cq:
1044         ib_free_cq(queue->cq);
1045         goto out;
1046 }
1047
1048 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1049 {
1050         struct ib_qp *qp = queue->cm_id->qp;
1051
1052         ib_drain_qp(qp);
1053         rdma_destroy_id(queue->cm_id);
1054         ib_destroy_qp(qp);
1055         ib_free_cq(queue->cq);
1056 }
1057
1058 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1059 {
1060         pr_debug("freeing queue %d\n", queue->idx);
1061
1062         nvmet_sq_destroy(&queue->nvme_sq);
1063
1064         nvmet_rdma_destroy_queue_ib(queue);
1065         if (!queue->dev->srq) {
1066                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1067                                 queue->recv_queue_size,
1068                                 !queue->host_qid);
1069         }
1070         nvmet_rdma_free_rsps(queue);
1071         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1072         kfree(queue);
1073 }
1074
1075 static void nvmet_rdma_release_queue_work(struct work_struct *w)
1076 {
1077         struct nvmet_rdma_queue *queue =
1078                 container_of(w, struct nvmet_rdma_queue, release_work);
1079         struct nvmet_rdma_device *dev = queue->dev;
1080
1081         nvmet_rdma_free_queue(queue);
1082
1083         kref_put(&dev->ref, nvmet_rdma_free_dev);
1084 }
1085
1086 static int
1087 nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1088                                 struct nvmet_rdma_queue *queue)
1089 {
1090         struct nvme_rdma_cm_req *req;
1091
1092         req = (struct nvme_rdma_cm_req *)conn->private_data;
1093         if (!req || conn->private_data_len == 0)
1094                 return NVME_RDMA_CM_INVALID_LEN;
1095
1096         if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1097                 return NVME_RDMA_CM_INVALID_RECFMT;
1098
1099         queue->host_qid = le16_to_cpu(req->qid);
1100
1101         /*
1102          * Our recv queue size is req->hsqsize + 1 (hsqsize is 0's based);
1103          * our send queue size is req->hrqsize.
1104          */
1105         queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1106         queue->send_queue_size = le16_to_cpu(req->hrqsize);
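        /*
         * Illustrative values: hsqsize = 31 and hrqsize = 32 yield
         * recv_queue_size = 32 and send_queue_size = 32.
         */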
1107
1108         if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
1109                 return NVME_RDMA_CM_INVALID_HSQSIZE;
1110
1111         /* XXX: Should we enforce some kind of max for IO queues? */
1112
1113         return 0;
1114 }
1115
1116 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1117                                 enum nvme_rdma_cm_status status)
1118 {
1119         struct nvme_rdma_cm_rej rej;
1120
1121         pr_debug("rejecting connect request: status %d (%s)\n",
1122                  status, nvme_rdma_cm_msg(status));
1123
1124         rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1125         rej.sts = cpu_to_le16(status);
1126
1127         return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1128 }
1129
1130 static struct nvmet_rdma_queue *
1131 nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1132                 struct rdma_cm_id *cm_id,
1133                 struct rdma_cm_event *event)
1134 {
1135         struct nvmet_rdma_queue *queue;
1136         int ret;
1137
1138         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1139         if (!queue) {
1140                 ret = NVME_RDMA_CM_NO_RSC;
1141                 goto out_reject;
1142         }
1143
1144         ret = nvmet_sq_init(&queue->nvme_sq);
1145         if (ret) {
1146                 ret = NVME_RDMA_CM_NO_RSC;
1147                 goto out_free_queue;
1148         }
1149
1150         ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1151         if (ret)
1152                 goto out_destroy_sq;
1153
1154         /*
1155          * Schedules the actual release because calling rdma_destroy_id from
1156          * inside a CM callback would trigger a deadlock. (great API design..)
1157          */
1158         INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1159         queue->dev = ndev;
1160         queue->cm_id = cm_id;
1161
1162         spin_lock_init(&queue->state_lock);
1163         queue->state = NVMET_RDMA_Q_CONNECTING;
1164         INIT_LIST_HEAD(&queue->rsp_wait_list);
1165         INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1166         spin_lock_init(&queue->rsp_wr_wait_lock);
1167         INIT_LIST_HEAD(&queue->free_rsps);
1168         spin_lock_init(&queue->rsps_lock);
1169         INIT_LIST_HEAD(&queue->queue_list);
1170
1171         queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1172         if (queue->idx < 0) {
1173                 ret = NVME_RDMA_CM_NO_RSC;
1174                 goto out_destroy_sq;
1175         }
1176
1177         ret = nvmet_rdma_alloc_rsps(queue);
1178         if (ret) {
1179                 ret = NVME_RDMA_CM_NO_RSC;
1180                 goto out_ida_remove;
1181         }
1182
1183         if (!ndev->srq) {
1184                 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1185                                 queue->recv_queue_size,
1186                                 !queue->host_qid);
1187                 if (IS_ERR(queue->cmds)) {
1188                         ret = NVME_RDMA_CM_NO_RSC;
1189                         goto out_free_responses;
1190                 }
1191         }
1192
1193         ret = nvmet_rdma_create_queue_ib(queue);
1194         if (ret) {
1195                 pr_err("%s: creating RDMA queue failed (%d).\n",
1196                         __func__, ret);
1197                 ret = NVME_RDMA_CM_NO_RSC;
1198                 goto out_free_cmds;
1199         }
1200
1201         return queue;
1202
1203 out_free_cmds:
1204         if (!ndev->srq) {
1205                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1206                                 queue->recv_queue_size,
1207                                 !queue->host_qid);
1208         }
1209 out_free_responses:
1210         nvmet_rdma_free_rsps(queue);
1211 out_ida_remove:
1212         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1213 out_destroy_sq:
1214         nvmet_sq_destroy(&queue->nvme_sq);
1215 out_free_queue:
1216         kfree(queue);
1217 out_reject:
1218         nvmet_rdma_cm_reject(cm_id, ret);
1219         return NULL;
1220 }
1221
1222 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1223 {
1224         struct nvmet_rdma_queue *queue = priv;
1225
1226         switch (event->event) {
1227         case IB_EVENT_COMM_EST:
1228                 rdma_notify(queue->cm_id, event->event);
1229                 break;
1230         default:
1231                 pr_err("received IB QP event: %s (%d)\n",
1232                        ib_event_msg(event->event), event->event);
1233                 break;
1234         }
1235 }
1236
1237 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1238                 struct nvmet_rdma_queue *queue,
1239                 struct rdma_conn_param *p)
1240 {
1241         struct rdma_conn_param  param = { };
1242         struct nvme_rdma_cm_rep priv = { };
1243         int ret = -ENOMEM;
1244
1245         param.rnr_retry_count = 7;
1246         param.flow_control = 1;
1247         param.initiator_depth = min_t(u8, p->initiator_depth,
1248                 queue->dev->device->attrs.max_qp_init_rd_atom);
1249         param.private_data = &priv;
1250         param.private_data_len = sizeof(priv);
1251         priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1252         priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1253
1254         ret = rdma_accept(cm_id, &param);
1255         if (ret)
1256                 pr_err("rdma_accept failed (error code = %d)\n", ret);
1257
1258         return ret;
1259 }
1260
1261 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1262                 struct rdma_cm_event *event)
1263 {
1264         struct nvmet_rdma_device *ndev;
1265         struct nvmet_rdma_queue *queue;
1266         int ret = -EINVAL;
1267
1268         ndev = nvmet_rdma_find_get_device(cm_id);
1269         if (!ndev) {
1270                 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1271                 return -ECONNREFUSED;
1272         }
1273
1274         queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1275         if (!queue) {
1276                 ret = -ENOMEM;
1277                 goto put_device;
1278         }
1279         queue->port = cm_id->context;
1280
1281         if (queue->host_qid == 0) {
1282                 /* Let inflight controller teardown complete */
1283                 flush_scheduled_work();
1284         }
1285
1286         ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1287         if (ret) {
1288                 schedule_work(&queue->release_work);
1289                 /* Destroying rdma_cm id is not needed here */
1290                 return 0;
1291         }
1292
1293         mutex_lock(&nvmet_rdma_queue_mutex);
1294         list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1295         mutex_unlock(&nvmet_rdma_queue_mutex);
1296
1297         return 0;
1298
1299 put_device:
1300         kref_put(&ndev->ref, nvmet_rdma_free_dev);
1301
1302         return ret;
1303 }
1304
1305 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1306 {
1307         unsigned long flags;
1308
1309         spin_lock_irqsave(&queue->state_lock, flags);
1310         if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1311                 pr_warn("trying to establish a connected queue\n");
1312                 goto out_unlock;
1313         }
1314         queue->state = NVMET_RDMA_Q_LIVE;
1315
1316         while (!list_empty(&queue->rsp_wait_list)) {
1317                 struct nvmet_rdma_rsp *cmd;
1318
1319                 cmd = list_first_entry(&queue->rsp_wait_list,
1320                                         struct nvmet_rdma_rsp, wait_list);
1321                 list_del(&cmd->wait_list);
1322
1323                 spin_unlock_irqrestore(&queue->state_lock, flags);
1324                 nvmet_rdma_handle_command(queue, cmd);
1325                 spin_lock_irqsave(&queue->state_lock, flags);
1326         }
1327
1328 out_unlock:
1329         spin_unlock_irqrestore(&queue->state_lock, flags);
1330 }
1331
1332 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1333 {
1334         bool disconnect = false;
1335         unsigned long flags;
1336
1337         pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1338
1339         spin_lock_irqsave(&queue->state_lock, flags);
1340         switch (queue->state) {
1341         case NVMET_RDMA_Q_CONNECTING:
1342         case NVMET_RDMA_Q_LIVE:
1343                 queue->state = NVMET_RDMA_Q_DISCONNECTING;
1344                 disconnect = true;
1345                 break;
1346         case NVMET_RDMA_Q_DISCONNECTING:
1347                 break;
1348         }
1349         spin_unlock_irqrestore(&queue->state_lock, flags);
1350
1351         if (disconnect) {
1352                 rdma_disconnect(queue->cm_id);
1353                 schedule_work(&queue->release_work);
1354         }
1355 }
1356
1357 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1358 {
1359         bool disconnect = false;
1360
1361         mutex_lock(&nvmet_rdma_queue_mutex);
1362         if (!list_empty(&queue->queue_list)) {
1363                 list_del_init(&queue->queue_list);
1364                 disconnect = true;
1365         }
1366         mutex_unlock(&nvmet_rdma_queue_mutex);
1367
1368         if (disconnect)
1369                 __nvmet_rdma_queue_disconnect(queue);
1370 }
1371
1372 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1373                 struct nvmet_rdma_queue *queue)
1374 {
1375         WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1376
1377         mutex_lock(&nvmet_rdma_queue_mutex);
1378         if (!list_empty(&queue->queue_list))
1379                 list_del_init(&queue->queue_list);
1380         mutex_unlock(&nvmet_rdma_queue_mutex);
1381
1382         pr_err("failed to connect queue %d\n", queue->idx);
1383         schedule_work(&queue->release_work);
1384 }
1385
1386 /**
1387  * nvmet_rdma_device_removal() - Handle RDMA device removal
1388  * @cm_id:      rdma_cm id, used for nvmet port
1389  * @queue:      nvmet rdma queue (cm id qp_context)
1390  *
1391  * A DEVICE_REMOVAL event notifies us that the RDMA device is about
1392  * to be unplugged. Note that this event can be generated on a normal
1393  * queue cm_id and/or a device-bound listener cm_id (in which case
1394  * queue will be NULL).
1395  *
1396  * We registered an ib_client to handle device removal for queues,
1397  * so we only need to handle the listening port cm_ids. In that case
1398  * we nullify the priv to prevent double cm_id destruction and destroy
1399  * the cm_id implicitly by returning a non-zero rc to the callout.
1400  */
1401 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1402                 struct nvmet_rdma_queue *queue)
1403 {
1404         struct nvmet_port *port;
1405
1406         if (queue) {
1407                 /*
1408                  * This is a queue cm_id. We have registered
1409                  * an ib_client to handle queue removal,
1410                  * so don't interfere and just return.
1411                  */
1412                 return 0;
1413         }
1414
1415         port = cm_id->context;
1416
1417         /*
1418          * This is a listener cm_id. Make sure that
1419          * future remove_port won't invoke a double
1420          * cm_id destroy. Use atomic xchg to make sure
1421          * we don't compete with remove_port.
1422          */
1423         if (xchg(&port->priv, NULL) != cm_id)
1424                 return 0;
1425
1426         /*
1427          * We need to return 1 so that the core will destroy
1428          * its own ID.  What a great API design..
1429          */
1430         return 1;
1431 }
1432
1433 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1434                 struct rdma_cm_event *event)
1435 {
1436         struct nvmet_rdma_queue *queue = NULL;
1437         int ret = 0;
1438
1439         if (cm_id->qp)
1440                 queue = cm_id->qp->qp_context;
1441
1442         pr_debug("%s (%d): status %d id %p\n",
1443                 rdma_event_msg(event->event), event->event,
1444                 event->status, cm_id);
1445
1446         switch (event->event) {
1447         case RDMA_CM_EVENT_CONNECT_REQUEST:
1448                 ret = nvmet_rdma_queue_connect(cm_id, event);
1449                 break;
1450         case RDMA_CM_EVENT_ESTABLISHED:
1451                 nvmet_rdma_queue_established(queue);
1452                 break;
1453         case RDMA_CM_EVENT_ADDR_CHANGE:
1454         case RDMA_CM_EVENT_DISCONNECTED:
1455         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1456                 nvmet_rdma_queue_disconnect(queue);
1457                 break;
1458         case RDMA_CM_EVENT_DEVICE_REMOVAL:
1459                 ret = nvmet_rdma_device_removal(cm_id, queue);
1460                 break;
1461         case RDMA_CM_EVENT_REJECTED:
1462                 pr_debug("Connection rejected: %s\n",
1463                          rdma_reject_msg(cm_id, event->status));
1464                 /* FALLTHROUGH */
1465         case RDMA_CM_EVENT_UNREACHABLE:
1466         case RDMA_CM_EVENT_CONNECT_ERROR:
1467                 nvmet_rdma_queue_connect_fail(cm_id, queue);
1468                 break;
1469         default:
1470                 pr_err("received unrecognized RDMA CM event %d\n",
1471                         event->event);
1472                 break;
1473         }
1474
1475         return ret;
1476 }
1477
1478 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1479 {
1480         struct nvmet_rdma_queue *queue;
1481
1482 restart:
1483         mutex_lock(&nvmet_rdma_queue_mutex);
1484         list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1485                 if (queue->nvme_sq.ctrl == ctrl) {
1486                         list_del_init(&queue->queue_list);
1487                         mutex_unlock(&nvmet_rdma_queue_mutex);
1488
1489                         __nvmet_rdma_queue_disconnect(queue);
1490                         goto restart;
1491                 }
1492         }
1493         mutex_unlock(&nvmet_rdma_queue_mutex);
1494 }
1495
1496 static int nvmet_rdma_add_port(struct nvmet_port *port)
1497 {
1498         struct rdma_cm_id *cm_id;
1499         struct sockaddr_storage addr = { };
1500         __kernel_sa_family_t af;
1501         int ret;
1502
1503         switch (port->disc_addr.adrfam) {
1504         case NVMF_ADDR_FAMILY_IP4:
1505                 af = AF_INET;
1506                 break;
1507         case NVMF_ADDR_FAMILY_IP6:
1508                 af = AF_INET6;
1509                 break;
1510         default:
1511                 pr_err("address family %d not supported\n",
1512                                 port->disc_addr.adrfam);
1513                 return -EINVAL;
1514         }
1515
1516         if (port->inline_data_size < 0) {
1517                 port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1518         } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1519                 pr_warn("inline_data_size %u is too large, reducing to %u\n",
1520                         port->inline_data_size,
1521                         NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1522                 port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1523         }
1524
1525         ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
1526                         port->disc_addr.trsvcid, &addr);
1527         if (ret) {
1528                 pr_err("malformed ip/port passed: %s:%s\n",
1529                         port->disc_addr.traddr, port->disc_addr.trsvcid);
1530                 return ret;
1531         }
1532
1533         cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1534                         RDMA_PS_TCP, IB_QPT_RC);
1535         if (IS_ERR(cm_id)) {
1536                 pr_err("CM ID creation failed\n");
1537                 return PTR_ERR(cm_id);
1538         }
1539
1540         /*
1541          * Allow both IPv4 and IPv6 sockets to bind a single port
1542          * at the same time.
1543          */
1544         ret = rdma_set_afonly(cm_id, 1);
1545         if (ret) {
1546                 pr_err("rdma_set_afonly failed (%d)\n", ret);
1547                 goto out_destroy_id;
1548         }
1549
1550         ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
1551         if (ret) {
1552                 pr_err("binding CM ID to %pISpcs failed (%d)\n",
1553                         (struct sockaddr *)&addr, ret);
1554                 goto out_destroy_id;
1555         }
1556
1557         ret = rdma_listen(cm_id, 128);
1558         if (ret) {
1559                 pr_err("listening to %pISpcs failed (%d)\n",
1560                         (struct sockaddr *)&addr, ret);
1561                 goto out_destroy_id;
1562         }
1563
1564         pr_info("enabling port %d (%pISpcs)\n",
1565                 le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
1566         port->priv = cm_id;
1567         return 0;
1568
1569 out_destroy_id:
1570         rdma_destroy_id(cm_id);
1571         return ret;
1572 }
1573
1574 static void nvmet_rdma_remove_port(struct nvmet_port *port)
1575 {
1576         struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1577
1578         if (cm_id)
1579                 rdma_destroy_id(cm_id);
1580 }
1581
1582 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1583                 struct nvmet_port *port, char *traddr)
1584 {
1585         struct rdma_cm_id *cm_id = port->priv;
1586
1587         if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1588                 struct nvmet_rdma_rsp *rsp =
1589                         container_of(req, struct nvmet_rdma_rsp, req);
1590                 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
1591                 struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
1592
1593                 sprintf(traddr, "%pISc", addr);
1594         } else {
1595                 memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
1596         }
1597 }
1598
1599 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
1600         .owner                  = THIS_MODULE,
1601         .type                   = NVMF_TRTYPE_RDMA,
1602         .msdbd                  = 1,
1603         .has_keyed_sgls         = 1,
1604         .add_port               = nvmet_rdma_add_port,
1605         .remove_port            = nvmet_rdma_remove_port,
1606         .queue_response         = nvmet_rdma_queue_response,
1607         .delete_ctrl            = nvmet_rdma_delete_ctrl,
1608         .disc_traddr            = nvmet_rdma_disc_port_addr,
1609 };
1610
1611 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1612 {
1613         struct nvmet_rdma_queue *queue, *tmp;
1614         struct nvmet_rdma_device *ndev;
1615         bool found = false;
1616
1617         mutex_lock(&device_list_mutex);
1618         list_for_each_entry(ndev, &device_list, entry) {
1619                 if (ndev->device == ib_device) {
1620                         found = true;
1621                         break;
1622                 }
1623         }
1624         mutex_unlock(&device_list_mutex);
1625
1626         if (!found)
1627                 return;
1628
1629         /*
1630          * IB Device that is used by nvmet controllers is being removed,
1631          * delete all queues using this device.
1632          */
1633         mutex_lock(&nvmet_rdma_queue_mutex);
1634         list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
1635                                  queue_list) {
1636                 if (queue->dev->device != ib_device)
1637                         continue;
1638
1639                 pr_info("Removing queue %d\n", queue->idx);
1640                 list_del_init(&queue->queue_list);
1641                 __nvmet_rdma_queue_disconnect(queue);
1642         }
1643         mutex_unlock(&nvmet_rdma_queue_mutex);
1644
1645         flush_scheduled_work();
1646 }
1647
1648 static struct ib_client nvmet_rdma_ib_client = {
1649         .name   = "nvmet_rdma",
1650         .remove = nvmet_rdma_remove_one
1651 };
1652
1653 static int __init nvmet_rdma_init(void)
1654 {
1655         int ret;
1656
1657         ret = ib_register_client(&nvmet_rdma_ib_client);
1658         if (ret)
1659                 return ret;
1660
1661         ret = nvmet_register_transport(&nvmet_rdma_ops);
1662         if (ret)
1663                 goto err_ib_client;
1664
1665         return 0;
1666
1667 err_ib_client:
1668         ib_unregister_client(&nvmet_rdma_ib_client);
1669         return ret;
1670 }
1671
1672 static void __exit nvmet_rdma_exit(void)
1673 {
1674         nvmet_unregister_transport(&nvmet_rdma_ops);
1675         ib_unregister_client(&nvmet_rdma_ib_client);
1676         WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
1677         ida_destroy(&nvmet_rdma_queue_ida);
1678 }
1679
1680 module_init(nvmet_rdma_init);
1681 module_exit(nvmet_rdma_exit);
1682
1683 MODULE_LICENSE("GPL v2");
1684 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */