1 /*******************************************************************************
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 *******************************************************************************/
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/random.h>
38 #include <linux/highmem.h>
39 #include <linux/time.h>
40 #include <linux/hugetlb.h>
41 #include <asm/byteorder.h>
43 #include <rdma/ib_verbs.h>
44 #include <rdma/iw_cm.h>
45 #include <rdma/ib_user_verbs.h>
46 #include <rdma/ib_umem.h>
50 * i40iw_query_device - get device attributes
51 * @ibdev: device pointer from stack
52 * @props: returning device attributes
55 static int i40iw_query_device(struct ib_device *ibdev,
56 struct ib_device_attr *props,
57 struct ib_udata *udata)
59 struct i40iw_device *iwdev = to_iwdev(ibdev);
61 if (udata->inlen || udata->outlen)
62 return -EINVAL;
63 memset(props, 0, sizeof(*props));
64 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
65 props->fw_ver = I40IW_FW_VERSION;
66 props->device_cap_flags = iwdev->device_cap_flags;
67 props->vendor_id = iwdev->ldev->pcidev->vendor;
68 props->vendor_part_id = iwdev->ldev->pcidev->device;
69 props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
70 props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
71 props->max_qp = iwdev->max_qp - iwdev->used_qps;
72 props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
73 props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
74 props->max_cq = iwdev->max_cq - iwdev->used_cqs;
75 props->max_cqe = iwdev->max_cqe;
76 props->max_mr = iwdev->max_mr - iwdev->used_mrs;
77 props->max_pd = iwdev->max_pd - iwdev->used_pds;
78 props->max_sge_rd = I40IW_MAX_SGE_RD;
79 props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
80 props->max_qp_init_rd_atom = props->max_qp_rd_atom;
81 props->atomic_cap = IB_ATOMIC_NONE;
82 props->max_map_per_fmr = 1;
83 props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
88 * i40iw_query_port - get port attributes
89 * @ibdev: device pointer from stack
90 * @port: port number for query
91 * @props: returning device attributes
93 static int i40iw_query_port(struct ib_device *ibdev,
95 struct ib_port_attr *props)
97 struct i40iw_device *iwdev = to_iwdev(ibdev);
98 struct net_device *netdev = iwdev->netdev;
100 /* props being zeroed by the caller, avoid zeroing it here */
101 props->max_mtu = IB_MTU_4096;
102 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
105 if (netif_carrier_ok(iwdev->netdev))
106 props->state = IB_PORT_ACTIVE;
107 else
108 props->state = IB_PORT_DOWN;
109 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
110 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
111 props->gid_tbl_len = 1;
112 props->pkey_tbl_len = 1;
113 props->active_width = IB_WIDTH_4X;
114 props->active_speed = 1;
115 props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
120 * i40iw_alloc_ucontext - Allocate the user context data structure
121 * @ibdev: device pointer from stack
124 * This keeps track of all objects associated with a particular
125 * user-mode client.
127 static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
128 struct ib_udata *udata)
130 struct i40iw_device *iwdev = to_iwdev(ibdev);
131 struct i40iw_alloc_ucontext_req req;
132 struct i40iw_alloc_ucontext_resp uresp;
133 struct i40iw_ucontext *ucontext;
135 if (ib_copy_from_udata(&req, udata, sizeof(req)))
136 return ERR_PTR(-EINVAL);
138 if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
139 i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
140 return ERR_PTR(-EINVAL);
143 memset(&uresp, 0, sizeof(uresp));
144 uresp.max_qps = iwdev->max_qp;
145 uresp.max_pds = iwdev->max_pd;
146 uresp.wq_size = iwdev->max_qp_wr * 2;
147 uresp.kernel_ver = req.userspace_ver;
149 ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
150 if (!ucontext)
151 return ERR_PTR(-ENOMEM);
153 ucontext->iwdev = iwdev;
154 ucontext->abi_ver = req.userspace_ver;
156 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
158 return ERR_PTR(-EFAULT);
161 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
162 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
163 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
164 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
166 return &ucontext->ibucontext;
170 * i40iw_dealloc_ucontext - deallocate the user context data structure
171 * @context: user context created during alloc
173 static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
175 struct i40iw_ucontext *ucontext = to_ucontext(context);
178 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
179 if (!list_empty(&ucontext->cq_reg_mem_list)) {
180 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
183 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
184 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
185 if (!list_empty(&ucontext->qp_reg_mem_list)) {
186 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
189 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
196 * i40iw_mmap - user memory map
197 * @context: context created during alloc
198 * @vma: kernel info for user memory map
200 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
202 struct i40iw_ucontext *ucontext;
206 ucontext = to_ucontext(context);
207 if (ucontext->iwdev->sc_dev.is_pf) {
208 db_addr_offset = I40IW_DB_ADDR_OFFSET;
209 push_offset = I40IW_PUSH_OFFSET;
211 vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
213 db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
214 push_offset = I40IW_VF_PUSH_OFFSET;
216 vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
219 vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
221 if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
222 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
223 vma->vm_private_data = ucontext;
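/* Push pages: offsets an odd distance past the push region base are
 * mapped uncached, even distances write-combined (the % 2 test below).
 */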
225 if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
226 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
227 else
228 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
231 if (io_remap_pfn_range(vma, vma->vm_start,
232 vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
233 PAGE_SIZE, vma->vm_page_prot))
240 * i40iw_alloc_push_page - allocate a push page for qp
241 * @iwdev: iwarp device
242 * @qp: hardware control qp
244 static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
246 struct i40iw_cqp_request *cqp_request;
247 struct cqp_commands_info *cqp_info;
248 enum i40iw_status_code status;
250 if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
251 return;
253 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
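/* Take an extra reference so compl_info (which carries the allocated
 * push page index) can still be read after the CQP op completes; it is
 * dropped by i40iw_put_cqp_request() below.
 */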
257 atomic_inc(&cqp_request->refcount);
259 cqp_info = &cqp_request->info;
260 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
261 cqp_info->post_sq = 1;
263 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
264 cqp_info->in.u.manage_push_page.info.free_page = 0;
265 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
266 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
268 status = i40iw_handle_cqp_op(iwdev, cqp_request);
270 qp->push_idx = cqp_request->compl_info.op_ret_val;
272 i40iw_pr_err("CQP-OP Push page fail");
273 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
277 * i40iw_dealloc_push_page - free a push page for qp
278 * @iwdev: iwarp device
279 * @qp: hardware control qp
281 static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
283 struct i40iw_cqp_request *cqp_request;
284 struct cqp_commands_info *cqp_info;
285 enum i40iw_status_code status;
287 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
288 return;
290 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
294 cqp_info = &cqp_request->info;
295 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
296 cqp_info->post_sq = 1;
298 cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
299 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
300 cqp_info->in.u.manage_push_page.info.free_page = 1;
301 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
302 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
304 status = i40iw_handle_cqp_op(iwdev, cqp_request);
306 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
308 i40iw_pr_err("CQP-OP Push page fail");
312 * i40iw_alloc_pd - allocate protection domain
313 * @ibdev: device pointer from stack
314 * @context: user context created during alloc
317 static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
318 struct ib_ucontext *context,
319 struct ib_udata *udata)
321 struct i40iw_pd *iwpd;
322 struct i40iw_device *iwdev = to_iwdev(ibdev);
323 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
324 struct i40iw_alloc_pd_resp uresp;
325 struct i40iw_sc_pd *sc_pd;
326 struct i40iw_ucontext *ucontext;
330 if (iwdev->closing)
331 return ERR_PTR(-ENODEV);
333 err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
334 iwdev->max_pd, &pd_id, &iwdev->next_pd);
336 i40iw_pr_err("alloc resource failed\n");
340 iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
346 sc_pd = &iwpd->sc_pd;
349 ucontext = to_ucontext(context);
350 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
351 memset(&uresp, 0, sizeof(uresp));
353 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
358 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
361 i40iw_add_pdusecount(iwpd);
366 i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
371 * i40iw_dealloc_pd - deallocate pd
372 * @ibpd: ptr of pd to be deallocated
374 static int i40iw_dealloc_pd(struct ib_pd *ibpd)
376 struct i40iw_pd *iwpd = to_iwpd(ibpd);
377 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
379 i40iw_rem_pdusecount(iwpd, iwdev);
384 * i40iw_qp_roundup - round up and return the qp ring size
385 * @wr_ring_size: ring size to round up
387 static int i40iw_qp_roundup(u32 wr_ring_size)
391 if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
392 wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
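/* Round up to the next power of two: decrement, smear the highest set
 * bit into every lower bit, then increment.
 */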
394 for (wr_ring_size--; scount <= 16; scount *= 2)
395 wr_ring_size |= wr_ring_size >> scount;
396 return ++wr_ring_size;
400 * i40iw_get_pbl - Retrieve pbl from a list given a virtual address
402 * @va: user virtual address
403 * @pbl_list: pbl list to search in (QP's or CQ's)
405 static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
406 struct list_head *pbl_list)
408 struct i40iw_pbl *iwpbl;
410 list_for_each_entry(iwpbl, pbl_list, list) {
411 if (iwpbl->user_base == va) {
412 list_del(&iwpbl->list);
420 * i40iw_free_qp_resources - free up memory resources for qp
421 * @iwdev: iwarp device
422 * @iwqp: qp ptr (user or kernel)
423 * @qp_num: qp number assigned
425 void i40iw_free_qp_resources(struct i40iw_device *iwdev,
426 struct i40iw_qp *iwqp,
429 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
431 i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
432 i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
434 i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
435 if (iwpbl->pbl_allocated)
436 i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
437 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
438 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
439 kfree(iwqp->kqp.wrid_mem);
440 iwqp->kqp.wrid_mem = NULL;
441 kfree(iwqp->allocated_buffer);
445 * i40iw_clean_cqes - clean cq entries for qp
446 * @iwqp: qp ptr (user or kernel)
449 static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
451 struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
453 ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
457 * i40iw_destroy_qp - destroy qp
458 * @ibqp: qp's ib pointer also to get to device's qp address
460 static int i40iw_destroy_qp(struct ib_qp *ibqp)
462 struct i40iw_qp *iwqp = to_iwqp(ibqp);
466 if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
467 i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
469 if (!iwqp->user_mode) {
471 i40iw_clean_cqes(iwqp, iwqp->iwscq);
472 if (iwqp->iwrcq != iwqp->iwscq)
473 i40iw_clean_cqes(iwqp, iwqp->iwrcq);
477 i40iw_rem_ref(&iwqp->ibqp);
482 * i40iw_setup_virt_qp - setup for allocation of virtual qp
485 * @init_info: initialize info to return
487 static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
488 struct i40iw_qp *iwqp,
489 struct i40iw_qp_init_info *init_info)
491 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
492 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
494 iwqp->page = qpmr->sq_page;
495 init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
496 if (iwpbl->pbl_allocated) {
497 init_info->virtual_map = true;
498 init_info->sq_pa = qpmr->sq_pbl.idx;
499 init_info->rq_pa = qpmr->rq_pbl.idx;
500 } else {
501 init_info->sq_pa = qpmr->sq_pbl.addr;
502 init_info->rq_pa = qpmr->rq_pbl.addr;
508 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
509 * @iwdev: iwarp device
510 * @iwqp: qp ptr (user or kernel)
511 * @info: initialize info to return
513 static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
514 struct i40iw_qp *iwqp,
515 struct i40iw_qp_init_info *info)
517 struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
518 u32 sqdepth, rqdepth;
519 u32 sq_size, rq_size;
522 enum i40iw_status_code status;
523 struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
525 sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
526 rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
528 status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
532 sqdepth = sq_size << sqshift;
533 rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
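/* One allocation backs both work-request id arrays: sqdepth SQ
 * tracking entries followed by rqdepth u64 RQ wrids (the << 3 below).
 */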
535 size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
536 iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
538 ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
539 if (!ukinfo->sq_wrtrk_array)
542 ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
544 size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
545 size += (I40IW_SHADOW_AREA_SIZE << 3);
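/* Single DMA buffer laid out as SQ WQEs, then RQ WQEs, then the shadow
 * area; the va/pa assignments below slice it accordingly.
 */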
547 status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
549 kfree(ukinfo->sq_wrtrk_array);
550 ukinfo->sq_wrtrk_array = NULL;
554 ukinfo->sq = mem->va;
555 info->sq_pa = mem->pa;
557 ukinfo->rq = &ukinfo->sq[sqdepth];
558 info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
560 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
561 info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
563 ukinfo->sq_size = sq_size;
564 ukinfo->rq_size = rq_size;
565 ukinfo->qp_id = iwqp->ibqp.qp_num;
570 * i40iw_create_qp - create qp
572 * @init_attr: attributes for qp
573 * @udata: user data for create qp
575 static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
576 struct ib_qp_init_attr *init_attr,
577 struct ib_udata *udata)
579 struct i40iw_pd *iwpd = to_iwpd(ibpd);
580 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
581 struct i40iw_cqp *iwcqp = &iwdev->cqp;
582 struct i40iw_qp *iwqp;
583 struct i40iw_ucontext *ucontext;
584 struct i40iw_create_qp_req req;
585 struct i40iw_create_qp_resp uresp;
588 enum i40iw_status_code ret;
592 struct i40iw_sc_qp *qp;
593 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
594 struct i40iw_qp_init_info init_info;
595 struct i40iw_create_qp_info *qp_info;
596 struct i40iw_cqp_request *cqp_request;
597 struct cqp_commands_info *cqp_info;
599 struct i40iw_qp_host_ctx_info *ctx_info;
600 struct i40iwarp_offload_info *iwarp_info;
603 if (iwdev->closing)
604 return ERR_PTR(-ENODEV);
606 if (init_attr->create_flags)
607 return ERR_PTR(-EINVAL);
608 if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
609 init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
611 if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
612 init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
614 if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
615 init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
617 memset(&init_info, 0, sizeof(init_info));
619 sq_size = init_attr->cap.max_send_wr;
620 rq_size = init_attr->cap.max_recv_wr;
622 init_info.vsi = &iwdev->vsi;
623 init_info.qp_uk_init_info.sq_size = sq_size;
624 init_info.qp_uk_init_info.rq_size = rq_size;
625 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
626 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
627 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
629 mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
631 return ERR_PTR(-ENOMEM);
633 iwqp = (struct i40iw_qp *)mem;
634 qp = &iwqp->sc_qp;
635 qp->back_qp = (void *)iwqp;
636 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
638 iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
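/* One DMA allocation carries both the Q2 buffer and the QP host
 * context, split at I40IW_Q2_BUFFER_SIZE below.
 */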
640 if (i40iw_allocate_dma_mem(dev->hw,
642 I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
644 i40iw_pr_err("dma_mem failed\n");
649 init_info.q2 = iwqp->q2_ctx_mem.va;
650 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
652 init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
653 init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
655 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
656 &qp_num, &iwdev->next_qp);
658 i40iw_pr_err("qp resource\n");
662 iwqp->allocated_buffer = mem;
665 iwqp->ibqp.qp_num = qp_num;
667 iwqp->iwscq = to_iwcq(init_attr->send_cq);
668 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
670 iwqp->host_ctx.va = init_info.host_ctx;
671 iwqp->host_ctx.pa = init_info.host_ctx_pa;
672 iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
674 init_info.pd = &iwpd->sc_pd;
675 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
676 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
678 if (init_attr->qp_type != IB_QPT_RC) {
682 if (iwdev->push_mode)
683 i40iw_alloc_push_page(iwdev, qp);
685 err_code = ib_copy_from_udata(&req, udata, sizeof(req));
687 i40iw_pr_err("ib_copy_from_data\n");
690 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
691 if (ibpd->uobject && ibpd->uobject->context) {
693 ucontext = to_ucontext(ibpd->uobject->context);
695 if (req.user_wqe_buffers) {
696 struct i40iw_pbl *iwpbl;
699 &ucontext->qp_reg_mem_list_lock, flags);
700 iwpbl = i40iw_get_pbl(
701 (unsigned long)req.user_wqe_buffers,
702 &ucontext->qp_reg_mem_list);
703 spin_unlock_irqrestore(
704 &ucontext->qp_reg_mem_list_lock, flags);
708 i40iw_pr_err("no pbl info\n");
711 memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
714 err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
716 err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
720 i40iw_pr_err("setup qp failed\n");
724 init_info.type = I40IW_QP_TYPE_IWARP;
725 ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
728 i40iw_pr_err("qp_init fail\n");
731 ctx_info = &iwqp->ctx_info;
732 iwarp_info = &iwqp->iwarp_info;
733 iwarp_info->rd_enable = true;
734 iwarp_info->wr_rdresp_en = true;
735 if (!iwqp->user_mode) {
736 iwarp_info->fast_reg_en = true;
737 iwarp_info->priv_mode_en = true;
739 iwarp_info->ddp_ver = 1;
740 iwarp_info->rdmap_ver = 1;
742 ctx_info->iwarp_info_valid = true;
743 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
744 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
745 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
746 ctx_info->push_mode_en = false;
748 ctx_info->push_mode_en = true;
749 ctx_info->push_idx = qp->push_idx;
752 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
753 (u64 *)iwqp->host_ctx.va,
755 ctx_info->iwarp_info_valid = false;
756 cqp_request = i40iw_get_cqp_request(iwcqp, true);
761 cqp_info = &cqp_request->info;
762 qp_info = &cqp_request->info.in.u.qp_create.info;
764 memset(qp_info, 0, sizeof(*qp_info));
766 qp_info->cq_num_valid = true;
767 qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
769 cqp_info->cqp_cmd = OP_QP_CREATE;
770 cqp_info->post_sq = 1;
771 cqp_info->in.u.qp_create.qp = qp;
772 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
773 ret = i40iw_handle_cqp_op(iwdev, cqp_request);
775 i40iw_pr_err("CQP-OP QP create fail");
780 i40iw_add_ref(&iwqp->ibqp);
781 spin_lock_init(&iwqp->lock);
782 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
783 iwdev->qp_table[qp_num] = iwqp;
784 i40iw_add_pdusecount(iwqp->iwpd);
785 i40iw_add_devusecount(iwdev);
786 if (ibpd->uobject && udata) {
787 memset(&uresp, 0, sizeof(uresp));
788 uresp.actual_sq_size = sq_size;
789 uresp.actual_rq_size = rq_size;
790 uresp.qp_id = qp_num;
791 uresp.push_idx = qp->push_idx;
792 err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
794 i40iw_pr_err("copy_to_udata failed\n");
795 i40iw_destroy_qp(&iwqp->ibqp);
796 /* let the completion of the qp destroy free the qp */
797 return ERR_PTR(err_code);
800 init_completion(&iwqp->sq_drained);
801 init_completion(&iwqp->rq_drained);
805 i40iw_free_qp_resources(iwdev, iwqp, qp_num);
806 return ERR_PTR(err_code);
810 * i40iw_query_qp - query qp attributes
812 * @attr: attributes pointer
813 * @attr_mask: Not used
814 * @init_attr: qp attributes to return
816 static int i40iw_query_qp(struct ib_qp *ibqp,
817 struct ib_qp_attr *attr,
819 struct ib_qp_init_attr *init_attr)
821 struct i40iw_qp *iwqp = to_iwqp(ibqp);
822 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
824 attr->qp_access_flags = 0;
825 attr->cap.max_send_wr = qp->qp_uk.sq_size;
826 attr->cap.max_recv_wr = qp->qp_uk.rq_size;
827 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
828 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
829 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
831 init_attr->event_handler = iwqp->ibqp.event_handler;
832 init_attr->qp_context = iwqp->ibqp.qp_context;
833 init_attr->send_cq = iwqp->ibqp.send_cq;
834 init_attr->recv_cq = iwqp->ibqp.recv_cq;
835 init_attr->srq = iwqp->ibqp.srq;
836 init_attr->cap = attr->cap;
837 init_attr->port_num = 1;
842 * i40iw_hw_modify_qp - setup cqp for modify qp
843 * @iwdev: iwarp device
844 * @iwqp: qp ptr (user or kernel)
845 * @info: info for modify qp
846 * @wait: flag to wait or not for modify qp completion
848 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
849 struct i40iw_modify_qp_info *info, bool wait)
851 enum i40iw_status_code status;
852 struct i40iw_cqp_request *cqp_request;
853 struct cqp_commands_info *cqp_info;
854 struct i40iw_modify_qp_info *m_info;
856 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
860 cqp_info = &cqp_request->info;
861 m_info = &cqp_info->in.u.qp_modify.info;
862 memcpy(m_info, info, sizeof(*m_info));
863 cqp_info->cqp_cmd = OP_QP_MODIFY;
864 cqp_info->post_sq = 1;
865 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
866 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
867 status = i40iw_handle_cqp_op(iwdev, cqp_request);
869 i40iw_pr_err("CQP-OP Modify QP fail");
873 * i40iw_modify_qp - modify qp request
874 * @ibqp: qp's pointer for modify
875 * @attr: access attributes
876 * @attr_mask: state mask
879 int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
880 int attr_mask, struct ib_udata *udata)
882 struct i40iw_qp *iwqp = to_iwqp(ibqp);
883 struct i40iw_device *iwdev = iwqp->iwdev;
884 struct i40iw_qp_host_ctx_info *ctx_info;
885 struct i40iwarp_offload_info *iwarp_info;
886 struct i40iw_modify_qp_info info;
887 u8 issue_modify_qp = 0;
892 memset(&info, 0, sizeof(info));
893 ctx_info = &iwqp->ctx_info;
894 iwarp_info = &iwqp->iwarp_info;
896 spin_lock_irqsave(&iwqp->lock, flags);
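/* Map the requested verbs QP state onto the iWARP hardware state
 * machine; backward transitions, and any change other than moving to
 * error while the device is closing, are rejected below.
 */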
898 if (attr_mask & IB_QP_STATE) {
899 if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
904 switch (attr->qp_state) {
907 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
911 if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
912 info.next_iwarp_state = I40IW_QP_STATE_IDLE;
917 if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
924 iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
926 info.next_iwarp_state = I40IW_QP_STATE_RTS;
927 info.tcp_ctx_valid = true;
928 info.ord_valid = true;
929 info.arp_cache_idx_valid = true;
930 info.cq_num_valid = true;
933 if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
937 if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
938 (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
942 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
946 info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
950 if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
954 info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
959 if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
963 if (iwqp->sc_qp.term_flags)
964 i40iw_terminate_del_timer(&iwqp->sc_qp);
965 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
966 if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
968 (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
969 info.reset_tcp_conn = true;
973 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
980 iwqp->ibqp_state = attr->qp_state;
983 iwqp->iwarp_state = info.next_iwarp_state;
985 info.next_iwarp_state = iwqp->iwarp_state;
987 if (attr_mask & IB_QP_ACCESS_FLAGS) {
988 ctx_info->iwarp_info_valid = true;
989 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
990 iwarp_info->wr_rdresp_en = true;
991 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
992 iwarp_info->wr_rdresp_en = true;
993 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
994 iwarp_info->rd_enable = true;
995 if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
996 iwarp_info->bind_en = true;
998 if (iwqp->user_mode) {
999 iwarp_info->rd_enable = true;
1000 iwarp_info->wr_rdresp_en = true;
1001 iwarp_info->priv_mode_en = false;
1005 if (ctx_info->iwarp_info_valid) {
1006 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1009 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1010 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1011 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
1012 (u64 *)iwqp->host_ctx.va,
1015 i40iw_pr_err("setting QP context\n");
1021 spin_unlock_irqrestore(&iwqp->lock, flags);
1023 if (issue_modify_qp)
1024 i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
1026 if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
1028 if (iwqp->cm_id && iwqp->hw_tcp_state) {
1029 spin_lock_irqsave(&iwqp->lock, flags);
1030 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1031 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1032 spin_unlock_irqrestore(&iwqp->lock, flags);
1033 i40iw_cm_disconn(iwqp);
1036 spin_lock_irqsave(&iwqp->lock, flags);
1038 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1039 iwqp->cm_id->add_ref(iwqp->cm_id);
1040 i40iw_schedule_cm_timer(iwqp->cm_node,
1041 (struct i40iw_puda_buf *)iwqp,
1042 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1045 spin_unlock_irqrestore(&iwqp->lock, flags);
1050 spin_unlock_irqrestore(&iwqp->lock, flags);
1055 * cq_free_resources - free up resources for cq
1056 * @iwdev: iwarp device
1059 static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1061 struct i40iw_sc_cq *cq = &iwcq->sc_cq;
1063 if (!iwcq->user_mode)
1064 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1065 i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1069 * i40iw_cq_wq_destroy - send cq destroy cqp
1070 * @iwdev: iwarp device
1071 * @cq: hardware control cq
1073 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1075 enum i40iw_status_code status;
1076 struct i40iw_cqp_request *cqp_request;
1077 struct cqp_commands_info *cqp_info;
1079 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1083 cqp_info = &cqp_request->info;
1085 cqp_info->cqp_cmd = OP_CQ_DESTROY;
1086 cqp_info->post_sq = 1;
1087 cqp_info->in.u.cq_destroy.cq = cq;
1088 cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1089 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1091 i40iw_pr_err("CQP-OP Destroy CQ fail");
1095 * i40iw_destroy_cq - destroy cq
1096 * @ib_cq: cq pointer
1098 static int i40iw_destroy_cq(struct ib_cq *ib_cq)
1100 struct i40iw_cq *iwcq;
1101 struct i40iw_device *iwdev;
1102 struct i40iw_sc_cq *cq;
1105 i40iw_pr_err("ib_cq == NULL\n");
1109 iwcq = to_iwcq(ib_cq);
1110 iwdev = to_iwdev(ib_cq->device);
1112 i40iw_cq_wq_destroy(iwdev, cq);
1113 cq_free_resources(iwdev, iwcq);
1115 i40iw_rem_devusecount(iwdev);
1120 * i40iw_create_cq - create cq
1121 * @ibdev: device pointer from stack
1122 * @attr: attributes for cq
1123 * @context: user context created during alloc
1126 static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
1127 const struct ib_cq_init_attr *attr,
1128 struct ib_ucontext *context,
1129 struct ib_udata *udata)
1131 struct i40iw_device *iwdev = to_iwdev(ibdev);
1132 struct i40iw_cq *iwcq;
1133 struct i40iw_pbl *iwpbl;
1135 struct i40iw_sc_cq *cq;
1136 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1137 struct i40iw_cq_init_info info;
1138 enum i40iw_status_code status;
1139 struct i40iw_cqp_request *cqp_request;
1140 struct cqp_commands_info *cqp_info;
1141 struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1142 unsigned long flags;
1144 int entries = attr->cqe;
1146 if (iwdev->closing)
1147 return ERR_PTR(-ENODEV);
1149 if (entries > iwdev->max_cqe)
1150 return ERR_PTR(-EINVAL);
1152 iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
1154 return ERR_PTR(-ENOMEM);
1156 memset(&info, 0, sizeof(info));
1158 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1159 iwdev->max_cq, &cq_num,
1165 cq->back_cq = (void *)iwcq;
1166 spin_lock_init(&iwcq->lock);
1169 ukinfo->cq_size = max(entries, 4);
1170 ukinfo->cq_id = cq_num;
1171 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1173 if (attr->comp_vector < iwdev->ceqs_count)
1174 info.ceq_id = attr->comp_vector;
1175 info.ceq_id_valid = true;
1177 info.type = I40IW_CQ_TYPE_IWARP;
1179 struct i40iw_ucontext *ucontext;
1180 struct i40iw_create_cq_req req;
1181 struct i40iw_cq_mr *cqmr;
1183 memset(&req, 0, sizeof(req));
1184 iwcq->user_mode = true;
1185 ucontext = to_ucontext(context);
1186 if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
1188 goto cq_free_resources;
1191 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1192 iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
1193 &ucontext->cq_reg_mem_list);
1194 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1197 goto cq_free_resources;
1200 iwcq->iwpbl = iwpbl;
1201 iwcq->cq_mem_size = 0;
1202 cqmr = &iwpbl->cq_mr;
1203 info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
1204 if (iwpbl->pbl_allocated) {
1205 info.virtual_map = true;
1206 info.pbl_chunk_size = 1;
1207 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1209 info.cq_base_pa = cqmr->cq_pbl.addr;
1212 /* Kmode allocations */
1216 rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
1217 rsize = round_up(rsize, 256);
1218 shadow = I40IW_SHADOW_AREA_SIZE << 3;
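/* CQE array rounded up to 256 bytes followed by the shadow area, in
 * one DMA buffer.
 */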
1219 status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
1220 rsize + shadow, 256);
1223 goto cq_free_resources;
1225 ukinfo->cq_base = iwcq->kmem.va;
1226 info.cq_base_pa = iwcq->kmem.pa;
1227 info.shadow_area_pa = info.cq_base_pa + rsize;
1228 ukinfo->shadow_area = iwcq->kmem.va + rsize;
1231 if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
1232 i40iw_pr_err("init cq fail\n");
1234 goto cq_free_resources;
1237 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1240 goto cq_free_resources;
1243 cqp_info = &cqp_request->info;
1244 cqp_info->cqp_cmd = OP_CQ_CREATE;
1245 cqp_info->post_sq = 1;
1246 cqp_info->in.u.cq_create.cq = cq;
1247 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1248 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1250 i40iw_pr_err("CQP-OP Create CQ fail");
1252 goto cq_free_resources;
1256 struct i40iw_create_cq_resp resp;
1258 memset(&resp, 0, sizeof(resp));
1259 resp.cq_id = info.cq_uk_init_info.cq_id;
1260 resp.cq_size = info.cq_uk_init_info.cq_size;
1261 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1262 i40iw_pr_err("copy to user data\n");
1268 i40iw_add_devusecount(iwdev);
1269 return (struct ib_cq *)iwcq;
1272 i40iw_cq_wq_destroy(iwdev, cq);
1274 cq_free_resources(iwdev, iwcq);
1277 return ERR_PTR(err_code);
1281 * i40iw_get_user_access - get hw access from IB access
1282 * @acc: IB access to return hw access
1284 static inline u16 i40iw_get_user_access(int acc)
1288 access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
1289 access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
1290 access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
1291 access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
1296 * i40iw_free_stag - free stag resource
1297 * @iwdev: iwarp device
1298 * @stag: stag to free
1300 static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1304 stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1305 i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1306 i40iw_rem_devusecount(iwdev);
1310 * i40iw_create_stag - create random stag
1311 * @iwdev: iwarp device
1313 static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1317 u32 next_stag_index;
1323 get_random_bytes(&random, sizeof(random));
1324 consumer_key = (u8)random;
1326 driver_key = random & ~iwdev->mr_stagmask;
1327 next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1328 next_stag_index %= iwdev->max_mr;
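/* The random value supplies an 8-bit consumer key and a starting index
 * hint; the final stag combines the allocated index (shifted into the
 * index field) with that key.
 */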
1330 ret = i40iw_alloc_resource(iwdev,
1331 iwdev->allocated_mrs, iwdev->max_mr,
1332 &stag_index, &next_stag_index);
1334 stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
1336 stag += (u32)consumer_key;
1337 i40iw_add_devusecount(iwdev);
1343 * i40iw_next_pbl_addr - Get next pbl address
1344 * @pbl: pointer to a pble
1345 * @pinfo: info pointer
1348 static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
1349 struct i40iw_pble_info **pinfo,
1353 if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
1357 return (u64 *)(*pinfo)->addr;
1361 * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
1362 * @iwmr: iwmr for IB's user page addresses
1363 * @pbl: pbl pointer to save 1 level or 0 level pble
1364 * @level: indicates level 0, 1 or 2
1366 static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
1368 enum i40iw_pble_level level)
1370 struct ib_umem *region = iwmr->region;
1371 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1372 int chunk_pages, entry, i;
1373 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1374 struct i40iw_pble_info *pinfo;
1375 struct scatterlist *sg;
1379 pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
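/* Walk the umem scatterlist and write each page's DMA address into the
 * pble array, remembering the first SQ page for QP registrations;
 * i40iw_next_pbl_addr() advances to the next leaf when a level-2 chunk
 * fills up.
 */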
1381 for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1382 chunk_pages = sg_dma_len(sg) >> region->page_shift;
1383 if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
1384 !iwpbl->qp_mr.sq_page)
1385 iwpbl->qp_mr.sq_page = sg_page(sg);
1386 for (i = 0; i < chunk_pages; i++) {
1387 pg_addr = sg_dma_address(sg) +
1388 (i << region->page_shift);
1390 if ((entry + i) == 0)
1391 *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
1392 else if (!(pg_addr & ~iwmr->page_msk))
1393 *pbl = cpu_to_le64(pg_addr);
1396 pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
1402 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
1403 * @addr: virtual address
1404 * @iwmr: mr pointer for this memory registration
1406 static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
1408 struct vm_area_struct *vma;
1411 down_read(&current->mm->mmap_sem);
1412 vma = find_vma(current->mm, addr);
1413 if (vma && is_vm_hugetlb_page(vma)) {
1414 h = hstate_vma(vma);
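/* only 2MB huge pages are used; other sizes keep the 4K defaults */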
1415 if (huge_page_size(h) == 0x200000) {
1416 iwmr->page_size = huge_page_size(h);
1417 iwmr->page_msk = huge_page_mask(h);
1420 up_read(&current->mm->mmap_sem);
1424 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
1425 * @arr: lvl1 pbl array
1426 * @npages: page count
1427 * @pg_size: page size
1430 static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1434 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1435 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1442 * i40iw_check_mr_contiguous - check if MR is physically contiguous
1443 * @palloc: pbl allocation struct
1444 * @pg_size: page size
1446 static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
1448 struct i40iw_pble_level2 *lvl2 = &palloc->level2;
1449 struct i40iw_pble_info *leaf = lvl2->leaf;
1451 u64 *start_addr = NULL;
1455 if (palloc->level == I40IW_LEVEL_1) {
1456 arr = (u64 *)palloc->level1.addr;
1457 ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
1461 start_addr = (u64 *)leaf->addr;
1463 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1464 arr = (u64 *)leaf->addr;
1465 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1467 ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
1476 * i40iw_setup_pbles - copy user pg address to pble's
1477 * @iwdev: iwarp device
1478 * @iwmr: mr pointer for this memory registration
1479 * @use_pbles: flag to use pbles
1481 static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1482 struct i40iw_mr *iwmr,
1485 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1486 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1487 struct i40iw_pble_info *pinfo;
1489 enum i40iw_status_code status;
1490 enum i40iw_pble_level level = I40IW_LEVEL_1;
1493 mutex_lock(&iwdev->pbl_mutex);
1494 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1495 mutex_unlock(&iwdev->pbl_mutex);
1499 iwpbl->pbl_allocated = true;
1500 level = palloc->level;
1501 pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
1502 pbl = (u64 *)pinfo->addr;
1504 pbl = iwmr->pgaddrmem;
1507 i40iw_copy_user_pgaddrs(iwmr, pbl, level);
1510 iwmr->pgaddrmem[0] = *pbl;
1516 * i40iw_handle_q_mem - handle memory for qp and cq
1517 * @iwdev: iwarp device
1518 * @req: information for q memory management
1519 * @iwpbl: pble struct
1520 * @use_pbles: flag to use pble
1522 static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1523 struct i40iw_mem_reg_req *req,
1524 struct i40iw_pbl *iwpbl,
1527 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1528 struct i40iw_mr *iwmr = iwpbl->iwmr;
1529 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
1530 struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
1531 struct i40iw_hmc_pble *hmc_p;
1532 u64 *arr = iwmr->pgaddrmem;
1538 total = req->sq_pages + req->rq_pages + req->cq_pages;
1539 pg_size = iwmr->page_size;
1541 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1545 if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
1546 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1547 iwpbl->pbl_allocated = false;
1552 arr = (u64 *)palloc->level1.addr;
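/* The user registers sq/rq/cq pages followed by one shadow-area page,
 * so arr[total] below is the shadow page's physical address.
 */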
1554 if (iwmr->type == IW_MEMREG_TYPE_QP) {
1555 hmc_p = &qpmr->sq_pbl;
1556 qpmr->shadow = (dma_addr_t)arr[total];
1559 ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
1561 ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
1565 hmc_p->idx = palloc->level1.idx;
1566 hmc_p = &qpmr->rq_pbl;
1567 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1569 hmc_p->addr = arr[0];
1570 hmc_p = &qpmr->rq_pbl;
1571 hmc_p->addr = arr[req->sq_pages];
1574 hmc_p = &cqmr->cq_pbl;
1575 cqmr->shadow = (dma_addr_t)arr[total];
1578 ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
1581 hmc_p->idx = palloc->level1.idx;
1583 hmc_p->addr = arr[0];
1586 if (use_pbles && ret) {
1587 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1588 iwpbl->pbl_allocated = false;
1595 * i40iw_hw_alloc_stag - cqp command to allocate stag
1596 * @iwdev: iwarp device
1597 * @iwmr: iwarp mr pointer
1599 static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1601 struct i40iw_allocate_stag_info *info;
1602 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1603 enum i40iw_status_code status;
1605 struct i40iw_cqp_request *cqp_request;
1606 struct cqp_commands_info *cqp_info;
1608 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1612 cqp_info = &cqp_request->info;
1613 info = &cqp_info->in.u.alloc_stag.info;
1614 memset(info, 0, sizeof(*info));
1615 info->page_size = PAGE_SIZE;
1616 info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1617 info->pd_id = iwpd->sc_pd.pd_id;
1618 info->total_len = iwmr->length;
1619 info->remote_access = true;
1620 cqp_info->cqp_cmd = OP_ALLOC_STAG;
1621 cqp_info->post_sq = 1;
1622 cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1623 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1625 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1628 i40iw_pr_err("CQP-OP Alloc stag fail");
1634 * i40iw_alloc_mr - register stag for fast memory registration
1636 * @mr_type: memory for stag registration
1637 * @max_num_sg: max number of pages
1639 static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
1640 enum ib_mr_type mr_type,
1643 struct i40iw_pd *iwpd = to_iwpd(pd);
1644 struct i40iw_device *iwdev = to_iwdev(pd->device);
1645 struct i40iw_pble_alloc *palloc;
1646 struct i40iw_pbl *iwpbl;
1647 struct i40iw_mr *iwmr;
1648 enum i40iw_status_code status;
1650 int err_code = -ENOMEM;
1652 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1654 return ERR_PTR(-ENOMEM);
1656 stag = i40iw_create_stag(iwdev);
1658 err_code = -EOVERFLOW;
1661 stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
1663 iwmr->ibmr.rkey = stag;
1664 iwmr->ibmr.lkey = stag;
1666 iwmr->ibmr.device = pd->device;
1667 iwpbl = &iwmr->iwpbl;
1669 iwmr->type = IW_MEMREG_TYPE_MEM;
1670 palloc = &iwpbl->pble_alloc;
1671 iwmr->page_cnt = max_num_sg;
1672 mutex_lock(&iwdev->pbl_mutex);
1673 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1674 mutex_unlock(&iwdev->pbl_mutex);
1678 if (palloc->level != I40IW_LEVEL_1)
1680 err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1683 iwpbl->pbl_allocated = true;
1684 i40iw_add_pdusecount(iwpd);
1687 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1689 i40iw_free_stag(iwdev, stag);
1692 return ERR_PTR(err_code);
1696 * i40iw_set_page - populate pbl list for fmr
1697 * @ibmr: ib mem to access iwarp mr pointer
1698 * @addr: page dma address for pbl list
1700 static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1702 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1703 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1704 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1707 if (unlikely(iwmr->npages == iwmr->page_cnt))
1708 return -ENOMEM;
1710 pbl = (u64 *)palloc->level1.addr;
1711 pbl[iwmr->npages++] = cpu_to_le64(addr);
1716 * i40iw_map_mr_sg - map sg list for fmr
1717 * @ibmr: ib mem to access iwarp mr pointer
1718 * @sg: scatter gather list for fmr
1719 * @sg_nents: number of sg pages
1721 static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1722 int sg_nents, unsigned int *sg_offset)
1724 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1727 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
1731 * i40iw_drain_sq - drain the send queue
1732 * @ibqp: ib qp pointer
1734 static void i40iw_drain_sq(struct ib_qp *ibqp)
1736 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1737 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1739 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1740 wait_for_completion(&iwqp->sq_drained);
1744 * i40iw_drain_rq - drain the receive queue
1745 * @ibqp: ib qp pointer
1747 static void i40iw_drain_rq(struct ib_qp *ibqp)
1749 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1750 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1752 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1753 wait_for_completion(&iwqp->rq_drained);
1757 * i40iw_hwreg_mr - send cqp command for memory registration
1758 * @iwdev: iwarp device
1759 * @iwmr: iwarp mr pointer
1760 * @access: access for MR
1762 static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1763 struct i40iw_mr *iwmr,
1766 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1767 struct i40iw_reg_ns_stag_info *stag_info;
1768 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1769 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1770 enum i40iw_status_code status;
1772 struct i40iw_cqp_request *cqp_request;
1773 struct cqp_commands_info *cqp_info;
1775 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1779 cqp_info = &cqp_request->info;
1780 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1781 memset(stag_info, 0, sizeof(*stag_info));
1782 stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1783 stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1784 stag_info->stag_key = (u8)iwmr->stag;
1785 stag_info->total_len = iwmr->length;
1786 stag_info->access_rights = access;
1787 stag_info->pd_id = iwpd->sc_pd.pd_id;
1788 stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
1789 stag_info->page_size = iwmr->page_size;
1791 if (iwpbl->pbl_allocated) {
1792 if (palloc->level == I40IW_LEVEL_1) {
1793 stag_info->first_pm_pbl_index = palloc->level1.idx;
1794 stag_info->chunk_size = 1;
1796 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1797 stag_info->chunk_size = 3;
1800 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1803 cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1804 cqp_info->post_sq = 1;
1805 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1806 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1808 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1811 i40iw_pr_err("CQP-OP MR Reg fail");
1817 * i40iw_reg_user_mr - Register a user memory region
1819 * @start: virtual start address
1820 * @length: length of mr
1821 * @virt: virtual address
1822 * @acc: access of mr
1825 static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1830 struct ib_udata *udata)
1832 struct i40iw_pd *iwpd = to_iwpd(pd);
1833 struct i40iw_device *iwdev = to_iwdev(pd->device);
1834 struct i40iw_ucontext *ucontext;
1835 struct i40iw_pble_alloc *palloc;
1836 struct i40iw_pbl *iwpbl;
1837 struct i40iw_mr *iwmr;
1838 struct ib_umem *region;
1839 struct i40iw_mem_reg_req req;
1844 bool use_pbles = false;
1845 unsigned long flags;
1850 if (iwdev->closing)
1851 return ERR_PTR(-ENODEV);
1853 if (length > I40IW_MAX_MR_SIZE)
1854 return ERR_PTR(-EINVAL);
1855 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
1857 return (struct ib_mr *)region;
1859 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1860 ib_umem_release(region);
1861 return ERR_PTR(-EFAULT);
1864 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1866 ib_umem_release(region);
1867 return ERR_PTR(-ENOMEM);
1870 iwpbl = &iwmr->iwpbl;
1872 iwmr->region = region;
1874 iwmr->ibmr.device = pd->device;
1875 ucontext = to_ucontext(pd->uobject->context);
1877 iwmr->page_size = PAGE_SIZE;
1878 iwmr->page_msk = PAGE_MASK;
1880 if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
1881 i40iw_set_hugetlb_values(start, iwmr);
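/* Page count includes the offset of start within its first page and
 * rounds up any partial tail page.
 */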
1883 region_length = region->length + (start & (iwmr->page_size - 1));
1884 pg_shift = ffs(iwmr->page_size) - 1;
1885 pbl_depth = region_length >> pg_shift;
1886 pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
1887 iwmr->length = region->length;
1889 iwpbl->user_base = virt;
1890 palloc = &iwpbl->pble_alloc;
1892 iwmr->type = req.reg_type;
1893 iwmr->page_cnt = (u32)pbl_depth;
1895 switch (req.reg_type) {
1896 case IW_MEMREG_TYPE_QP:
1897 use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1898 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1901 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1902 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1903 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1905 case IW_MEMREG_TYPE_CQ:
1906 use_pbles = (req.cq_pages > 1);
1907 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1911 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1912 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1913 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1915 case IW_MEMREG_TYPE_MEM:
1916 use_pbles = (iwmr->page_cnt != 1);
1917 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1919 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1924 ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
1926 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1927 iwpbl->pbl_allocated = false;
1931 access |= i40iw_get_user_access(acc);
1932 stag = i40iw_create_stag(iwdev);
1939 iwmr->ibmr.rkey = stag;
1940 iwmr->ibmr.lkey = stag;
1942 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1944 i40iw_free_stag(iwdev, stag);
1953 iwmr->type = req.reg_type;
1954 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1955 i40iw_add_pdusecount(iwpd);
1959 if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
1960 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1961 ib_umem_release(region);
1963 return ERR_PTR(err);
1967 * i40iw_reg_phys_mr - register kernel physical memory
1969 * @addr: physical address of memory to register
1970 * @size: size of memory to register
1971 * @acc: Access rights
1972 * @iova_start: start of virtual address for physical buffers
1974 struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1980 struct i40iw_pd *iwpd = to_iwpd(pd);
1981 struct i40iw_device *iwdev = to_iwdev(pd->device);
1982 struct i40iw_pbl *iwpbl;
1983 struct i40iw_mr *iwmr;
1984 enum i40iw_status_code status;
1986 u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1989 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1991 return ERR_PTR(-ENOMEM);
1993 iwmr->ibmr.device = pd->device;
1994 iwpbl = &iwmr->iwpbl;
1996 iwmr->type = IW_MEMREG_TYPE_MEM;
1997 iwpbl->user_base = *iova_start;
1998 stag = i40iw_create_stag(iwdev);
2003 access |= i40iw_get_user_access(acc);
2005 iwmr->ibmr.rkey = stag;
2006 iwmr->ibmr.lkey = stag;
2008 iwmr->pgaddrmem[0] = addr;
2009 iwmr->length = size;
2010 status = i40iw_hwreg_mr(iwdev, iwmr, access);
2012 i40iw_free_stag(iwdev, stag);
2017 i40iw_add_pdusecount(iwpd);
2021 return ERR_PTR(ret);
2025 * i40iw_get_dma_mr - register physical mem
2027 * @acc: access for memory
2029 static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
2033 return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
2037 * i40iw_del_memlist - delete pbl list entries for CQ/QP
2038 * @iwmr: iwmr for IB's user page addresses
2039 * @ucontext: ptr to user context
2041 static void i40iw_del_memlist(struct i40iw_mr *iwmr,
2042 struct i40iw_ucontext *ucontext)
2044 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2045 unsigned long flags;
2047 switch (iwmr->type) {
2048 case IW_MEMREG_TYPE_CQ:
2049 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2050 if (!list_empty(&ucontext->cq_reg_mem_list))
2051 list_del(&iwpbl->list);
2052 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2054 case IW_MEMREG_TYPE_QP:
2055 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2056 if (!list_empty(&ucontext->qp_reg_mem_list))
2057 list_del(&iwpbl->list);
2058 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2066 * i40iw_dereg_mr - deregister mr
2067 * @ib_mr: mr ptr for dereg
2069 static int i40iw_dereg_mr(struct ib_mr *ib_mr)
2071 struct ib_pd *ibpd = ib_mr->pd;
2072 struct i40iw_pd *iwpd = to_iwpd(ibpd);
2073 struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2074 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2075 enum i40iw_status_code status;
2076 struct i40iw_dealloc_stag_info *info;
2077 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2078 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2079 struct i40iw_cqp_request *cqp_request;
2080 struct cqp_commands_info *cqp_info;
2084 ib_umem_release(iwmr->region);
2086 if (iwmr->type != IW_MEMREG_TYPE_MEM) {
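/* QP/CQ registrations never allocated a device stag, so only the pbl
 * bookkeeping needs to be undone here.
 */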
2087 if (ibpd->uobject) {
2088 struct i40iw_ucontext *ucontext;
2090 ucontext = to_ucontext(ibpd->uobject->context);
2091 i40iw_del_memlist(iwmr, ucontext);
2093 if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
2094 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2099 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2103 cqp_info = &cqp_request->info;
2104 info = &cqp_info->in.u.dealloc_stag.info;
2105 memset(info, 0, sizeof(*info));
2107 info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2108 info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2109 stag_idx = info->stag_idx;
2111 if (iwpbl->pbl_allocated)
2112 info->dealloc_pbl = true;
2114 cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2115 cqp_info->post_sq = 1;
2116 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2117 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2118 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2120 i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2121 i40iw_rem_pdusecount(iwpd, iwdev);
2122 i40iw_free_stag(iwdev, iwmr->stag);
2123 if (iwpbl->pbl_allocated)
2124 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2132 static ssize_t i40iw_show_rev(struct device *dev,
2133 struct device_attribute *attr, char *buf)
2135 struct i40iw_ib_device *iwibdev = container_of(dev,
2136 struct i40iw_ib_device,
2138 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2140 return sprintf(buf, "%x\n", hw_rev);
2146 static ssize_t i40iw_show_hca(struct device *dev,
2147 struct device_attribute *attr, char *buf)
2149 return sprintf(buf, "I40IW\n");
2155 static ssize_t i40iw_show_board(struct device *dev,
2156 struct device_attribute *attr,
2159 return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2162 static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
2163 static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
2164 static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
2166 static struct device_attribute *i40iw_dev_attributes[] = {
2173 * i40iw_copy_sg_list - copy sg list for qp
2174 * @sg_list: destination sg list to copy into
2175 * @sgl: source ib sg list to copy from
2176 * @num_sges: count of sg entries
2178 static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2182 for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2183 sg_list[i].tag_off = sgl[i].addr;
2184 sg_list[i].len = sgl[i].length;
2185 sg_list[i].stag = sgl[i].lkey;
2190 * i40iw_post_send - kernel application wr
2191 * @ibqp: qp ptr for wr
2192 * @ib_wr: work request ptr
2193 * @bad_wr: return of bad wr if err
2195 static int i40iw_post_send(struct ib_qp *ibqp,
2196 struct ib_send_wr *ib_wr,
2197 struct ib_send_wr **bad_wr)
2199 struct i40iw_qp *iwqp;
2200 struct i40iw_qp_uk *ukqp;
2201 struct i40iw_post_sq_info info;
2202 enum i40iw_status_code ret;
2204 unsigned long flags;
2207 iwqp = (struct i40iw_qp *)ibqp;
2208 ukqp = &iwqp->sc_qp.qp_uk;
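/* Build WQEs for the whole chain under the QP lock and ring the
 * doorbell once at the end via iw_qp_post_wr().
 */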
2210 spin_lock_irqsave(&iwqp->lock, flags);
2213 memset(&info, 0, sizeof(info));
2214 info.wr_id = (u64)(ib_wr->wr_id);
2215 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2216 info.signaled = true;
2217 if (ib_wr->send_flags & IB_SEND_FENCE)
2218 info.read_fence = true;
2220 switch (ib_wr->opcode) {
2223 case IB_WR_SEND_WITH_INV:
2224 if (ib_wr->opcode == IB_WR_SEND) {
2225 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2226 info.op_type = I40IW_OP_TYPE_SEND_SOL;
2228 info.op_type = I40IW_OP_TYPE_SEND;
2230 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2231 info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2233 info.op_type = I40IW_OP_TYPE_SEND_INV;
2236 if (ib_wr->send_flags & IB_SEND_INLINE) {
2237 info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2238 info.op.inline_send.len = ib_wr->sg_list[0].length;
2239 ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2241 info.op.send.num_sges = ib_wr->num_sge;
2242 info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
2243 ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2247 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2253 case IB_WR_RDMA_WRITE:
2254 info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2256 if (ib_wr->send_flags & IB_SEND_INLINE) {
2257 info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2258 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2259 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2260 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2261 info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
2262 ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2264 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2265 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2266 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2267 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2268 info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
2269 ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2273 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2279 case IB_WR_RDMA_READ_WITH_INV:
2282 case IB_WR_RDMA_READ:
2283 if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2287 info.op_type = I40IW_OP_TYPE_RDMA_READ;
2288 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2289 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2290 info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
2291 info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2292 info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2293 info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
2294 ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
2296 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2302 case IB_WR_LOCAL_INV:
2303 info.op_type = I40IW_OP_TYPE_INV_STAG;
2304 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2305 ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
2311 struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2312 int flags = reg_wr(ib_wr)->access;
2313 struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2314 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2315 struct i40iw_fast_reg_stag_info info;
2317 memset(&info, 0, sizeof(info));
2318 info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
2319 info.access_rights |= i40iw_get_user_access(flags);
2320 info.stag_key = reg_wr(ib_wr)->key & 0xff;
2321 info.stag_idx = reg_wr(ib_wr)->key >> 8;
2322 info.page_size = reg_wr(ib_wr)->mr->page_size;
2323 info.wr_id = ib_wr->wr_id;
2325 info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
2326 info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2327 info.total_len = iwmr->ibmr.length;
2328 info.reg_addr_pa = *(u64 *)palloc->level1.addr;
2329 info.first_pm_pbl_index = palloc->level1.idx;
2330 info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2331 info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
2333 if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
2334 info.chunk_size = 1;
2336 ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2343 i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
2350 ib_wr = ib_wr->next;
2356 ukqp->ops.iw_qp_post_wr(ukqp);
2357 spin_unlock_irqrestore(&iwqp->lock, flags);
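/*
 * Usage sketch (not part of this driver, shown for illustration): a kernel
 * ULP reaches i40iw_post_send() through the core verbs call ib_post_send().
 * "qp", "buf_dma", "buf_len" and "mr" below are hypothetical names.
 *
 *	struct ib_sge sge = {
 *		.addr = buf_dma,
 *		.length = buf_len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = 0x1234,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 *
 * A non-zero return leaves *bad_wr pointing at the first request that could
 * not be queued.
 */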
/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int i40iw_post_recv(struct ib_qp *ibqp,
			   struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err("post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}
 out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
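/*
 * Usage sketch (illustrative only): receive buffers reach i40iw_post_recv()
 * via the core verbs call ib_post_recv().  "qp", "rx_dma", "rx_len" and "mr"
 * are hypothetical names.
 *
 *	struct ib_sge sge = {
 *		.addr = rx_dma,
 *		.length = rx_len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = 0x5678,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int err = ib_post_recv(qp, &wr, &bad_wr);
 */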
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
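/*
 * Usage sketch (illustrative only): a ULP typically drains completions in a
 * loop via ib_poll_cq().  "cq" and "WC_BATCH" are hypothetical names.
 *
 *	struct ib_wc wc[WC_BATCH];
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, WC_BATCH, wc)) > 0) {
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_warn("wr %llu failed\n",
 *					(unsigned long long)wc[i].wr_id);
 *		}
 *	}
 */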
/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
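/*
 * Usage sketch (illustrative only): after draining the CQ a ULP re-arms it
 * with ib_req_notify_cq() and then polls once more, closing the race with
 * completions that arrived between the last poll and the arm.  "cq" is a
 * hypothetical name.
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	... call ib_poll_cq(cq, ...) again before sleeping ...
 */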
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static const char * const i40iw_hw_stat_names[] = {
	/* 32-bit counters */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64-bit counters, offset past the 32-bit block */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
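/*
 * Layout note (illustrative, not from the original source): 32-bit HW
 * counters occupy name slots [0, I40IW_HW_STAT_INDEX_MAX_32) and the 64-bit
 * counters follow immediately after, so a 64-bit counter's name slot is
 * found by offsetting its index, e.g.:
 *
 *	int slot = I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32;
 *	const char *name = i40iw_hw_stat_names[slot];
 */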
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
			   I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 + I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second.
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;

	if (dev->is_pf) {
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
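/*
 * Usage sketch (illustrative only; assumes the five-argument ib_query_gid()
 * of this kernel generation): for an iWARP device the GID is simply the
 * netdev MAC address in the first six bytes, zero-padded.
 *
 *	union ib_gid gid;
 *
 *	if (!ib_query_gid(ibdev, 1, 0, &gid, NULL))
 *		pr_info("i40iw GID (MAC-based): %pM\n", gid.raw);
 */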
/**
 * i40iw_modify_port - modify port properties
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */
static int i40iw_modify_port(struct ib_device *ibdev, u8 port,
			     int port_modify_mask,
			     struct ib_port_modify *props)
{
	return -ENOSYS;
}

/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
/**
 * i40iw_create_ah - create address handle
 * @ibpd: ptr of pd
 * @ah_attr: address handle attributes
 */
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
				     struct rdma_ah_attr *attr,
				     struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}

/**
 * i40iw_destroy_ah - Destroy address handle
 * @ah: pointer to address handle
 */
static int i40iw_destroy_ah(struct ib_ah *ah)
{
	return -ENOSYS;
}
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
	if (!iwibdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return NULL;
	}
	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
	iwibdev->ibdev.owner = THIS_MODULE;
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;
	iwibdev->ibdev.modify_port = i40iw_modify_port;
	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
	iwibdev->ibdev.query_gid = i40iw_query_gid;
	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
	iwibdev->ibdev.mmap = i40iw_mmap;
	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
	iwibdev->ibdev.create_qp = i40iw_create_qp;
	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
	iwibdev->ibdev.query_qp = i40iw_query_qp;
	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
	iwibdev->ibdev.create_cq = i40iw_create_cq;
	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
	iwibdev->ibdev.query_device = i40iw_query_device;
	iwibdev->ibdev.create_ah = i40iw_create_ah;
	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		return NULL;
	}
	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
	iwibdev->ibdev.iwcm->connect = i40iw_connect;
	iwibdev->ibdev.iwcm->accept = i40iw_accept;
	iwibdev->ibdev.iwcm->reject = i40iw_reject;
	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iwcm->ifname));
	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
	iwibdev->ibdev.post_send = i40iw_post_send;
	iwibdev->ibdev.post_recv = i40iw_post_recv;

	return iwibdev;
}
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
/**
 * i40iw_unregister_rdma_device - unregister of iwarp from IB
 * @iwibdev: rdma device ptr
 */
static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
		device_remove_file(&iwibdev->ibdev.dev,
				   i40iw_dev_attributes[i]);
	ib_unregister_device(&iwibdev->ibdev);
}
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	if (!iwibdev)
		return;

	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int i, ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;

	ret = ib_register_device(&iwibdev->ibdev, NULL);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
		ret = device_create_file(&iwibdev->ibdev.dev,
					 i40iw_dev_attributes[i]);
		if (ret) {
			while (i > 0) {
				i--;
				device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
			}
			ib_unregister_device(&iwibdev->ibdev);
			goto error;
		}
	}
	return 0;
error:
	kfree(iwdev->iwibdev->ibdev.iwcm);
	iwdev->iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}