1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
6 * irdma_query_device - get device attributes
7 * @ibdev: device pointer from stack
8 * @props: returning device attributes
11 static int irdma_query_device(struct ib_device *ibdev,
12 struct ib_device_attr *props,
13 struct ib_udata *udata)
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
20 if (udata->inlen || udata->outlen)
23 memset(props, 0, sizeof(*props));
24 addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 iwdev->netdev->dev_addr);
26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 irdma_fw_minor_ver(&rf->sc_dev);
28 props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 IB_DEVICE_MEM_MGT_EXTENSIONS;
30 props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
31 props->vendor_id = pcidev->vendor;
32 props->vendor_part_id = pcidev->device;
34 props->hw_ver = rf->pcidev->revision;
35 props->page_size_cap = hw_attrs->page_size_cap;
36 props->max_mr_size = hw_attrs->max_mr_size;
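/* QP/CQ/MR/PD limits below are reported net of resources already allocated,
 * so repeated queries reflect what is still available on this function.
 */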
37 props->max_qp = rf->max_qp - rf->used_qps;
38 props->max_qp_wr = hw_attrs->max_qp_wr;
39 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
40 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 props->max_cq = rf->max_cq - rf->used_cqs;
42 props->max_cqe = rf->max_cqe - 1;
43 props->max_mr = rf->max_mr - rf->used_mrs;
44 props->max_mw = props->max_mr;
45 props->max_pd = rf->max_pd - rf->used_pds;
46 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
47 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
48 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
49 if (rdma_protocol_roce(ibdev, 1)) {
50 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
51 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
54 props->max_ah = rf->max_ah;
55 props->max_mcast_grp = rf->max_mcg;
56 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
57 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
58 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
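/* GEN_2 and later hardware can timestamp completions (see the
 * IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION handling in cq_validate_flags());
 * timestamp_mask advertises which bits of that timestamp are valid.
 */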
59 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
60 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
61 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
67 * irdma_query_port - get port attributes
68 * @ibdev: device pointer from stack
69 * @port: port number for query
70 * @props: returning port attributes
72 static int irdma_query_port(struct ib_device *ibdev, u32 port,
73 struct ib_port_attr *props)
75 struct irdma_device *iwdev = to_iwdev(ibdev);
76 struct net_device *netdev = iwdev->netdev;
78 /* no need to zero out props here. done by caller */
80 props->max_mtu = IB_MTU_4096;
81 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
86 if (netif_carrier_ok(netdev) && netif_running(netdev)) {
87 props->state = IB_PORT_ACTIVE;
88 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
90 props->state = IB_PORT_DOWN;
91 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
94 ib_get_eth_speed(ibdev, port, &props->active_speed,
95 &props->active_width);
97 if (rdma_protocol_roce(ibdev, 1)) {
98 props->gid_tbl_len = 32;
99 props->ip_gids = true;
100 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
102 props->gid_tbl_len = 1;
104 props->qkey_viol_cntr = 0;
105 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
106 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
112 * irdma_disassociate_ucontext - Disassociate user context
113 * @context: ib user context
115 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
119 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
120 struct vm_area_struct *vma)
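/* Legacy libi40iw maps exactly one page, the doorbell page, at mmap
 * offset 0; any other offset or size is rejected below.
 */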
124 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
127 vma->vm_private_data = ucontext;
128 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
129 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
131 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
132 pgprot_noncached(vma->vm_page_prot), NULL);
135 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
137 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
142 static struct rdma_user_mmap_entry*
143 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
144 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
146 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
152 entry->bar_offset = bar_offset;
153 entry->mmap_flag = mmap_flag;
155 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
156 &entry->rdma_entry, PAGE_SIZE);
161 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
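/* This offset is the key userspace later passes to mmap() to reach the BAR page. */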
163 return &entry->rdma_entry;
167 * irdma_mmap - user memory map
168 * @context: context created during alloc
169 * @vma: kernel info for user memory map
171 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
173 struct rdma_user_mmap_entry *rdma_entry;
174 struct irdma_user_mmap_entry *entry;
175 struct irdma_ucontext *ucontext;
179 ucontext = to_ucontext(context);
181 /* Legacy support for libi40iw with hard-coded mmap key */
182 if (ucontext->legacy_mode)
183 return irdma_mmap_legacy(ucontext, vma);
185 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
187 ibdev_dbg(&ucontext->iwdev->ibdev,
188 "VERBS: pgoff[0x%lx] does not have valid entry\n",
193 entry = to_irdma_mmap_entry(rdma_entry);
194 ibdev_dbg(&ucontext->iwdev->ibdev,
195 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
196 entry->bar_offset, entry->mmap_flag);
198 pfn = (entry->bar_offset +
199 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
201 switch (entry->mmap_flag) {
202 case IRDMA_MMAP_IO_NC:
203 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
204 pgprot_noncached(vma->vm_page_prot),
207 case IRDMA_MMAP_IO_WC:
208 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
209 pgprot_writecombine(vma->vm_page_prot),
217 ibdev_dbg(&ucontext->iwdev->ibdev,
218 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
219 entry->bar_offset, entry->mmap_flag, ret);
220 rdma_user_mmap_entry_put(rdma_entry);
226 * irdma_alloc_push_page - allocate a push page for qp
229 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
231 struct irdma_cqp_request *cqp_request;
232 struct cqp_cmds_info *cqp_info;
233 struct irdma_device *iwdev = iwqp->iwdev;
234 struct irdma_sc_qp *qp = &iwqp->sc_qp;
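/* A push page is device memory mapped to the QP so that small WQEs can be
 * written directly to the HW ("push mode") instead of being fetched by DMA
 * after a doorbell. It is requested via the CQP MANAGE_PUSH_PAGE command below.
 */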
237 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
241 cqp_info = &cqp_request->info;
242 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
243 cqp_info->post_sq = 1;
244 cqp_info->in.u.manage_push_page.info.push_idx = 0;
245 cqp_info->in.u.manage_push_page.info.qs_handle =
246 qp->vsi->qos[qp->user_pri].qs_handle;
247 cqp_info->in.u.manage_push_page.info.free_page = 0;
248 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
249 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
250 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
252 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
253 if (!status && cqp_request->compl_info.op_ret_val <
254 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
255 qp->push_idx = cqp_request->compl_info.op_ret_val;
259 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
263 * irdma_alloc_ucontext - Allocate the user context data structure
264 * @uctx: uverbs context pointer
267 * This keeps track of all objects associated with a particular user-mode client.
270 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
271 struct ib_udata *udata)
273 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
274 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
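/* offsetofend() gives the smallest req/resp sizes this kernel accepts;
 * shorter udata buffers indicate an incompatible userspace ABI and are rejected.
 */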
275 struct ib_device *ibdev = uctx->device;
276 struct irdma_device *iwdev = to_iwdev(ibdev);
277 struct irdma_alloc_ucontext_req req = {};
278 struct irdma_alloc_ucontext_resp uresp = {};
279 struct irdma_ucontext *ucontext = to_ucontext(uctx);
280 struct irdma_uk_attrs *uk_attrs;
282 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
283 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
286 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
289 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
292 ucontext->iwdev = iwdev;
293 ucontext->abi_ver = req.userspace_ver;
295 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
296 /* GEN_1 legacy support with libi40iw */
297 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
298 if (uk_attrs->hw_rev != IRDMA_GEN_1)
301 ucontext->legacy_mode = true;
302 uresp.max_qps = iwdev->rf->max_qp;
303 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
304 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
305 uresp.kernel_ver = req.userspace_ver;
306 if (ib_copy_to_udata(udata, &uresp,
307 min(sizeof(uresp), udata->outlen)))
310 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
312 ucontext->db_mmap_entry =
313 irdma_user_mmap_entry_insert(ucontext, bar_off,
316 if (!ucontext->db_mmap_entry)
319 uresp.kernel_ver = IRDMA_ABI_VER;
320 uresp.feature_flags = uk_attrs->feature_flags;
321 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
322 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
323 uresp.max_hw_inline = uk_attrs->max_hw_inline;
324 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
325 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
326 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
327 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
328 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
329 uresp.hw_rev = uk_attrs->hw_rev;
330 if (ib_copy_to_udata(udata, &uresp,
331 min(sizeof(uresp), udata->outlen))) {
332 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
337 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
338 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
339 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
340 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
345 ibdev_err(&iwdev->ibdev,
346 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
347 req.userspace_ver, IRDMA_ABI_VER);
352 * irdma_dealloc_ucontext - deallocate the user context data structure
353 * @context: user context created during alloc
355 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
357 struct irdma_ucontext *ucontext = to_ucontext(context);
359 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
363 * irdma_alloc_pd - allocate protection domain
367 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
369 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
370 struct irdma_pd *iwpd = to_iwpd(pd);
371 struct irdma_device *iwdev = to_iwdev(pd->device);
372 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
373 struct irdma_pci_f *rf = iwdev->rf;
374 struct irdma_alloc_pd_resp uresp = {};
375 struct irdma_sc_pd *sc_pd;
379 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
382 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
387 sc_pd = &iwpd->sc_pd;
389 struct irdma_ucontext *ucontext =
390 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
392 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
394 if (ib_copy_to_udata(udata, &uresp,
395 min(sizeof(uresp), udata->outlen))) {
400 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
405 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
411 * irdma_dealloc_pd - deallocate pd
412 * @ibpd: ptr of pd to be deallocated
415 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
417 struct irdma_pd *iwpd = to_iwpd(ibpd);
418 struct irdma_device *iwdev = to_iwdev(ibpd->device);
420 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
426 * irdma_get_pbl - Retrieve pbl from a list given a virtual address
428 * @va: user virtual address
429 * @pbl_list: pbl list to search in (QP's or CQ's)
431 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
432 struct list_head *pbl_list)
434 struct irdma_pbl *iwpbl;
436 list_for_each_entry (iwpbl, pbl_list, list) {
437 if (iwpbl->user_base == va) {
438 list_del(&iwpbl->list);
439 iwpbl->on_list = false;
448 * irdma_clean_cqes - clean cq entries for qp
449 * @iwqp: qp ptr (user or kernel)
452 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
454 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
457 spin_lock_irqsave(&iwcq->lock, flags);
458 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
459 spin_unlock_irqrestore(&iwcq->lock, flags);
462 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
464 if (iwqp->push_db_mmap_entry) {
465 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
466 iwqp->push_db_mmap_entry = NULL;
468 if (iwqp->push_wqe_mmap_entry) {
469 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
470 iwqp->push_wqe_mmap_entry = NULL;
474 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
475 struct irdma_qp *iwqp,
476 u64 *push_wqe_mmap_key,
477 u64 *push_db_mmap_key)
479 struct irdma_device *iwdev = ucontext->iwdev;
482 rsvd = IRDMA_PF_BAR_RSVD;
483 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
484 /* skip over db page */
485 bar_off += IRDMA_HW_PAGE_SIZE;
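/* push wqe page: skip the reserved region, then index by the QP's push_idx;
 * the matching push doorbell page sits one HW page after it.
 */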
487 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
488 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
489 bar_off, IRDMA_MMAP_IO_WC,
491 if (!iwqp->push_wqe_mmap_entry)
494 /* push doorbell page */
495 bar_off += IRDMA_HW_PAGE_SIZE;
496 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
497 bar_off, IRDMA_MMAP_IO_NC,
499 if (!iwqp->push_db_mmap_entry) {
500 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
508 * irdma_destroy_qp - destroy qp
509 * @ibqp: qp's ib pointer also to get to device's qp address
512 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
514 struct irdma_qp *iwqp = to_iwqp(ibqp);
515 struct irdma_device *iwdev = iwqp->iwdev;
517 iwqp->sc_qp.qp_uk.destroy_pending = true;
519 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
520 irdma_modify_qp_to_err(&iwqp->sc_qp);
522 if (!iwqp->user_mode)
523 cancel_delayed_work_sync(&iwqp->dwork_flush);
525 if (!iwqp->user_mode) {
527 irdma_clean_cqes(iwqp, iwqp->iwscq);
528 if (iwqp->iwrcq != iwqp->iwscq)
529 irdma_clean_cqes(iwqp, iwqp->iwrcq);
533 irdma_qp_rem_ref(&iwqp->ibqp);
534 wait_for_completion(&iwqp->free_qp);
535 irdma_free_lsmm_rsrc(iwqp);
536 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
538 irdma_remove_push_mmap_entries(iwqp);
539 irdma_free_qp_rsrc(iwqp);
545 * irdma_setup_virt_qp - setup for allocation of virtual qp
546 * @iwdev: irdma device
548 * @init_info: initialize info to return
550 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
551 struct irdma_qp *iwqp,
552 struct irdma_qp_init_info *init_info)
554 struct irdma_pbl *iwpbl = iwqp->iwpbl;
555 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
557 iwqp->page = qpmr->sq_page;
558 init_info->shadow_area_pa = qpmr->shadow;
559 if (iwpbl->pbl_allocated) {
560 init_info->virtual_map = true;
561 init_info->sq_pa = qpmr->sq_pbl.idx;
562 init_info->rq_pa = qpmr->rq_pbl.idx;
564 init_info->sq_pa = qpmr->sq_pbl.addr;
565 init_info->rq_pa = qpmr->rq_pbl.addr;
570 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
571 * @iwdev: iwarp device
572 * @iwqp: qp ptr (user or kernel)
573 * @info: initialize info to return
574 * @init_attr: Initial QP create attributes
576 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
577 struct irdma_qp *iwqp,
578 struct irdma_qp_init_info *info,
579 struct ib_qp_init_attr *init_attr)
581 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
582 u32 sqdepth, rqdepth;
586 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
587 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
589 irdma_get_wqe_shift(uk_attrs,
590 uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
591 ukinfo->max_sq_frag_cnt,
592 ukinfo->max_inline_data, &sqshift);
593 status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
598 if (uk_attrs->hw_rev == IRDMA_GEN_1)
599 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
601 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
604 status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
609 iwqp->kqp.sq_wrid_mem =
610 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
611 if (!iwqp->kqp.sq_wrid_mem)
614 iwqp->kqp.rq_wrid_mem =
615 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
616 if (!iwqp->kqp.rq_wrid_mem) {
617 kfree(iwqp->kqp.sq_wrid_mem);
618 iwqp->kqp.sq_wrid_mem = NULL;
622 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
623 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
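/* SQ, RQ and the shadow area share one coherent DMA buffer: the RQ starts
 * right after the SQ and the shadow area follows the RQ.
 */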
625 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
626 size += (IRDMA_SHADOW_AREA_SIZE << 3);
628 mem->size = ALIGN(size, 256);
629 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
630 &mem->pa, GFP_KERNEL);
632 kfree(iwqp->kqp.sq_wrid_mem);
633 iwqp->kqp.sq_wrid_mem = NULL;
634 kfree(iwqp->kqp.rq_wrid_mem);
635 iwqp->kqp.rq_wrid_mem = NULL;
639 ukinfo->sq = mem->va;
640 info->sq_pa = mem->pa;
641 ukinfo->rq = &ukinfo->sq[sqdepth];
642 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
643 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
644 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
645 ukinfo->sq_size = sqdepth >> sqshift;
646 ukinfo->rq_size = rqdepth >> rqshift;
647 ukinfo->qp_id = iwqp->ibqp.qp_num;
649 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
650 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
655 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
657 struct irdma_pci_f *rf = iwqp->iwdev->rf;
658 struct irdma_cqp_request *cqp_request;
659 struct cqp_cmds_info *cqp_info;
660 struct irdma_create_qp_info *qp_info;
663 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
667 cqp_info = &cqp_request->info;
668 qp_info = &cqp_request->info.in.u.qp_create.info;
669 memset(qp_info, 0, sizeof(*qp_info));
670 qp_info->mac_valid = true;
671 qp_info->cq_num_valid = true;
672 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
674 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
675 cqp_info->post_sq = 1;
676 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
677 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
678 status = irdma_handle_cqp_op(rf, cqp_request);
679 irdma_put_cqp_request(&rf->cqp, cqp_request);
684 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
685 struct irdma_qp_host_ctx_info *ctx_info)
687 struct irdma_device *iwdev = iwqp->iwdev;
688 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
689 struct irdma_roce_offload_info *roce_info;
690 struct irdma_udp_offload_info *udp_info;
692 udp_info = &iwqp->udp_info;
693 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
694 udp_info->cwnd = iwdev->roce_cwnd;
695 udp_info->rexmit_thresh = 2;
696 udp_info->rnr_nak_thresh = 2;
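/* RoCEv2 is UDP-encapsulated; ROCE_V2_UDP_DPORT (4791) is the IANA-assigned
 * destination port. The source port is recomputed from the flow label when
 * the AV is set in modify_qp.
 */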
697 udp_info->src_port = 0xc000;
698 udp_info->dst_port = ROCE_V2_UDP_DPORT;
699 roce_info = &iwqp->roce_info;
700 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
702 roce_info->rd_en = true;
703 roce_info->wr_rdresp_en = true;
704 roce_info->bind_en = true;
705 roce_info->dcqcn_en = false;
706 roce_info->rtomin = 5;
708 roce_info->ack_credits = iwdev->roce_ackcreds;
709 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
710 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
712 if (!iwqp->user_mode) {
713 roce_info->priv_mode_en = true;
714 roce_info->fast_reg_en = true;
715 roce_info->udprivcq_en = true;
717 roce_info->roce_tver = 0;
719 ctx_info->roce_info = &iwqp->roce_info;
720 ctx_info->udp_info = &iwqp->udp_info;
721 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
724 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
725 struct irdma_qp_host_ctx_info *ctx_info)
727 struct irdma_device *iwdev = iwqp->iwdev;
728 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
729 struct irdma_iwarp_offload_info *iwarp_info;
731 iwarp_info = &iwqp->iwarp_info;
732 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
733 iwarp_info->rd_en = true;
734 iwarp_info->wr_rdresp_en = true;
735 iwarp_info->bind_en = true;
736 iwarp_info->ecn_en = true;
737 iwarp_info->rtomin = 5;
739 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
740 iwarp_info->ib_rd_en = true;
741 if (!iwqp->user_mode) {
742 iwarp_info->priv_mode_en = true;
743 iwarp_info->fast_reg_en = true;
745 iwarp_info->ddp_ver = 1;
746 iwarp_info->rdmap_ver = 1;
748 ctx_info->iwarp_info = &iwqp->iwarp_info;
749 ctx_info->iwarp_info_valid = true;
750 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
751 ctx_info->iwarp_info_valid = false;
754 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
755 struct irdma_device *iwdev)
757 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
758 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
760 if (init_attr->create_flags)
763 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
764 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
765 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
768 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
769 if (init_attr->qp_type != IB_QPT_RC &&
770 init_attr->qp_type != IB_QPT_UD &&
771 init_attr->qp_type != IB_QPT_GSI)
774 if (init_attr->qp_type != IB_QPT_RC)
781 static void irdma_flush_worker(struct work_struct *work)
783 struct delayed_work *dwork = to_delayed_work(work);
784 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
786 irdma_generate_flush_completions(iwqp);
790 * irdma_create_qp - create qp
792 * @init_attr: attributes for qp
793 * @udata: user data for create qp
795 static int irdma_create_qp(struct ib_qp *ibqp,
796 struct ib_qp_init_attr *init_attr,
797 struct ib_udata *udata)
799 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
800 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
801 struct ib_pd *ibpd = ibqp->pd;
802 struct irdma_pd *iwpd = to_iwpd(ibpd);
803 struct irdma_device *iwdev = to_iwdev(ibpd->device);
804 struct irdma_pci_f *rf = iwdev->rf;
805 struct irdma_qp *iwqp = to_iwqp(ibqp);
806 struct irdma_create_qp_req req = {};
807 struct irdma_create_qp_resp uresp = {};
812 struct irdma_sc_qp *qp;
813 struct irdma_sc_dev *dev = &rf->sc_dev;
814 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
815 struct irdma_qp_init_info init_info = {};
816 struct irdma_qp_host_ctx_info *ctx_info;
819 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
823 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
824 udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
827 sq_size = init_attr->cap.max_send_wr;
828 rq_size = init_attr->cap.max_recv_wr;
830 init_info.vsi = &iwdev->vsi;
831 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
832 init_info.qp_uk_init_info.sq_size = sq_size;
833 init_info.qp_uk_init_info.rq_size = rq_size;
834 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
835 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
836 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
839 qp->qp_uk.back_qp = iwqp;
840 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
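/* Allocate the Q2 buffer and the QP host context together; the context is
 * placed right after the Q2 area within the same DMA allocation.
 */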
843 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
845 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
846 iwqp->q2_ctx_mem.size,
847 &iwqp->q2_ctx_mem.pa,
849 if (!iwqp->q2_ctx_mem.va)
852 init_info.q2 = iwqp->q2_ctx_mem.va;
853 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
854 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
855 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
857 if (init_attr->qp_type == IB_QPT_GSI)
860 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
861 &qp_num, &rf->next_qp);
866 iwqp->ibqp.qp_num = qp_num;
868 iwqp->iwscq = to_iwcq(init_attr->send_cq);
869 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
870 iwqp->host_ctx.va = init_info.host_ctx;
871 iwqp->host_ctx.pa = init_info.host_ctx_pa;
872 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
874 init_info.pd = &iwpd->sc_pd;
875 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
876 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
877 init_info.qp_uk_init_info.first_sq_wq = 1;
878 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
879 init_waitqueue_head(&iwqp->waitq);
880 init_waitqueue_head(&iwqp->mod_qp_waitq);
883 err_code = ib_copy_from_udata(&req, udata,
884 min(sizeof(req), udata->inlen));
886 ibdev_dbg(&iwdev->ibdev,
887 "VERBS: ib_copy_from_data fail\n");
891 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
893 if (req.user_wqe_bufs) {
894 struct irdma_ucontext *ucontext =
895 rdma_udata_to_drv_context(udata,
896 struct irdma_ucontext,
899 init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
900 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
901 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
902 &ucontext->qp_reg_mem_list);
903 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
907 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
911 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
912 irdma_setup_virt_qp(iwdev, iwqp, &init_info);
914 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
915 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
916 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
920 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
924 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
925 if (init_attr->qp_type == IB_QPT_RC) {
926 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
927 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
928 IRDMA_WRITE_WITH_IMM |
931 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
932 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
936 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
937 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
940 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
941 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
943 err_code = irdma_sc_qp_init(qp, &init_info);
945 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
949 ctx_info = &iwqp->ctx_info;
950 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
951 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
953 if (rdma_protocol_roce(&iwdev->ibdev, 1))
954 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
956 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
958 err_code = irdma_cqp_create_qp_cmd(iwqp);
962 refcount_set(&iwqp->refcnt, 1);
963 spin_lock_init(&iwqp->lock);
964 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
965 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
966 rf->qp_table[qp_num] = iwqp;
967 iwqp->max_send_wr = sq_size;
968 iwqp->max_recv_wr = rq_size;
970 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
971 if (dev->ws_add(&iwdev->vsi, 0)) {
972 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
977 irdma_qp_add_qos(&iwqp->sc_qp);
981 /* GEN_1 legacy support with libi40iw does not have the expanded uresp struct */
982 if (udata->outlen < sizeof(uresp)) {
984 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
986 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
989 uresp.actual_sq_size = sq_size;
990 uresp.actual_rq_size = rq_size;
991 uresp.qp_id = qp_num;
992 uresp.qp_caps = qp->qp_uk.qp_caps;
994 err_code = ib_copy_to_udata(udata, &uresp,
995 min(sizeof(uresp), udata->outlen));
997 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
998 irdma_destroy_qp(&iwqp->ibqp, udata);
1003 init_completion(&iwqp->free_qp);
1007 irdma_free_qp_rsrc(iwqp);
1011 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1015 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1016 if (iwqp->roce_info.wr_rdresp_en) {
1017 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1018 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1020 if (iwqp->roce_info.rd_en)
1021 acc_flags |= IB_ACCESS_REMOTE_READ;
1022 if (iwqp->roce_info.bind_en)
1023 acc_flags |= IB_ACCESS_MW_BIND;
1025 if (iwqp->iwarp_info.wr_rdresp_en) {
1026 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1027 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1029 if (iwqp->iwarp_info.rd_en)
1030 acc_flags |= IB_ACCESS_REMOTE_READ;
1031 if (iwqp->iwarp_info.bind_en)
1032 acc_flags |= IB_ACCESS_MW_BIND;
1038 * irdma_query_qp - query qp attributes
1040 * @attr: attributes pointer
1041 * @attr_mask: Not used
1042 * @init_attr: qp attributes to return
1044 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1045 int attr_mask, struct ib_qp_init_attr *init_attr)
1047 struct irdma_qp *iwqp = to_iwqp(ibqp);
1048 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1050 memset(attr, 0, sizeof(*attr));
1051 memset(init_attr, 0, sizeof(*init_attr));
1053 attr->qp_state = iwqp->ibqp_state;
1054 attr->cur_qp_state = iwqp->ibqp_state;
1055 attr->cap.max_send_wr = iwqp->max_send_wr;
1056 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1057 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1058 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1059 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1060 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1062 if (rdma_protocol_roce(ibqp->device, 1)) {
1063 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1064 attr->qkey = iwqp->roce_info.qkey;
1065 attr->rq_psn = iwqp->udp_info.epsn;
1066 attr->sq_psn = iwqp->udp_info.psn_nxt;
1067 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1068 attr->pkey_index = iwqp->roce_info.p_key;
1069 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1070 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1071 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1072 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1075 init_attr->event_handler = iwqp->ibqp.event_handler;
1076 init_attr->qp_context = iwqp->ibqp.qp_context;
1077 init_attr->send_cq = iwqp->ibqp.send_cq;
1078 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1079 init_attr->cap = attr->cap;
1085 * irdma_query_pkey - Query partition key
1086 * @ibdev: device pointer from stack
1087 * @port: port number
1088 * @index: index of pkey
1089 * @pkey: pointer to store the pkey
1091 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1094 if (index >= IRDMA_PKEY_TBL_SZ)
1097 *pkey = IRDMA_DEFAULT_PKEY;
1102 * irdma_modify_qp_roce - modify qp request
1103 * @ibqp: qp's pointer for modify
1104 * @attr: access attributes
1105 * @attr_mask: state mask
1108 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1109 int attr_mask, struct ib_udata *udata)
1111 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1112 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1113 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1114 struct irdma_qp *iwqp = to_iwqp(ibqp);
1115 struct irdma_device *iwdev = iwqp->iwdev;
1116 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1117 struct irdma_qp_host_ctx_info *ctx_info;
1118 struct irdma_roce_offload_info *roce_info;
1119 struct irdma_udp_offload_info *udp_info;
1120 struct irdma_modify_qp_info info = {};
1121 struct irdma_modify_qp_resp uresp = {};
1122 struct irdma_modify_qp_req ureq = {};
1123 unsigned long flags;
1124 u8 issue_modify_qp = 0;
1127 ctx_info = &iwqp->ctx_info;
1128 roce_info = &iwqp->roce_info;
1129 udp_info = &iwqp->udp_info;
1132 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1133 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1134 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1138 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1141 if (attr_mask & IB_QP_DEST_QPN)
1142 roce_info->dest_qp = attr->dest_qp_num;
1144 if (attr_mask & IB_QP_PKEY_INDEX) {
1145 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1151 if (attr_mask & IB_QP_QKEY)
1152 roce_info->qkey = attr->qkey;
1154 if (attr_mask & IB_QP_PATH_MTU)
1155 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1157 if (attr_mask & IB_QP_SQ_PSN) {
1158 udp_info->psn_nxt = attr->sq_psn;
1159 udp_info->lsn = 0xffff;
1160 udp_info->psn_una = attr->sq_psn;
1161 udp_info->psn_max = attr->sq_psn;
1164 if (attr_mask & IB_QP_RQ_PSN)
1165 udp_info->epsn = attr->rq_psn;
1167 if (attr_mask & IB_QP_RNR_RETRY)
1168 udp_info->rnr_nak_thresh = attr->rnr_retry;
1170 if (attr_mask & IB_QP_RETRY_CNT)
1171 udp_info->rexmit_thresh = attr->retry_cnt;
1173 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1175 if (attr_mask & IB_QP_AV) {
1176 struct irdma_av *av = &iwqp->roce_ah.av;
1177 const struct ib_gid_attr *sgid_attr;
1178 u16 vlan_id = VLAN_N_VID;
1181 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1182 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1183 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1184 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1185 udp_info->tos = attr->ah_attr.grh.traffic_class;
1186 udp_info->src_port =
1187 rdma_get_udp_sport(udp_info->flow_label,
1189 roce_info->dest_qp);
1190 irdma_qp_rem_qos(&iwqp->sc_qp);
1191 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1192 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1193 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1194 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1196 irdma_qp_add_qos(&iwqp->sc_qp);
1198 sgid_attr = attr->ah_attr.grh.sgid_attr;
1199 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1200 ctx_info->roce_info->mac_addr);
1204 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1206 if (vlan_id < VLAN_N_VID) {
1207 udp_info->insert_vlan_tag = true;
1208 udp_info->vlan_tag = vlan_id |
1209 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1211 udp_info->insert_vlan_tag = false;
1214 av->attrs = attr->ah_attr;
1215 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1216 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1217 av->net_type = rdma_gid_attr_network_type(sgid_attr);
1218 if (av->net_type == RDMA_NETWORK_IPV6) {
1220 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1222 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1224 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1225 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1227 udp_info->ipv4 = false;
1228 irdma_copy_ip_ntohl(local_ip, daddr);
1230 } else if (av->net_type == RDMA_NETWORK_IPV4) {
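/* IPv4 addresses occupy only the last word of the 4-word address arrays
 * that are shared with IPv6.
 */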
1231 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1232 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1234 local_ip[0] = ntohl(daddr);
1236 udp_info->ipv4 = true;
1237 udp_info->dest_ip_addr[0] = 0;
1238 udp_info->dest_ip_addr[1] = 0;
1239 udp_info->dest_ip_addr[2] = 0;
1240 udp_info->dest_ip_addr[3] = local_ip[0];
1242 udp_info->local_ipaddr[0] = 0;
1243 udp_info->local_ipaddr[1] = 0;
1244 udp_info->local_ipaddr[2] = 0;
1245 udp_info->local_ipaddr[3] = ntohl(saddr);
1248 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1249 attr->ah_attr.roce.dmac);
1252 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1253 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1254 ibdev_err(&iwdev->ibdev,
1255 "rd_atomic = %d, above max_hw_ord=%d\n",
1256 attr->max_rd_atomic,
1257 dev->hw_attrs.max_hw_ord);
1260 if (attr->max_rd_atomic)
1261 roce_info->ord_size = attr->max_rd_atomic;
1262 info.ord_valid = true;
1265 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1266 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1267 ibdev_err(&iwdev->ibdev,
1268 "rd_atomic = %d, above max_hw_ird=%d\n",
1269 attr->max_rd_atomic,
1270 dev->hw_attrs.max_hw_ird);
1273 if (attr->max_dest_rd_atomic)
1274 roce_info->ird_size = attr->max_dest_rd_atomic;
1277 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1278 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1279 roce_info->wr_rdresp_en = true;
1280 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1281 roce_info->wr_rdresp_en = true;
1282 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1283 roce_info->rd_en = true;
1286 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1288 ibdev_dbg(&iwdev->ibdev,
1289 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1290 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1291 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1293 spin_lock_irqsave(&iwqp->lock, flags);
1294 if (attr_mask & IB_QP_STATE) {
1295 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1296 iwqp->ibqp.qp_type, attr_mask)) {
1297 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1298 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1303 info.curr_iwarp_state = iwqp->iwarp_state;
1305 switch (attr->qp_state) {
1307 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1312 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1313 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1314 issue_modify_qp = 1;
1318 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1322 info.arp_cache_idx_valid = true;
1323 info.cq_num_valid = true;
1324 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1325 issue_modify_qp = 1;
1328 if (iwqp->ibqp_state < IB_QPS_RTR ||
1329 iwqp->ibqp_state == IB_QPS_ERR) {
1334 info.arp_cache_idx_valid = true;
1335 info.cq_num_valid = true;
1336 info.ord_valid = true;
1337 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1338 issue_modify_qp = 1;
1339 if (iwdev->push_mode && udata &&
1340 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1341 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1342 spin_unlock_irqrestore(&iwqp->lock, flags);
1343 irdma_alloc_push_page(iwqp);
1344 spin_lock_irqsave(&iwqp->lock, flags);
1348 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1351 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1356 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1357 issue_modify_qp = 1;
1362 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1363 spin_unlock_irqrestore(&iwqp->lock, flags);
1364 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1365 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1366 spin_lock_irqsave(&iwqp->lock, flags);
1369 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1370 spin_unlock_irqrestore(&iwqp->lock, flags);
1371 if (udata && udata->inlen) {
1372 if (ib_copy_from_udata(&ureq, udata,
1373 min(sizeof(ureq), udata->inlen)))
1376 irdma_flush_wqes(iwqp,
1377 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1378 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1384 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1385 issue_modify_qp = 1;
1392 iwqp->ibqp_state = attr->qp_state;
1395 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1396 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1397 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1398 spin_unlock_irqrestore(&iwqp->lock, flags);
1400 if (attr_mask & IB_QP_STATE) {
1401 if (issue_modify_qp) {
1402 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1403 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1405 spin_lock_irqsave(&iwqp->lock, flags);
1406 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1407 iwqp->iwarp_state = info.next_iwarp_state;
1408 iwqp->ibqp_state = attr->qp_state;
1410 if (iwqp->ibqp_state > IB_QPS_RTS &&
1411 !iwqp->flush_issued) {
1412 spin_unlock_irqrestore(&iwqp->lock, flags);
1413 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1416 iwqp->flush_issued = 1;
1418 spin_unlock_irqrestore(&iwqp->lock, flags);
1421 iwqp->ibqp_state = attr->qp_state;
1423 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1424 struct irdma_ucontext *ucontext;
1426 ucontext = rdma_udata_to_drv_context(udata,
1427 struct irdma_ucontext, ibucontext);
1428 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1429 !iwqp->push_wqe_mmap_entry &&
1430 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1431 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1432 uresp.push_valid = 1;
1433 uresp.push_offset = iwqp->sc_qp.push_offset;
1435 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1438 irdma_remove_push_mmap_entries(iwqp);
1439 ibdev_dbg(&iwdev->ibdev,
1440 "VERBS: copy_to_udata failed\n");
1448 spin_unlock_irqrestore(&iwqp->lock, flags);
1454 * irdma_modify_qp - modify qp request
1455 * @ibqp: qp's pointer for modify
1456 * @attr: access attributes
1457 * @attr_mask: state mask
1460 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1461 struct ib_udata *udata)
1463 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1464 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1465 struct irdma_qp *iwqp = to_iwqp(ibqp);
1466 struct irdma_device *iwdev = iwqp->iwdev;
1467 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1468 struct irdma_qp_host_ctx_info *ctx_info;
1469 struct irdma_tcp_offload_info *tcp_info;
1470 struct irdma_iwarp_offload_info *offload_info;
1471 struct irdma_modify_qp_info info = {};
1472 struct irdma_modify_qp_resp uresp = {};
1473 struct irdma_modify_qp_req ureq = {};
1474 u8 issue_modify_qp = 0;
1477 unsigned long flags;
1480 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1481 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1482 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1486 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1489 ctx_info = &iwqp->ctx_info;
1490 offload_info = &iwqp->iwarp_info;
1491 tcp_info = &iwqp->tcp_info;
1492 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1493 ibdev_dbg(&iwdev->ibdev,
1494 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1495 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1496 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1497 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1499 spin_lock_irqsave(&iwqp->lock, flags);
1500 if (attr_mask & IB_QP_STATE) {
1501 info.curr_iwarp_state = iwqp->iwarp_state;
1502 switch (attr->qp_state) {
1505 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1510 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1511 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1512 issue_modify_qp = 1;
1514 if (iwdev->push_mode && udata &&
1515 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1516 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1517 spin_unlock_irqrestore(&iwqp->lock, flags);
1518 irdma_alloc_push_page(iwqp);
1519 spin_lock_irqsave(&iwqp->lock, flags);
1523 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1529 issue_modify_qp = 1;
1530 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1531 iwqp->hte_added = 1;
1532 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1533 info.tcp_ctx_valid = true;
1534 info.ord_valid = true;
1535 info.arp_cache_idx_valid = true;
1536 info.cq_num_valid = true;
1539 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1544 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1545 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1550 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1555 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1556 issue_modify_qp = 1;
1559 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1564 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1565 issue_modify_qp = 1;
1569 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1570 spin_unlock_irqrestore(&iwqp->lock, flags);
1571 if (udata && udata->inlen) {
1572 if (ib_copy_from_udata(&ureq, udata,
1573 min(sizeof(ureq), udata->inlen)))
1576 irdma_flush_wqes(iwqp,
1577 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1578 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1584 if (iwqp->sc_qp.term_flags) {
1585 spin_unlock_irqrestore(&iwqp->lock, flags);
1586 irdma_terminate_del_timer(&iwqp->sc_qp);
1587 spin_lock_irqsave(&iwqp->lock, flags);
1589 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1590 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1592 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1593 info.reset_tcp_conn = true;
1597 issue_modify_qp = 1;
1598 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1605 iwqp->ibqp_state = attr->qp_state;
1607 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1608 ctx_info->iwarp_info_valid = true;
1609 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1610 offload_info->wr_rdresp_en = true;
1611 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1612 offload_info->wr_rdresp_en = true;
1613 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1614 offload_info->rd_en = true;
1617 if (ctx_info->iwarp_info_valid) {
1618 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1619 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1620 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1622 spin_unlock_irqrestore(&iwqp->lock, flags);
1624 if (attr_mask & IB_QP_STATE) {
1625 if (issue_modify_qp) {
1626 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1627 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1631 spin_lock_irqsave(&iwqp->lock, flags);
1632 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1633 iwqp->iwarp_state = info.next_iwarp_state;
1634 iwqp->ibqp_state = attr->qp_state;
1636 spin_unlock_irqrestore(&iwqp->lock, flags);
1639 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1641 if (iwqp->hw_tcp_state) {
1642 spin_lock_irqsave(&iwqp->lock, flags);
1643 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1644 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1645 spin_unlock_irqrestore(&iwqp->lock, flags);
1647 irdma_cm_disconn(iwqp);
1649 int close_timer_started;
1651 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1653 if (iwqp->cm_node) {
1654 refcount_inc(&iwqp->cm_node->refcnt);
1655 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1656 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1657 if (iwqp->cm_id && close_timer_started == 1)
1658 irdma_schedule_cm_timer(iwqp->cm_node,
1659 (struct irdma_puda_buf *)iwqp,
1660 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1662 irdma_rem_ref_cm_node(iwqp->cm_node);
1664 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1668 if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1669 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1670 struct irdma_ucontext *ucontext;
1672 ucontext = rdma_udata_to_drv_context(udata,
1673 struct irdma_ucontext, ibucontext);
1674 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1675 !iwqp->push_wqe_mmap_entry &&
1676 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1677 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1678 uresp.push_valid = 1;
1679 uresp.push_offset = iwqp->sc_qp.push_offset;
1682 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1685 irdma_remove_push_mmap_entries(iwqp);
1686 ibdev_dbg(&iwdev->ibdev,
1687 "VERBS: copy_to_udata failed\n");
1694 spin_unlock_irqrestore(&iwqp->lock, flags);
1700 * irdma_cq_free_rsrc - free up resources for cq
1701 * @rf: RDMA PCI function
1704 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1706 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1708 if (!iwcq->user_mode) {
1709 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1710 iwcq->kmem.va, iwcq->kmem.pa);
1711 iwcq->kmem.va = NULL;
1712 dma_free_coherent(rf->sc_dev.hw->device,
1713 iwcq->kmem_shadow.size,
1714 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1715 iwcq->kmem_shadow.va = NULL;
1718 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1722 * irdma_free_cqbuf - worker to free a cq buffer
1723 * @work: provides access to the cq buffer to free
1725 static void irdma_free_cqbuf(struct work_struct *work)
1727 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1729 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1730 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1731 cq_buf->kmem_buf.va = NULL;
1736 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1737 * @iwcq: cq which owns the resize_list
1738 * @iwdev: irdma device
1739 * @lcqe_buf: the buffer where the last cqe is received
1741 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1742 struct irdma_device *iwdev,
1743 struct irdma_cq_buf *lcqe_buf)
1745 struct list_head *tmp_node, *list_node;
1746 struct irdma_cq_buf *cq_buf;
1749 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1750 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1751 if (cq_buf == lcqe_buf)
1754 list_del(&cq_buf->list);
1755 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1763 * irdma_destroy_cq - destroy cq
1764 * @ib_cq: cq pointer
1767 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1769 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1770 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1771 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1772 struct irdma_sc_dev *dev = cq->dev;
1773 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1774 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1775 unsigned long flags;
1777 spin_lock_irqsave(&iwcq->lock, flags);
1778 if (!list_empty(&iwcq->cmpl_generated))
1779 irdma_remove_cmpls_list(iwcq);
1780 if (!list_empty(&iwcq->resize_list))
1781 irdma_process_resize_list(iwcq, iwdev, NULL);
1782 spin_unlock_irqrestore(&iwcq->lock, flags);
1784 irdma_cq_wq_destroy(iwdev->rf, cq);
1786 spin_lock_irqsave(&iwceq->ce_lock, flags);
1787 irdma_sc_cleanup_ceqes(cq, ceq);
1788 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1789 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1795 * irdma_resize_cq - resize cq
1796 * @ibcq: cq to be resized
1797 * @entries: desired cq size
1800 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1801 struct ib_udata *udata)
1803 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1804 struct irdma_cq *iwcq = to_iwcq(ibcq);
1805 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1806 struct irdma_cqp_request *cqp_request;
1807 struct cqp_cmds_info *cqp_info;
1808 struct irdma_modify_cq_info *m_info;
1809 struct irdma_modify_cq_info info = {};
1810 struct irdma_dma_mem kmem_buf;
1811 struct irdma_cq_mr *cqmr_buf;
1812 struct irdma_pbl *iwpbl_buf;
1813 struct irdma_device *iwdev;
1814 struct irdma_pci_f *rf;
1815 struct irdma_cq_buf *cq_buf = NULL;
1816 unsigned long flags;
1819 iwdev = to_iwdev(ibcq->device);
1822 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1823 IRDMA_FEATURE_CQ_RESIZE))
1826 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1829 if (entries > rf->max_cqe)
1832 if (!iwcq->user_mode) {
1834 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1838 info.cq_size = max(entries, 4);
1840 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1844 struct irdma_resize_cq_req req = {};
1845 struct irdma_ucontext *ucontext =
1846 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1849 /* CQ resize not supported with legacy GEN_1 libi40iw */
1850 if (ucontext->legacy_mode)
1853 if (ib_copy_from_udata(&req, udata,
1854 min(sizeof(req), udata->inlen)))
1857 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1858 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1859 &ucontext->cq_reg_mem_list);
1860 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1865 cqmr_buf = &iwpbl_buf->cq_mr;
1866 if (iwpbl_buf->pbl_allocated) {
1867 info.virtual_map = true;
1868 info.pbl_chunk_size = 1;
1869 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1871 info.cq_pa = cqmr_buf->cq_pbl.addr;
1874 /* Kmode CQ resize */
1877 rsize = info.cq_size * sizeof(struct irdma_cqe);
1878 kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1879 kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1880 kmem_buf.size, &kmem_buf.pa,
1885 info.cq_base = kmem_buf.va;
1886 info.cq_pa = kmem_buf.pa;
1887 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1894 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1900 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1901 info.cq_resize = true;
1903 cqp_info = &cqp_request->info;
1904 m_info = &cqp_info->in.u.cq_modify.info;
1905 memcpy(m_info, &info, sizeof(*m_info));
1907 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1908 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1909 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1910 cqp_info->post_sq = 1;
1911 ret = irdma_handle_cqp_op(rf, cqp_request);
1912 irdma_put_cqp_request(&rf->cqp, cqp_request);
1916 spin_lock_irqsave(&iwcq->lock, flags);
1918 cq_buf->kmem_buf = iwcq->kmem;
1919 cq_buf->hw = dev->hw;
1920 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1921 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1922 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1923 iwcq->kmem = kmem_buf;
1926 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1927 ibcq->cqe = info.cq_size - 1;
1928 spin_unlock_irqrestore(&iwcq->lock, flags);
1933 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
1942 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
1944 /* GEN1 does not support CQ create flags */
1945 if (hw_rev == IRDMA_GEN_1)
1946 return flags ? -EOPNOTSUPP : 0;
1948 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
1952 * irdma_create_cq - create cq
1953 * @ibcq: CQ allocated
1954 * @attr: attributes for cq
1957 static int irdma_create_cq(struct ib_cq *ibcq,
1958 const struct ib_cq_init_attr *attr,
1959 struct ib_udata *udata)
1961 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
1962 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
1963 struct ib_device *ibdev = ibcq->device;
1964 struct irdma_device *iwdev = to_iwdev(ibdev);
1965 struct irdma_pci_f *rf = iwdev->rf;
1966 struct irdma_cq *iwcq = to_iwcq(ibcq);
1968 struct irdma_sc_cq *cq;
1969 struct irdma_sc_dev *dev = &rf->sc_dev;
1970 struct irdma_cq_init_info info = {};
1971 struct irdma_cqp_request *cqp_request;
1972 struct cqp_cmds_info *cqp_info;
1973 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1974 unsigned long flags;
1976 int entries = attr->cqe;
1978 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1982 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1983 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1986 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1993 spin_lock_init(&iwcq->lock);
1994 INIT_LIST_HEAD(&iwcq->resize_list);
1995 INIT_LIST_HEAD(&iwcq->cmpl_generated);
1997 ukinfo->cq_size = max(entries, 4);
1998 ukinfo->cq_id = cq_num;
1999 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2000 if (attr->comp_vector < rf->ceqs_count)
2001 info.ceq_id = attr->comp_vector;
2002 info.ceq_id_valid = true;
2004 info.type = IRDMA_CQ_TYPE_IWARP;
2005 info.vsi = &iwdev->vsi;
2008 struct irdma_ucontext *ucontext;
2009 struct irdma_create_cq_req req = {};
2010 struct irdma_cq_mr *cqmr;
2011 struct irdma_pbl *iwpbl;
2012 struct irdma_pbl *iwpbl_shadow;
2013 struct irdma_cq_mr *cqmr_shadow;
2015 iwcq->user_mode = true;
2017 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2019 if (ib_copy_from_udata(&req, udata,
2020 min(sizeof(req), udata->inlen))) {
2025 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2026 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2027 &ucontext->cq_reg_mem_list);
2028 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2034 iwcq->iwpbl = iwpbl;
2035 iwcq->cq_mem_size = 0;
2036 cqmr = &iwpbl->cq_mr;
2038 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2039 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2040 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2041 iwpbl_shadow = irdma_get_pbl(
2042 (unsigned long)req.user_shadow_area,
2043 &ucontext->cq_reg_mem_list);
2044 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2046 if (!iwpbl_shadow) {
2050 iwcq->iwpbl_shadow = iwpbl_shadow;
2051 cqmr_shadow = &iwpbl_shadow->cq_mr;
2052 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2055 info.shadow_area_pa = cqmr->shadow;
2057 if (iwpbl->pbl_allocated) {
2058 info.virtual_map = true;
2059 info.pbl_chunk_size = 1;
2060 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2062 info.cq_base_pa = cqmr->cq_pbl.addr;
2065 /* Kmode allocations */
2068 if (entries < 1 || entries > rf->max_cqe) {
2074 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2076 ukinfo->cq_size = entries;
2078 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2079 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2080 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2082 &iwcq->kmem.pa, GFP_KERNEL);
2083 if (!iwcq->kmem.va) {
2088 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2090 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2091 iwcq->kmem_shadow.size,
2092 &iwcq->kmem_shadow.pa,
2094 if (!iwcq->kmem_shadow.va) {
2098 info.shadow_area_pa = iwcq->kmem_shadow.pa;
2099 ukinfo->shadow_area = iwcq->kmem_shadow.va;
2100 ukinfo->cq_base = iwcq->kmem.va;
2101 info.cq_base_pa = iwcq->kmem.pa;
2104 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2105 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2106 (u32)IRDMA_MAX_CQ_READ_THRESH);
2108 if (irdma_sc_cq_init(cq, &info)) {
2109 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2114 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2120 cqp_info = &cqp_request->info;
2121 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2122 cqp_info->post_sq = 1;
2123 cqp_info->in.u.cq_create.cq = cq;
2124 cqp_info->in.u.cq_create.check_overflow = true;
2125 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2126 err_code = irdma_handle_cqp_op(rf, cqp_request);
2127 irdma_put_cqp_request(&rf->cqp, cqp_request);
2132 struct irdma_create_cq_resp resp = {};
2134 resp.cq_id = info.cq_uk_init_info.cq_id;
2135 resp.cq_size = info.cq_uk_init_info.cq_size;
2136 if (ib_copy_to_udata(udata, &resp,
2137 min(sizeof(resp), udata->outlen))) {
2138 ibdev_dbg(&iwdev->ibdev,
2139 "VERBS: copy to user data\n");
2146 irdma_cq_wq_destroy(rf, cq);
2148 irdma_cq_free_rsrc(rf, iwcq);
2154 * irdma_get_mr_access - get hw MR access permissions from IB access flags
2155 * @access: IB access flags
2157 static inline u16 irdma_get_mr_access(int access)
2161 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2162 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2163 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2164 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2165 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2166 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2167 hw_access |= (access & IB_ACCESS_MW_BIND) ?
2168 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2169 hw_access |= (access & IB_ZERO_BASED) ?
2170 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2171 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
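/*
 * Editorial example (not in the driver source): an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps to
 * IRDMA_ACCESS_FLAGS_LOCALWRITE | IRDMA_ACCESS_FLAGS_REMOTEREAD |
 * IRDMA_ACCESS_FLAGS_LOCALREAD; local read access is always granted
 * regardless of the IB flags passed in.
 */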
2177 * irdma_free_stag - free stag resource
2178 * @iwdev: irdma device
2179 * @stag: stag to free
2181 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2185 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2186 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2190 * irdma_create_stag - create random stag
2191 * @iwdev: irdma device
2193 static u32 irdma_create_stag(struct irdma_device *iwdev)
2197 u32 next_stag_index;
2203 get_random_bytes(&random, sizeof(random));
2204 consumer_key = (u8)random;
2206 driver_key = random & ~iwdev->rf->mr_stagmask;
2207 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2208 next_stag_index %= iwdev->rf->max_mr;
2210 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2211 iwdev->rf->max_mr, &stag_index,
2215 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2217 stag += (u32)consumer_key;
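/*
 * Editorial sketch (assumption: IRDMA_CQPSQ_STAG_IDX_S is 8, matching the
 * 8-bit consumer key above): the STag packs the allocated resource index
 * in the upper bits and the random consumer key in the low byte, e.g.
 * stag_index 0x1234 with consumer_key 0xab gives stag 0x1234ab, and
 * irdma_free_stag() recovers the index by masking with mr_stagmask and
 * shifting right by the same amount.
 */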
2223 * irdma_next_pbl_addr - Get next pbl address
2224 * @pbl: pointer to a pble
2225 * @pinfo: info pointer
2228 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2232 if (!(*pinfo) || *idx != (*pinfo)->cnt)
2237 return (*pinfo)->addr;
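/*
 * Editorial note (not in the driver source): for a level-2 allocation this
 * helper walks within the current leaf until *idx reaches the leaf's cnt
 * and then returns the base address of the next leaf, so the copy loop in
 * irdma_copy_user_pgaddrs() below can treat the whole PBL as one logical
 * array of page addresses.
 */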
2241 * irdma_copy_user_pgaddrs - copy user page addresses to pbles locally
2242 * @iwmr: iwmr for IB's user page addresses
2243 * @pbl: pbl pointer to save 1 level or 0 level pble
2244 * @level: indicates level 0, 1 or 2
2246 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2247 enum irdma_pble_level level)
2249 struct ib_umem *region = iwmr->region;
2250 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2251 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2252 struct irdma_pble_info *pinfo;
2253 struct ib_block_iter biter;
2257 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2259 if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2260 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2262 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2263 *pbl = rdma_block_iter_dma_address(&biter);
2264 if (++pbl_cnt == palloc->total_cnt)
2266 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2271 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2272 * @arr: lvl1 pbl array
2273 * @npages: page count
2274 * @pg_size: page size
2277 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2281 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2282 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
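/*
 * Editorial example (not in the driver source): with pg_size 0x1000 the
 * array { 0x10000, 0x11000, 0x12000 } is contiguous because every entry
 * equals arr[0] + pg_idx * pg_size, whereas { 0x10000, 0x11000, 0x13000 }
 * fails the comparison above at pg_idx 2.
 */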
2290 * irdma_check_mr_contiguous - check if MR is physically contiguous
2291 * @palloc: pbl allocation struct
2292 * @pg_size: page size
2294 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2297 struct irdma_pble_level2 *lvl2 = &palloc->level2;
2298 struct irdma_pble_info *leaf = lvl2->leaf;
2300 u64 *start_addr = NULL;
2304 if (palloc->level == PBLE_LEVEL_1) {
2305 arr = palloc->level1.addr;
2306 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2311 start_addr = leaf->addr;
2313 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2315 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2317 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2326 * irdma_setup_pbles - copy user page addresses to pbles
2327 * @rf: RDMA PCI function
2328 * @iwmr: mr pointer for this memory registration
2329 * @lvl: requested pble levels
2331 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2334 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2335 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2336 struct irdma_pble_info *pinfo;
2339 enum irdma_pble_level level = PBLE_LEVEL_1;
2342 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2347 iwpbl->pbl_allocated = true;
2348 level = palloc->level;
2349 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2350 palloc->level2.leaf;
2353 pbl = iwmr->pgaddrmem;
2356 irdma_copy_user_pgaddrs(iwmr, pbl, level);
2359 iwmr->pgaddrmem[0] = *pbl;
2365 * irdma_handle_q_mem - handle memory for qp and cq
2366 * @iwdev: irdma device
2367 * @req: information for q memory management
2368 * @iwpbl: pble struct
2369 * @lvl: pble level mask
2371 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2372 struct irdma_mem_reg_req *req,
2373 struct irdma_pbl *iwpbl, u8 lvl)
2375 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2376 struct irdma_mr *iwmr = iwpbl->iwmr;
2377 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2378 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2379 struct irdma_hmc_pble *hmc_p;
2380 u64 *arr = iwmr->pgaddrmem;
2385 pg_size = iwmr->page_size;
2386 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2391 arr = palloc->level1.addr;
2393 switch (iwmr->type) {
2394 case IRDMA_MEMREG_TYPE_QP:
2395 total = req->sq_pages + req->rq_pages;
2396 hmc_p = &qpmr->sq_pbl;
2397 qpmr->shadow = (dma_addr_t)arr[total];
2400 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2403 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2409 hmc_p->idx = palloc->level1.idx;
2410 hmc_p = &qpmr->rq_pbl;
2411 hmc_p->idx = palloc->level1.idx + req->sq_pages;
2413 hmc_p->addr = arr[0];
2414 hmc_p = &qpmr->rq_pbl;
2415 hmc_p->addr = arr[req->sq_pages];
2418 case IRDMA_MEMREG_TYPE_CQ:
2419 hmc_p = &cqmr->cq_pbl;
2422 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2425 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2429 hmc_p->idx = palloc->level1.idx;
2431 hmc_p->addr = arr[0];
2434 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2439 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2440 iwpbl->pbl_allocated = false;
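/*
 * Editorial note (not in the driver source): the shadow area for a user
 * QP or CQ registration is taken from an extra page appended to the user
 * buffer, which is why the code above reads it from
 * arr[req->sq_pages + req->rq_pages] (or arr[req->cq_pages]) and why the
 * callers below size-check page_cnt against the requested pages plus a
 * shadow page (except where the CQ resize feature changes the CQ shadow
 * accounting, as the shadow_pgcnt handling in irdma_reg_user_mr_type_cq()
 * shows).
 */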
2447 * irdma_hw_alloc_mw - create the hw memory window
2448 * @iwdev: irdma device
2449 * @iwmr: pointer to memory window info
2451 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2453 struct irdma_mw_alloc_info *info;
2454 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2455 struct irdma_cqp_request *cqp_request;
2456 struct cqp_cmds_info *cqp_info;
2459 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2463 cqp_info = &cqp_request->info;
2464 info = &cqp_info->in.u.mw_alloc.info;
2465 memset(info, 0, sizeof(*info));
2466 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2467 info->mw_wide = true;
2469 info->page_size = PAGE_SIZE;
2470 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2471 info->pd_id = iwpd->sc_pd.pd_id;
2472 info->remote_access = true;
2473 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2474 cqp_info->post_sq = 1;
2475 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2476 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2477 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2478 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2484 * irdma_alloc_mw - Allocate memory window
2485 * @ibmw: Memory Window
2486 * @udata: user data pointer
2488 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2490 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2491 struct irdma_mr *iwmr = to_iwmw(ibmw);
2495 stag = irdma_create_stag(iwdev);
2502 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2504 irdma_free_stag(iwdev, stag);
2512 * irdma_dealloc_mw - Dealloc memory window
2513 * @ibmw: memory window structure.
2515 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2517 struct ib_pd *ibpd = ibmw->pd;
2518 struct irdma_pd *iwpd = to_iwpd(ibpd);
2519 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2520 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2521 struct irdma_cqp_request *cqp_request;
2522 struct cqp_cmds_info *cqp_info;
2523 struct irdma_dealloc_stag_info *info;
2525 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2529 cqp_info = &cqp_request->info;
2530 info = &cqp_info->in.u.dealloc_stag.info;
2531 memset(info, 0, sizeof(*info));
2532 info->pd_id = iwpd->sc_pd.pd_id;
2533 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2535 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2536 cqp_info->post_sq = 1;
2537 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2538 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2539 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2540 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2541 irdma_free_stag(iwdev, iwmr->stag);
2547 * irdma_hw_alloc_stag - cqp command to allocate stag
2548 * @iwdev: irdma device
2549 * @iwmr: irdma mr pointer
2551 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2552 struct irdma_mr *iwmr)
2554 struct irdma_allocate_stag_info *info;
2555 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2557 struct irdma_cqp_request *cqp_request;
2558 struct cqp_cmds_info *cqp_info;
2560 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2564 cqp_info = &cqp_request->info;
2565 info = &cqp_info->in.u.alloc_stag.info;
2566 memset(info, 0, sizeof(*info));
2567 info->page_size = PAGE_SIZE;
2568 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2569 info->pd_id = iwpd->sc_pd.pd_id;
2570 info->total_len = iwmr->len;
2571 info->remote_access = true;
2572 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2573 cqp_info->post_sq = 1;
2574 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2575 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2576 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2577 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2583 * irdma_alloc_mr - register stag for fast memory registration
2585 * @mr_type: memory reg type for stag registration
2586 * @max_num_sg: max number of pages
2588 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2591 struct irdma_device *iwdev = to_iwdev(pd->device);
2592 struct irdma_pble_alloc *palloc;
2593 struct irdma_pbl *iwpbl;
2594 struct irdma_mr *iwmr;
2598 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2600 return ERR_PTR(-ENOMEM);
2602 stag = irdma_create_stag(iwdev);
2609 iwmr->ibmr.rkey = stag;
2610 iwmr->ibmr.lkey = stag;
2612 iwmr->ibmr.device = pd->device;
2613 iwpbl = &iwmr->iwpbl;
2615 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2616 palloc = &iwpbl->pble_alloc;
2617 iwmr->page_cnt = max_num_sg;
2618 err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2623 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2625 goto err_alloc_stag;
2627 iwpbl->pbl_allocated = true;
2631 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2633 irdma_free_stag(iwdev, stag);
2637 return ERR_PTR(err_code);
2641 * irdma_set_page - populate pbl list for fmr
2642 * @ibmr: ib mem to access iwarp mr pointer
2643 * @addr: page dma address for the pbl list
2645 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2647 struct irdma_mr *iwmr = to_iwmr(ibmr);
2648 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2649 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2652 if (unlikely(iwmr->npages == iwmr->page_cnt))
2655 if (palloc->level == PBLE_LEVEL_2) {
2656 struct irdma_pble_info *palloc_info =
2657 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2659 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2661 pbl = palloc->level1.addr;
2662 pbl[iwmr->npages] = addr;
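/*
 * Editorial note (assumptions: PBLE_PER_PAGE is 512 u64 entries per 4K
 * page and PBLE_512_SHIFT is 9, as the indexing above implies): for a
 * level-2 allocation, page number 1000 lands in leaf 1000 >> 9 = 1 at
 * offset 1000 & 511 = 488, i.e. the second leaf page of PBLEs.
 */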
2670 * irdma_map_mr_sg - map sg list for fmr
2671 * @ibmr: ib mem to access iwarp mr pointer
2672 * @sg: scatter gather list
2673 * @sg_nents: number of sg pages
2674 * @sg_offset: offset into the scatter gather list
2676 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2677 int sg_nents, unsigned int *sg_offset)
2679 struct irdma_mr *iwmr = to_iwmr(ibmr);
2683 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
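/*
 * Editorial sketch (hypothetical ULP code, not part of this driver; error
 * handling omitted, and ib_map_mr_sg() should return nents): a kernel
 * consumer reaches irdma_alloc_mr() and irdma_map_mr_sg() through the
 * core verbs and then posts an IB_WR_REG_MR work request, which
 * irdma_post_send() below turns into a fast-register WQE:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	struct ib_reg_wr reg_wr = {
 *		.wr = { .opcode = IB_WR_REG_MR, .send_flags = IB_SEND_SIGNALED },
 *		.mr = mr,
 *		.key = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *	ib_post_send(qp, &reg_wr.wr, NULL);
 */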
2687 * irdma_hwreg_mr - send cqp command for memory registration
2688 * @iwdev: irdma device
2689 * @iwmr: irdma mr pointer
2690 * @access: access for MR
2692 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2695 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2696 struct irdma_reg_ns_stag_info *stag_info;
2697 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2698 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2699 struct irdma_cqp_request *cqp_request;
2700 struct cqp_cmds_info *cqp_info;
2703 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2707 cqp_info = &cqp_request->info;
2708 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2709 memset(stag_info, 0, sizeof(*stag_info));
2710 stag_info->va = iwpbl->user_base;
2711 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2712 stag_info->stag_key = (u8)iwmr->stag;
2713 stag_info->total_len = iwmr->len;
2714 stag_info->access_rights = irdma_get_mr_access(access);
2715 stag_info->pd_id = iwpd->sc_pd.pd_id;
2716 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2717 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2719 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2720 stag_info->page_size = iwmr->page_size;
2722 if (iwpbl->pbl_allocated) {
2723 if (palloc->level == PBLE_LEVEL_1) {
2724 stag_info->first_pm_pbl_index = palloc->level1.idx;
2725 stag_info->chunk_size = 1;
2727 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2728 stag_info->chunk_size = 3;
2731 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2734 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2735 cqp_info->post_sq = 1;
2736 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2737 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2738 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2739 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2744 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
2746 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2747 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2752 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
2754 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2759 err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
2762 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2763 iwpbl->pbl_allocated = false;
2767 stag = irdma_create_stag(iwdev);
2774 iwmr->ibmr.rkey = stag;
2775 iwmr->ibmr.lkey = stag;
2776 err = irdma_hwreg_mr(iwdev, iwmr, access);
2783 irdma_free_stag(iwdev, stag);
2786 if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2787 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2792 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
2793 struct ib_pd *pd, u64 virt,
2794 enum irdma_memreg_type reg_type)
2796 struct irdma_device *iwdev = to_iwdev(pd->device);
2797 struct irdma_pbl *iwpbl = NULL;
2798 struct irdma_mr *iwmr = NULL;
2799 unsigned long pgsz_bitmap;
2801 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2803 return ERR_PTR(-ENOMEM);
2805 iwpbl = &iwmr->iwpbl;
2807 iwmr->region = region;
2809 iwmr->ibmr.device = pd->device;
2810 iwmr->ibmr.iova = virt;
2811 iwmr->type = reg_type;
2813 pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
2814 iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
2816 iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
2817 if (unlikely(!iwmr->page_size)) {
2819 return ERR_PTR(-EOPNOTSUPP);
2822 iwmr->len = region->length;
2823 iwpbl->user_base = virt;
2824 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2829 static void irdma_free_iwmr(struct irdma_mr *iwmr)
2834 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
2835 struct ib_udata *udata,
2836 struct irdma_mr *iwmr)
2838 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2839 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2840 struct irdma_ucontext *ucontext = NULL;
2841 unsigned long flags;
2846 total = req.sq_pages + req.rq_pages + 1;
2847 if (total > iwmr->page_cnt)
2850 total = req.sq_pages + req.rq_pages;
2851 lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2852 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2856 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2858 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2859 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2860 iwpbl->on_list = true;
2861 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2866 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
2867 struct ib_udata *udata,
2868 struct irdma_mr *iwmr)
2870 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2871 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2872 struct irdma_ucontext *ucontext = NULL;
2873 u8 shadow_pgcnt = 1;
2874 unsigned long flags;
2879 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2881 total = req.cq_pages + shadow_pgcnt;
2882 if (total > iwmr->page_cnt)
2885 lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2886 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2890 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2892 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2893 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2894 iwpbl->on_list = true;
2895 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2901 * irdma_reg_user_mr - Register a user memory region
2903 * @start: virtual start address
2904 * @len: length of mr
2905 * @virt: virtual address
2906 * @access: access of mr
2909 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2910 u64 virt, int access,
2911 struct ib_udata *udata)
2913 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2914 struct irdma_device *iwdev = to_iwdev(pd->device);
2915 struct irdma_mem_reg_req req = {};
2916 struct ib_umem *region = NULL;
2917 struct irdma_mr *iwmr = NULL;
2920 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2921 return ERR_PTR(-EINVAL);
2923 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
2924 return ERR_PTR(-EINVAL);
2926 region = ib_umem_get(pd->device, start, len, access);
2928 if (IS_ERR(region)) {
2929 ibdev_dbg(&iwdev->ibdev,
2930 "VERBS: Failed to create ib_umem region\n");
2931 return (struct ib_mr *)region;
2934 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2935 ib_umem_release(region);
2936 return ERR_PTR(-EFAULT);
2939 iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
2941 ib_umem_release(region);
2942 return (struct ib_mr *)iwmr;
2945 switch (req.reg_type) {
2946 case IRDMA_MEMREG_TYPE_QP:
2947 err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
2952 case IRDMA_MEMREG_TYPE_CQ:
2953 err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
2957 case IRDMA_MEMREG_TYPE_MEM:
2958 err = irdma_reg_user_mr_type_mem(iwmr, access);
2970 ib_umem_release(region);
2971 irdma_free_iwmr(iwmr);
2973 return ERR_PTR(err);
2976 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
2979 struct ib_udata *udata)
2981 struct irdma_device *iwdev = to_iwdev(pd->device);
2982 struct ib_umem_dmabuf *umem_dmabuf;
2983 struct irdma_mr *iwmr;
2986 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2987 return ERR_PTR(-EINVAL);
2989 umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
2990 if (IS_ERR(umem_dmabuf)) {
2991 err = PTR_ERR(umem_dmabuf);
2992 ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
2993 return ERR_PTR(err);
2996 iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
2998 err = PTR_ERR(iwmr);
3002 err = irdma_reg_user_mr_type_mem(iwmr, access);
3009 irdma_free_iwmr(iwmr);
3012 ib_umem_release(&umem_dmabuf->umem);
3014 return ERR_PTR(err);
3018 * irdma_reg_phys_mr - register kernel physical memory
3020 * @addr: physical address of memory to register
3021 * @size: size of memory to register
3022 * @access: Access rights
3023 * @iova_start: start of virtual address for physical buffers
3025 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3028 struct irdma_device *iwdev = to_iwdev(pd->device);
3029 struct irdma_pbl *iwpbl;
3030 struct irdma_mr *iwmr;
3034 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
3036 return ERR_PTR(-ENOMEM);
3039 iwmr->ibmr.device = pd->device;
3040 iwpbl = &iwmr->iwpbl;
3042 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3043 iwpbl->user_base = *iova_start;
3044 stag = irdma_create_stag(iwdev);
3051 iwmr->ibmr.iova = *iova_start;
3052 iwmr->ibmr.rkey = stag;
3053 iwmr->ibmr.lkey = stag;
3055 iwmr->pgaddrmem[0] = addr;
3057 iwmr->page_size = SZ_4K;
3058 ret = irdma_hwreg_mr(iwdev, iwmr, access);
3060 irdma_free_stag(iwdev, stag);
3069 return ERR_PTR(ret);
3073 * irdma_get_dma_mr - register physical mem
3075 * @acc: access for memory
3077 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3081 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
3085 * irdma_del_memlist - delete pbl list entries for CQ/QP
3086 * @iwmr: iwmr for IB's user page addresses
3087 * @ucontext: ptr to user context
3089 static void irdma_del_memlist(struct irdma_mr *iwmr,
3090 struct irdma_ucontext *ucontext)
3092 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3093 unsigned long flags;
3095 switch (iwmr->type) {
3096 case IRDMA_MEMREG_TYPE_CQ:
3097 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3098 if (iwpbl->on_list) {
3099 iwpbl->on_list = false;
3100 list_del(&iwpbl->list);
3102 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3104 case IRDMA_MEMREG_TYPE_QP:
3105 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3106 if (iwpbl->on_list) {
3107 iwpbl->on_list = false;
3108 list_del(&iwpbl->list);
3110 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3118 * irdma_dereg_mr - deregister mr
3119 * @ib_mr: mr ptr for dereg
3122 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3124 struct ib_pd *ibpd = ib_mr->pd;
3125 struct irdma_pd *iwpd = to_iwpd(ibpd);
3126 struct irdma_mr *iwmr = to_iwmr(ib_mr);
3127 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3128 struct irdma_dealloc_stag_info *info;
3129 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3130 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3131 struct irdma_cqp_request *cqp_request;
3132 struct cqp_cmds_info *cqp_info;
3135 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3137 struct irdma_ucontext *ucontext;
3139 ucontext = rdma_udata_to_drv_context(udata,
3140 struct irdma_ucontext,
3142 irdma_del_memlist(iwmr, ucontext);
3147 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3151 cqp_info = &cqp_request->info;
3152 info = &cqp_info->in.u.dealloc_stag.info;
3153 memset(info, 0, sizeof(*info));
3154 info->pd_id = iwpd->sc_pd.pd_id;
3155 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3157 if (iwpbl->pbl_allocated)
3158 info->dealloc_pbl = true;
3160 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3161 cqp_info->post_sq = 1;
3162 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3163 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3164 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3165 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3169 irdma_free_stag(iwdev, iwmr->stag);
3171 if (iwpbl->pbl_allocated)
3172 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3173 ib_umem_release(iwmr->region);
3180 * irdma_post_send - kernel application wr
3181 * @ibqp: qp ptr for wr
3182 * @ib_wr: work request ptr
3183 * @bad_wr: return of bad wr if err
3185 static int irdma_post_send(struct ib_qp *ibqp,
3186 const struct ib_send_wr *ib_wr,
3187 const struct ib_send_wr **bad_wr)
3189 struct irdma_qp *iwqp;
3190 struct irdma_qp_uk *ukqp;
3191 struct irdma_sc_dev *dev;
3192 struct irdma_post_sq_info info;
3194 unsigned long flags;
3196 struct irdma_ah *ah;
3198 iwqp = to_iwqp(ibqp);
3199 ukqp = &iwqp->sc_qp.qp_uk;
3200 dev = &iwqp->iwdev->rf->sc_dev;
3202 spin_lock_irqsave(&iwqp->lock, flags);
3204 memset(&info, 0, sizeof(info));
3206 info.wr_id = (ib_wr->wr_id);
3207 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3208 info.signaled = true;
3209 if (ib_wr->send_flags & IB_SEND_FENCE)
3210 info.read_fence = true;
3211 switch (ib_wr->opcode) {
3212 case IB_WR_SEND_WITH_IMM:
3213 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3214 info.imm_data_valid = true;
3215 info.imm_data = ntohl(ib_wr->ex.imm_data);
3222 case IB_WR_SEND_WITH_INV:
3223 if (ib_wr->opcode == IB_WR_SEND ||
3224 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3225 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3226 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3228 info.op_type = IRDMA_OP_TYPE_SEND;
3230 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3231 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3233 info.op_type = IRDMA_OP_TYPE_SEND_INV;
3234 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3237 info.op.send.num_sges = ib_wr->num_sge;
3238 info.op.send.sg_list = ib_wr->sg_list;
3239 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3240 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3241 ah = to_iwah(ud_wr(ib_wr)->ah);
3242 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3243 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3244 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3247 if (ib_wr->send_flags & IB_SEND_INLINE)
3248 err = irdma_uk_inline_send(ukqp, &info, false);
3250 err = irdma_uk_send(ukqp, &info, false);
3252 case IB_WR_RDMA_WRITE_WITH_IMM:
3253 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3254 info.imm_data_valid = true;
3255 info.imm_data = ntohl(ib_wr->ex.imm_data);
3261 case IB_WR_RDMA_WRITE:
3262 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3263 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3265 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3267 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3268 info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
3269 info.op.rdma_write.rem_addr.addr =
3270 rdma_wr(ib_wr)->remote_addr;
3271 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3272 if (ib_wr->send_flags & IB_SEND_INLINE)
3273 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
3275 err = irdma_uk_rdma_write(ukqp, &info, false);
3277 case IB_WR_RDMA_READ_WITH_INV:
3280 case IB_WR_RDMA_READ:
3281 if (ib_wr->num_sge >
3282 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3286 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3287 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3288 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3289 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3290 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3291 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3293 case IB_WR_LOCAL_INV:
3294 info.op_type = IRDMA_OP_TYPE_INV_STAG;
3295 info.local_fence = info.read_fence;
3296 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3297 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3299 case IB_WR_REG_MR: {
3300 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3301 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3302 struct irdma_fast_reg_stag_info stag_info = {};
3304 stag_info.signaled = info.signaled;
3305 stag_info.read_fence = info.read_fence;
3306 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3307 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3308 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3309 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3310 stag_info.wr_id = ib_wr->wr_id;
3311 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3312 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3313 stag_info.total_len = iwmr->ibmr.length;
3314 stag_info.reg_addr_pa = *palloc->level1.addr;
3315 stag_info.first_pm_pbl_index = palloc->level1.idx;
3316 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3317 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3318 stag_info.chunk_size = 1;
3319 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3325 ibdev_dbg(&iwqp->iwdev->ibdev,
3326 "VERBS: upost_send bad opcode = 0x%x\n",
3333 ib_wr = ib_wr->next;
3336 if (!iwqp->flush_issued) {
3337 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3338 irdma_uk_qp_post_wr(ukqp);
3339 spin_unlock_irqrestore(&iwqp->lock, flags);
3341 spin_unlock_irqrestore(&iwqp->lock, flags);
3342 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3343 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3352 * irdma_post_recv - post receive wr for kernel application
3353 * @ibqp: ib qp pointer
3354 * @ib_wr: work request for receive
3355 * @bad_wr: return of bad wr if err
3357 static int irdma_post_recv(struct ib_qp *ibqp,
3358 const struct ib_recv_wr *ib_wr,
3359 const struct ib_recv_wr **bad_wr)
3361 struct irdma_qp *iwqp;
3362 struct irdma_qp_uk *ukqp;
3363 struct irdma_post_rq_info post_recv = {};
3364 unsigned long flags;
3367 iwqp = to_iwqp(ibqp);
3368 ukqp = &iwqp->sc_qp.qp_uk;
3370 spin_lock_irqsave(&iwqp->lock, flags);
3372 post_recv.num_sges = ib_wr->num_sge;
3373 post_recv.wr_id = ib_wr->wr_id;
3374 post_recv.sg_list = ib_wr->sg_list;
3375 err = irdma_uk_post_receive(ukqp, &post_recv);
3377 ibdev_dbg(&iwqp->iwdev->ibdev,
3378 "VERBS: post_recv err %d\n", err);
3382 ib_wr = ib_wr->next;
3386 spin_unlock_irqrestore(&iwqp->lock, flags);
3387 if (iwqp->flush_issued)
3388 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3389 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3398 * irdma_flush_err_to_ib_wc_status - convert flush error code to IB WC status
3399 * @opcode: iwarp flush code
3401 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3404 case FLUSH_PROT_ERR:
3405 return IB_WC_LOC_PROT_ERR;
3406 case FLUSH_REM_ACCESS_ERR:
3407 return IB_WC_REM_ACCESS_ERR;
3408 case FLUSH_LOC_QP_OP_ERR:
3409 return IB_WC_LOC_QP_OP_ERR;
3410 case FLUSH_REM_OP_ERR:
3411 return IB_WC_REM_OP_ERR;
3412 case FLUSH_LOC_LEN_ERR:
3413 return IB_WC_LOC_LEN_ERR;
3414 case FLUSH_GENERAL_ERR:
3415 return IB_WC_WR_FLUSH_ERR;
3416 case FLUSH_RETRY_EXC_ERR:
3417 return IB_WC_RETRY_EXC_ERR;
3418 case FLUSH_MW_BIND_ERR:
3419 return IB_WC_MW_BIND_ERR;
3420 case FLUSH_REM_INV_REQ_ERR:
3421 return IB_WC_REM_INV_REQ_ERR;
3422 case FLUSH_FATAL_ERR:
3424 return IB_WC_FATAL_ERR;
3429 * irdma_process_cqe - process cqe info
3430 * @entry: processed cqe
3431 * @cq_poll_info: cqe info
3433 static void irdma_process_cqe(struct ib_wc *entry,
3434 struct irdma_cq_poll_info *cq_poll_info)
3436 struct irdma_sc_qp *qp;
3438 entry->wc_flags = 0;
3439 entry->pkey_index = 0;
3440 entry->wr_id = cq_poll_info->wr_id;
3442 qp = cq_poll_info->qp_handle;
3443 entry->qp = qp->qp_uk.back_qp;
3445 if (cq_poll_info->error) {
3446 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3447 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3449 entry->vendor_err = cq_poll_info->major_err << 16 |
3450 cq_poll_info->minor_err;
3452 entry->status = IB_WC_SUCCESS;
3453 if (cq_poll_info->imm_valid) {
3454 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3455 entry->wc_flags |= IB_WC_WITH_IMM;
3457 if (cq_poll_info->ud_smac_valid) {
3458 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3459 entry->wc_flags |= IB_WC_WITH_SMAC;
3462 if (cq_poll_info->ud_vlan_valid) {
3463 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3465 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3467 entry->vlan_id = vlan;
3468 entry->wc_flags |= IB_WC_WITH_VLAN;
3475 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
3476 set_ib_wc_op_sq(cq_poll_info, entry);
3478 set_ib_wc_op_rq(cq_poll_info, entry,
3479 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
3481 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3482 cq_poll_info->stag_invalid_set) {
3483 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3484 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3488 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3489 entry->src_qp = cq_poll_info->ud_src_qpn;
3492 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3493 entry->network_hdr_type = cq_poll_info->ipv4 ?
3497 entry->src_qp = cq_poll_info->qp_id;
3500 entry->byte_len = cq_poll_info->bytes_xfered;
3504 * irdma_poll_one - poll one entry of the CQ
3505 * @ukcq: ukcq to poll
3506 * @cur_cqe: current CQE info to be filled in
3507 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ
3509 * Returns the internal irdma device error code or 0 on success
3511 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3512 struct irdma_cq_poll_info *cur_cqe,
3513 struct ib_wc *entry)
3515 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3520 irdma_process_cqe(entry, cur_cqe);
3526 * __irdma_poll_cq - poll cq for completion (kernel apps)
3528 * @num_entries: number of entries to poll
3529 * @entry: wr of a completed entry
3531 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3533 struct list_head *tmp_node, *list_node;
3534 struct irdma_cq_buf *last_buf = NULL;
3535 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3536 struct irdma_cq_buf *cq_buf;
3538 struct irdma_device *iwdev;
3539 struct irdma_cq_uk *ukcq;
3540 bool cq_new_cqe = false;
3541 int resized_bufs = 0;
3544 iwdev = to_iwdev(iwcq->ibcq.device);
3545 ukcq = &iwcq->sc_cq.cq_uk;
3547 /* go through the list of previously resized CQ buffers */
3548 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3549 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3550 while (npolled < num_entries) {
3551 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3559 /* QP using the CQ is destroyed. Skip reporting this CQE */
3560 if (ret == -EFAULT) {
3567 /* save the resized CQ buffer which received the last cqe */
3573 /* check the current CQ for new cqes */
3574 while (npolled < num_entries) {
3575 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3576 if (ret == -ENOENT) {
3577 ret = irdma_generated_cmpls(iwcq, cur_cqe);
3579 irdma_process_cqe(entry + npolled, cur_cqe);
3589 /* QP using the CQ is destroyed. Skip reporting this CQE */
3590 if (ret == -EFAULT) {
3598 /* all previous CQ resizes are complete */
3599 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3601 /* only CQ resizes up to the last_buf are complete */
3602 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3604 /* report to the HW the number of complete CQ resizes */
3605 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3609 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3616 * irdma_poll_cq - poll cq for completion (kernel apps)
3618 * @num_entries: number of entries to poll
3619 * @entry: wr of a completed entry
3621 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3622 struct ib_wc *entry)
3624 struct irdma_cq *iwcq;
3625 unsigned long flags;
3628 iwcq = to_iwcq(ibcq);
3630 spin_lock_irqsave(&iwcq->lock, flags);
3631 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3632 spin_unlock_irqrestore(&iwcq->lock, flags);
3638 * irdma_req_notify_cq - arm cq for a kernel application
3640 * @notify_flags: notification flags
3642 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3643 enum ib_cq_notify_flags notify_flags)
3645 struct irdma_cq *iwcq;
3646 struct irdma_cq_uk *ukcq;
3647 unsigned long flags;
3648 enum irdma_cmpl_notify cq_notify;
3649 bool promo_event = false;
3652 cq_notify = notify_flags == IB_CQ_SOLICITED ?
3653 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3654 iwcq = to_iwcq(ibcq);
3655 ukcq = &iwcq->sc_cq.cq_uk;
3657 spin_lock_irqsave(&iwcq->lock, flags);
3658 /* Only promote to arm the CQ for any event if the last arm event was solicited. */
3659 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3662 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3663 iwcq->last_notify = cq_notify;
3664 irdma_uk_cq_request_notification(ukcq, cq_notify);
3667 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3668 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3670 spin_unlock_irqrestore(&iwcq->lock, flags);
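/*
 * Editorial sketch (hypothetical consumer code, not part of this driver;
 * handle_wc() is a made-up helper): the arm/promote logic above is driven
 * through the core verbs in the usual poll-then-rearm pattern, where a
 * positive return with IB_CQ_REPORT_MISSED_EVENTS means completions raced
 * with the arm and the CQ must be polled again:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */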
3675 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3676 struct ib_port_immutable *immutable)
3678 struct ib_port_attr attr;
3681 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3682 err = ib_query_port(ibdev, port_num, &attr);
3686 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3687 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3688 immutable->gid_tbl_len = attr.gid_tbl_len;
3693 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3694 struct ib_port_immutable *immutable)
3696 struct ib_port_attr attr;
3699 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3700 err = ib_query_port(ibdev, port_num, &attr);
3703 immutable->gid_tbl_len = attr.gid_tbl_len;
3708 static const struct rdma_stat_desc irdma_hw_stat_names[] = {
3710 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
3711 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
3712 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
3713 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
3714 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
3715 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
3716 [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
3717 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
3718 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
3719 [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
3721 [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
3722 [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts",
3723 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd",
3724 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts",
3725 [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets",
3726 [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts",
3727 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd",
3728 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts",
3729 [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets",
3730 [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts",
3731 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd",
3732 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts",
3733 [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets",
3734 [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
3735 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
3736 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
3737 [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
3738 [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
3739 [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
3740 [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
3741 [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
3742 [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
3743 [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
3744 [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
3745 [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
3746 [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
3749 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
3750 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
3751 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
3753 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets",
3754 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets",
3755 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets",
3756 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets",
3757 [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
3758 [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
3759 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
3763 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3765 struct irdma_device *iwdev = to_iwdev(dev);
3767 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3768 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3769 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3773 * irdma_alloc_hw_port_stats - Allocate a hw stats structure
3774 * @ibdev: device pointer from stack
3775 * @port_num: port number
3777 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3780 struct irdma_device *iwdev = to_iwdev(ibdev);
3781 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
3783 int num_counters = dev->hw_attrs.max_stat_idx;
3784 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
3786 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
3791 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
3792 * @ibdev: device pointer from stack
3793 * @stats: stats pointer from stack
3794 * @port_num: port number
3795 * @index: which hw counter the stack is requesting we update
3797 static int irdma_get_hw_stats(struct ib_device *ibdev,
3798 struct rdma_hw_stats *stats, u32 port_num,
3801 struct irdma_device *iwdev = to_iwdev(ibdev);
3802 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3804 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3805 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3807 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3809 memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
3811 return stats->num_counters;
3815 * irdma_query_gid - Query port GID
3816 * @ibdev: device pointer from stack
3817 * @port: port number
3818 * @index: Entry index
3821 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3824 struct irdma_device *iwdev = to_iwdev(ibdev);
3826 memset(gid->raw, 0, sizeof(gid->raw));
3827 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3833 * mcast_list_add - Add a new mcast item to list
3834 * @rf: RDMA PCI function
3835 * @new_elem: pointer to element to add
3837 static void mcast_list_add(struct irdma_pci_f *rf,
3838 struct mc_table_list *new_elem)
3840 list_add(&new_elem->list, &rf->mc_qht_list.list);
3844 * mcast_list_del - Remove an mcast item from list
3845 * @mc_qht_elem: pointer to mcast table list element
3847 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3850 list_del(&mc_qht_elem->list);
3854 * mcast_list_lookup_ip - Search mcast list for address
3855 * @rf: RDMA PCI function
3856 * @ip_mcast: pointer to mcast IP address
3858 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3861 struct mc_table_list *mc_qht_el;
3862 struct list_head *pos, *q;
3864 list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
3865 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3866 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3867 sizeof(mc_qht_el->mc_info.dest_ip)))
3875 * irdma_mcast_cqp_op - perform a mcast cqp operation
3876 * @iwdev: irdma device
3877 * @mc_grp_ctx: mcast group info
3880 * returns error status
3882 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3883 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3885 struct cqp_cmds_info *cqp_info;
3886 struct irdma_cqp_request *cqp_request;
3889 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3893 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3894 cqp_info = &cqp_request->info;
3895 cqp_info->cqp_cmd = op;
3896 cqp_info->post_sq = 1;
3897 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3898 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3899 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3900 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3906 * irdma_mcast_mac - Get the multicast MAC for an IP address
3907 * @ip_addr: IPv4 or IPv6 address
3908 * @mac: pointer to result MAC address
3909 * @ipv4: flag indicating IPv4 or IPv6
3912 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3914 u8 *ip = (u8 *)ip_addr;
3917 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3920 mac4[3] = ip[2] & 0x7F;
3923 ether_addr_copy(mac, mac4);
3925 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
3932 ether_addr_copy(mac, mac6);
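/*
 * Editorial example (not in the driver source): this follows the standard
 * RFC 1112 / RFC 2464 mappings, e.g. IPv4 group 224.1.2.3 becomes
 * 01:00:5e:01:02:03 (the low 23 bits of the address), and an IPv6 group
 * becomes 33:33 followed by its last four bytes.
 */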
3937 * irdma_attach_mcast - attach a qp to a multicast group
3939 * @ibgid: pointer to global ID
3942 * returns error status
3944 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3946 struct irdma_qp *iwqp = to_iwqp(ibqp);
3947 struct irdma_device *iwdev = iwqp->iwdev;
3948 struct irdma_pci_f *rf = iwdev->rf;
3949 struct mc_table_list *mc_qht_elem;
3950 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
3951 unsigned long flags;
3952 u32 ip_addr[4] = {};
3958 union irdma_sockaddr sgid_addr;
3959 unsigned char dmac[ETH_ALEN];
3961 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3963 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3964 irdma_copy_ip_ntohl(ip_addr,
3965 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
3966 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3968 ibdev_dbg(&iwdev->ibdev,
3969 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3971 irdma_mcast_mac(ip_addr, dmac, false);
3973 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3975 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3976 irdma_mcast_mac(ip_addr, dmac, true);
3977 ibdev_dbg(&iwdev->ibdev,
3978 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3979 ibqp->qp_num, ip_addr, dmac);
3982 spin_lock_irqsave(&rf->qh_list_lock, flags);
3983 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3985 struct irdma_dma_mem *dma_mem_mc;
3987 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3988 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3992 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3993 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3994 sizeof(mc_qht_elem->mc_info.dest_ip));
3995 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3996 &mgn, &rf->next_mcg);
4002 mc_qht_elem->mc_info.mgn = mgn;
4003 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4004 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4005 IRDMA_HW_PAGE_SIZE);
4006 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4010 if (!dma_mem_mc->va) {
4011 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4016 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4017 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4018 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4019 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4020 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4021 if (vlan_id < VLAN_N_VID)
4022 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4023 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4024 mc_qht_elem->mc_grp_ctx.qs_handle =
4025 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4026 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4028 spin_lock_irqsave(&rf->qh_list_lock, flags);
4029 mcast_list_add(rf, mc_qht_elem);
4031 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4032 IRDMA_MAX_MGS_PER_CTX) {
4033 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4038 mcg_info.qp_id = iwqp->ibqp.qp_num;
4039 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4040 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4041 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4043 /* Only if there is a change do we need to modify or create */
4045 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4046 IRDMA_OP_MC_CREATE);
4047 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4048 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4049 IRDMA_OP_MC_MODIFY);
4060 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4061 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4062 mcast_list_del(mc_qht_elem);
4063 dma_free_coherent(rf->hw.device,
4064 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4065 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4066 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4067 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4068 irdma_free_rsrc(rf, rf->allocated_mcgs,
4069 mc_qht_elem->mc_grp_ctx.mg_id);
4077 * irdma_detach_mcast - detach a qp from a multicast group
4079 * @ibgid: pointer to global ID
4082 * returns error status
4084 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4086 struct irdma_qp *iwqp = to_iwqp(ibqp);
4087 struct irdma_device *iwdev = iwqp->iwdev;
4088 struct irdma_pci_f *rf = iwdev->rf;
4089 u32 ip_addr[4] = {};
4090 struct mc_table_list *mc_qht_elem;
4091 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4093 unsigned long flags;
4094 union irdma_sockaddr sgid_addr;
4096 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4097 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4098 irdma_copy_ip_ntohl(ip_addr,
4099 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4101 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4103 spin_lock_irqsave(&rf->qh_list_lock, flags);
4104 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4106 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4107 ibdev_dbg(&iwdev->ibdev,
4108 "VERBS: address not found MCG\n");
4112 mcg_info.qp_id = iwqp->ibqp.qp_num;
4113 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4114 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4115 mcast_list_del(mc_qht_elem);
4116 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4117 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4118 IRDMA_OP_MC_DESTROY);
4120 ibdev_dbg(&iwdev->ibdev,
4121 "VERBS: failed MC_DESTROY MCG\n");
4122 spin_lock_irqsave(&rf->qh_list_lock, flags);
4123 mcast_list_add(rf, mc_qht_elem);
4124 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4128 dma_free_coherent(rf->hw.device,
4129 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4130 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4131 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4132 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4133 irdma_free_rsrc(rf, rf->allocated_mcgs,
4134 mc_qht_elem->mc_grp_ctx.mg_id);
4137 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4138 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4139 IRDMA_OP_MC_MODIFY);
4141 ibdev_dbg(&iwdev->ibdev,
4142 "VERBS: failed Modify MCG\n");
4150 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4152 struct irdma_pci_f *rf = iwdev->rf;
4155 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
4160 err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
4161 irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
4164 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
4169 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4172 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4174 } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
4177 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
4185 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4190 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
4192 struct irdma_pd *pd = to_iwpd(ibah->pd);
4193 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4194 struct rdma_ah_attr *ah_attr = attr->ah_attr;
4195 const struct ib_gid_attr *sgid_attr;
4196 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4197 struct irdma_pci_f *rf = iwdev->rf;
4198 struct irdma_sc_ah *sc_ah;
4199 struct irdma_ah_info *ah_info;
4200 union irdma_sockaddr sgid_addr, dgid_addr;
4206 sc_ah->ah_info.vsi = &iwdev->vsi;
4207 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4208 ah->sgid_index = ah_attr->grh.sgid_index;
4209 sgid_attr = ah_attr->grh.sgid_attr;
4210 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4211 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4212 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4213 ah->av.attrs = *ah_attr;
4214 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4215 ah_info = &sc_ah->ah_info;
4216 ah_info->pd_idx = pd->sc_pd.pd_id;
4217 if (ah_attr->ah_flags & IB_AH_GRH) {
4218 ah_info->flow_label = ah_attr->grh.flow_label;
4219 ah_info->hop_ttl = ah_attr->grh.hop_limit;
4220 ah_info->tc_tos = ah_attr->grh.traffic_class;
4223 ether_addr_copy(dmac, ah_attr->roce.dmac);
4224 if (ah->av.net_type == RDMA_NETWORK_IPV4) {
4225 ah_info->ipv4_valid = true;
4226 ah_info->dest_ip_addr[0] =
4227 ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4228 ah_info->src_ip_addr[0] =
4229 ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4230 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4231 ah_info->dest_ip_addr[0]);
4232 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4233 ah_info->do_lpbk = true;
4234 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4237 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4238 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4239 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4240 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4241 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4242 ah_info->dest_ip_addr);
4243 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4244 ah_info->do_lpbk = true;
4245 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4249 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4254 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4255 ah_info->ipv4_valid, dmac);
4257 if (ah_info->dst_arpindex == -1)
4260 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4261 ah_info->vlan_tag = 0;
4263 if (ah_info->vlan_tag < VLAN_N_VID) {
4264 ah_info->insert_vlan_tag = true;
4265 ah_info->vlan_tag |=
4266 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
4273 * irdma_ah_exists - Check for existing identical AH
4274 * @iwdev: irdma device
4275 * @new_ah: AH to check for
4277 * returns true if AH is found, false if not found.
4279 static bool irdma_ah_exists(struct irdma_device *iwdev,
4280 struct irdma_ah *new_ah)
4282 struct irdma_ah *ah;
4283 u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4284 new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4285 new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4286 new_ah->sc_ah.ah_info.dest_ip_addr[3];
4288 hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4289 /* Set ah_valid and ah_idx the same so memcmp can work */
4290 new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
4291 new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
4292 if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
4293 sizeof(ah->sc_ah.ah_info))) {
4294 refcount_inc(&ah->refcnt);
4295 new_ah->parent_ah = ah;
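/*
 * Editorial note (not in the driver source): the hash key is the XOR of
 * the four destination IP words, so an IPv4 destination such as
 * 192.168.1.10 (dest_ip_addr[0] = 0xc0a8010a, the rest zero) hashes to
 * 0xc0a8010a, and all AHs toward one destination share a bucket where the
 * full ah_info memcmp() decides whether an existing AH can be reused.
 */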
4304 * irdma_destroy_ah - Destroy address handle
4305 * @ibah: pointer to address handle
4306 * @ah_flags: flags for sleepable
4308 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4310 struct irdma_device *iwdev = to_iwdev(ibah->device);
4311 struct irdma_ah *ah = to_iwah(ibah);
4313 if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
4314 mutex_lock(&iwdev->ah_tbl_lock);
4315 if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
4316 mutex_unlock(&iwdev->ah_tbl_lock);
4319 hash_del(&ah->parent_ah->list);
4320 kfree(ah->parent_ah);
4321 mutex_unlock(&iwdev->ah_tbl_lock);
4324 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4327 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4328 ah->sc_ah.ah_info.ah_idx);
4334 * irdma_create_user_ah - create user address handle
4335 * @ibah: address handle
4336 * @attr: address handle attributes
4339 * returns 0 on success, error otherwise
4341 static int irdma_create_user_ah(struct ib_ah *ibah,
4342 struct rdma_ah_init_attr *attr,
4343 struct ib_udata *udata)
4345 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
4346 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4347 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4348 struct irdma_create_ah_resp uresp;
4349 struct irdma_ah *parent_ah;
4352 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
4355 err = irdma_setup_ah(ibah, attr);
4358 mutex_lock(&iwdev->ah_tbl_lock);
4359 if (!irdma_ah_exists(iwdev, ah)) {
4360 err = irdma_create_hw_ah(iwdev, ah, true);
4362 mutex_unlock(&iwdev->ah_tbl_lock);
4365 /* Add new AH to list */
4366 parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
4368 u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4369 parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4370 parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4371 parent_ah->sc_ah.ah_info.dest_ip_addr[3];
4373 ah->parent_ah = parent_ah;
4374 hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4375 refcount_set(&parent_ah->refcnt, 1);
4378 mutex_unlock(&iwdev->ah_tbl_lock);
4380 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4381 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
4383 irdma_destroy_ah(ibah, attr->flags);
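/*
 * User AH creation first checks that the response buffer can hold at
 * least the ah_id field (IRDMA_CREATE_AH_MIN_RESP_LEN), then looks for
 * an identical AH under ah_tbl_lock. A hardware AH is created only on a
 * miss, in which case a kmemdup() copy of the new AH is hashed as the
 * shared parent with an initial refcount of 1. The hardware AH index is
 * reported to userspace in uresp.ah_id; if the copy to udata fails, the
 * AH is unwound with irdma_destroy_ah().
 */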
4389 * irdma_create_ah - create address handle
4390 * @ibah: address handle
4391 * @attr: address handle attributes
4394 * returns 0 on success, error otherwise
4396 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
4397 struct ib_udata *udata)
4399 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4400 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4403 err = irdma_setup_ah(ibah, attr);
4406 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
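/*
 * Kernel AH creation takes the simple path: no deduplication, just
 * address setup followed by a hardware AH create. RDMA_CREATE_AH_SLEEPABLE
 * in attr->flags tells irdma_create_hw_ah() whether the CQP operation may
 * block waiting for completion or must be issued without sleeping.
 */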
4412 * irdma_query_ah - Query address handle
4413 * @ibah: pointer to address handle
4414 * @ah_attr: address handle attributes
4416 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4418 struct irdma_ah *ah = to_iwah(ibah);
4420 memset(ah_attr, 0, sizeof(*ah_attr));
4421 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4422 ah_attr->ah_flags = IB_AH_GRH;
4423 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4424 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4425 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4426 ah_attr->grh.sgid_index = ah->sgid_index;
4428 memcpy(&ah_attr->grh.dgid, &ah->dgid,
4429 sizeof(ah_attr->grh.dgid));
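/*
 * irdma_query_ah() reports only GRH attributes (flow label, traffic
 * class, hop limit, SGID index and DGID), and only when the AH was
 * created with IB_AH_GRH; all other fields of ah_attr remain zeroed by
 * the memset() above.
 */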
4435 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4438 return IB_LINK_LAYER_ETHERNET;
4441 static const struct ib_device_ops irdma_roce_dev_ops = {
4442 .attach_mcast = irdma_attach_mcast,
4443 .create_ah = irdma_create_ah,
4444 .create_user_ah = irdma_create_user_ah,
4445 .destroy_ah = irdma_destroy_ah,
4446 .detach_mcast = irdma_detach_mcast,
4447 .get_link_layer = irdma_get_link_layer,
4448 .get_port_immutable = irdma_roce_port_immutable,
4449 .modify_qp = irdma_modify_qp_roce,
4450 .query_ah = irdma_query_ah,
4451 .query_pkey = irdma_query_pkey,
4454 static const struct ib_device_ops irdma_iw_dev_ops = {
4455 .modify_qp = irdma_modify_qp,
4456 .get_port_immutable = irdma_iw_port_immutable,
4457 .query_gid = irdma_query_gid,
4460 static const struct ib_device_ops irdma_dev_ops = {
4461 .owner = THIS_MODULE,
4462 .driver_id = RDMA_DRIVER_IRDMA,
4463 .uverbs_abi_ver = IRDMA_ABI_VER,
4465 .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
4466 .alloc_mr = irdma_alloc_mr,
4467 .alloc_mw = irdma_alloc_mw,
4468 .alloc_pd = irdma_alloc_pd,
4469 .alloc_ucontext = irdma_alloc_ucontext,
4470 .create_cq = irdma_create_cq,
4471 .create_qp = irdma_create_qp,
4472 .dealloc_driver = irdma_ib_dealloc_device,
4473 .dealloc_mw = irdma_dealloc_mw,
4474 .dealloc_pd = irdma_dealloc_pd,
4475 .dealloc_ucontext = irdma_dealloc_ucontext,
4476 .dereg_mr = irdma_dereg_mr,
4477 .destroy_cq = irdma_destroy_cq,
4478 .destroy_qp = irdma_destroy_qp,
4479 .disassociate_ucontext = irdma_disassociate_ucontext,
4480 .get_dev_fw_str = irdma_get_dev_fw_str,
4481 .get_dma_mr = irdma_get_dma_mr,
4482 .get_hw_stats = irdma_get_hw_stats,
4483 .map_mr_sg = irdma_map_mr_sg,
4485 .mmap_free = irdma_mmap_free,
4486 .poll_cq = irdma_poll_cq,
4487 .post_recv = irdma_post_recv,
4488 .post_send = irdma_post_send,
4489 .query_device = irdma_query_device,
4490 .query_port = irdma_query_port,
4491 .query_qp = irdma_query_qp,
4492 .reg_user_mr = irdma_reg_user_mr,
4493 .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
4494 .req_notify_cq = irdma_req_notify_cq,
4495 .resize_cq = irdma_resize_cq,
4496 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
4497 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
4498 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
4499 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
4500 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
4501 INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
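/*
 * irdma_dev_ops is the common verbs table installed for every irdma
 * device; irdma_roce_dev_ops or irdma_iw_dev_ops is layered on by
 * ib_set_device_ops() depending on the transport, with later calls only
 * filling ops that are still unset. The INIT_RDMA_OBJ_SIZE() entries let
 * the IB core allocate the driver-private containers (irdma_pd, irdma_qp,
 * etc.) around the ib_* objects it hands back to the driver.
 */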
4505 * irdma_init_roce_device - initialization of roce rdma device
4506 * @iwdev: irdma device
4508 static void irdma_init_roce_device(struct irdma_device *iwdev)
4510 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4511 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4512 iwdev->netdev->dev_addr);
4513 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4517 * irdma_init_iw_device - initialization of iwarp rdma device
4518 * @iwdev: irdma device
4520 static int irdma_init_iw_device(struct irdma_device *iwdev)
4522 struct net_device *netdev = iwdev->netdev;
4524 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4525 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4527 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
4528 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
4529 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
4530 iwdev->ibdev.ops.iw_connect = irdma_connect;
4531 iwdev->ibdev.ops.iw_accept = irdma_accept;
4532 iwdev->ibdev.ops.iw_reject = irdma_reject;
4533 iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
4534 iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
4535 memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4536 sizeof(iwdev->ibdev.iw_ifname));
4537 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
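/*
 * For iWARP the node type is RDMA_NODE_RNIC and the connection manager
 * callbacks (iw_connect, iw_accept, listen handling, QP reference
 * helpers) are assigned directly on ibdev.ops; iw_ifname is copied from
 * the netdev for use by the iWARP CM.
 */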
4543 * irdma_init_rdma_device - initialization of rdma device
4544 * @iwdev: irdma device
4546 static int irdma_init_rdma_device(struct irdma_device *iwdev)
4548 struct pci_dev *pcidev = iwdev->rf->pcidev;
4551 if (iwdev->roce_mode) {
4552 irdma_init_roce_device(iwdev);
4554 ret = irdma_init_iw_device(iwdev);
4558 iwdev->ibdev.phys_port_cnt = 1;
4559 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4560 iwdev->ibdev.dev.parent = &pcidev->dev;
4561 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
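/*
 * Common device initialization: RoCE or iWARP specific setup is chosen
 * by iwdev->roce_mode, the device exposes a single physical port, and
 * the number of completion vectors advertised to ULPs matches the number
 * of CEQs allocated for this function.
 */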
4567 * irdma_port_ibevent - indicate port event
4568 * @iwdev: irdma device
4570 void irdma_port_ibevent(struct irdma_device *iwdev)
4572 struct ib_event event;
4574 event.device = &iwdev->ibdev;
4575 event.element.port_num = 1;
4577 event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4578 ib_dispatch_event(&event);
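/*
 * irdma_port_ibevent() translates the driver's iw_status into an
 * IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR event on port 1 and hands it
 * to the IB core with ib_dispatch_event() for delivery to registered
 * clients.
 */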
4582 * irdma_ib_unregister_device - unregister rdma device from IB
4584 * @iwdev: irdma device
4586 void irdma_ib_unregister_device(struct irdma_device *iwdev)
4588 iwdev->iw_status = 0;
4589 irdma_port_ibevent(iwdev);
4590 ib_unregister_device(&iwdev->ibdev);
4594 * irdma_ib_register_device - register irdma device to IB core
4595 * @iwdev: irdma device
4597 int irdma_ib_register_device(struct irdma_device *iwdev)
4601 ret = irdma_init_rdma_device(iwdev);
4605 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4608 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4609 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4613 iwdev->iw_status = 1;
4614 irdma_port_ibevent(iwdev);
4620 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
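/*
 * Registration order matters here: the netdev is bound to port 1 with
 * ib_device_set_netdev() before ib_register_device() makes the device
 * visible, the DMA maximum segment size for the HW's DMA device is
 * relaxed to UINT_MAX, and only after a successful "irdma%d" registration
 * is iw_status set and a PORT_ACTIVE event generated.
 */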
4626 * irdma_ib_dealloc_device
4629 * callback from ibdev dealloc_driver to deallocate resources
4630 * under irdma device
4632 void irdma_ib_dealloc_device(struct ib_device *ibdev)
4634 struct irdma_device *iwdev = to_iwdev(ibdev);
4636 irdma_rt_deinit_hw(iwdev);
4637 irdma_ctrl_deinit_hw(iwdev->rf);