1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 /* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved. */
6 #include <linux/dma-buf.h>
7 #include <linux/dma-resv.h>
8 #include <linux/vmalloc.h>
9 #include <linux/log2.h>
11 #include <rdma/ib_addr.h>
12 #include <rdma/ib_umem.h>
13 #include <rdma/ib_user_verbs.h>
14 #include <rdma/ib_verbs.h>
15 #include <rdma/uverbs_ioctl.h>
18 #include "efa_io_defs.h"
21 EFA_MMAP_DMA_PAGE = 0,
26 #define EFA_AENQ_ENABLED_GROUPS \
27 (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
28 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
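/*
 * Note (descriptive comment, not in the original source): each user mmap
 * entry wraps the core rdma_user_mmap_entry and records the physical/bus
 * address being exposed and how it must be mapped (DMA page, non-cached I/O,
 * or write-combined I/O).
 */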
30 struct efa_user_mmap_entry {
31 struct rdma_user_mmap_entry rdma_entry;
36 #define EFA_DEFINE_DEVICE_STATS(op) \
37 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
38 op(EFA_COMPLETED_CMDS, "completed_cmds") \
39 op(EFA_CMDS_ERR, "cmds_err") \
40 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
41 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
42 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
43 op(EFA_CREATE_QP_ERR, "create_qp_err") \
44 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
45 op(EFA_REG_MR_ERR, "reg_mr_err") \
46 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
47 op(EFA_CREATE_AH_ERR, "create_ah_err") \
48 op(EFA_MMAP_ERR, "mmap_err")
50 #define EFA_DEFINE_PORT_STATS(op) \
51 op(EFA_TX_BYTES, "tx_bytes") \
52 op(EFA_TX_PKTS, "tx_pkts") \
53 op(EFA_RX_BYTES, "rx_bytes") \
54 op(EFA_RX_PKTS, "rx_pkts") \
55 op(EFA_RX_DROPS, "rx_drops") \
56 op(EFA_SEND_BYTES, "send_bytes") \
57 op(EFA_SEND_WRS, "send_wrs") \
58 op(EFA_RECV_BYTES, "recv_bytes") \
59 op(EFA_RECV_WRS, "recv_wrs") \
60 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
61 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
62 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
63 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
65 #define EFA_STATS_ENUM(ename, name) ename,
66 #define EFA_STATS_STR(ename, nam) \
69 enum efa_hw_device_stats {
70 EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
73 static const struct rdma_stat_desc efa_device_stats_descs[] = {
74 EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
77 enum efa_hw_port_stats {
78 EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
81 static const struct rdma_stat_desc efa_port_stats_descs[] = {
82 EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
85 #define EFA_CHUNK_PAYLOAD_SHIFT 12
86 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
87 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
89 #define EFA_CHUNK_SHIFT 12
90 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
91 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
93 #define EFA_PTRS_PER_CHUNK \
94 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
96 #define EFA_CHUNK_USED_SIZE \
97 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
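/*
 * Note (descriptive comment, not in the original source): for the indirect
 * PBL, the page list is split into 4KB chunks. Each chunk holds
 * EFA_PTRS_PER_CHUNK 8-byte page addresses, and the tail of the chunk
 * (EFA_CHUNK_PTR_SIZE bytes) holds an efa_com_ctrl_buff_info record pointing
 * at the next chunk, forming a singly linked chunk list.
 */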
105 struct pbl_chunk_list {
106 struct pbl_chunk *chunks;
116 u32 pbl_buf_size_in_pages;
117 struct scatterlist *sgl;
119 struct pbl_chunk_list chunk_list;
123 u32 pbl_buf_size_in_bytes;
124 u8 physically_continuous;
127 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
129 return container_of(ibdev, struct efa_dev, ibdev);
132 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
134 return container_of(ibucontext, struct efa_ucontext, ibucontext);
137 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
139 return container_of(ibpd, struct efa_pd, ibpd);
142 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
144 return container_of(ibmr, struct efa_mr, ibmr);
147 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
149 return container_of(ibqp, struct efa_qp, ibqp);
152 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
154 return container_of(ibcq, struct efa_cq, ibcq);
157 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
159 return container_of(ibah, struct efa_ah, ibah);
162 static inline struct efa_user_mmap_entry *
163 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
165 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
168 #define EFA_DEV_CAP(dev, cap) \
169 ((dev)->dev_attr.device_caps & \
170 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
172 #define is_reserved_cleared(reserved) \
173 !memchr_inv(reserved, 0, sizeof(reserved))
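/*
 * Note (descriptive comment, not in the original source): user commands carry
 * comp_mask/reserved fields for ABI compatibility; any input bytes the kernel
 * does not recognize (trailing udata or reserved fields) must be zero,
 * otherwise the request is rejected.
 */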
175 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
176 size_t size, enum dma_data_direction dir)
180 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
184 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
185 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
186 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
187 free_pages_exact(addr, size);
194 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
196 size_t size, enum dma_data_direction dir)
198 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
199 free_pages_exact(cpu_addr, size);
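/*
 * Note (descriptive comment, not in the original source): query_device
 * reports the attributes cached in dev->dev_attr; when userspace supplies
 * output udata, the EFA-specific capabilities (RDMA read/write, RNR retry,
 * CQ notifications, etc.) are returned through the extended response.
 */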
202 int efa_query_device(struct ib_device *ibdev,
203 struct ib_device_attr *props,
204 struct ib_udata *udata)
206 struct efa_com_get_device_attr_result *dev_attr;
207 struct efa_ibv_ex_query_device_resp resp = {};
208 struct efa_dev *dev = to_edev(ibdev);
211 if (udata && udata->inlen &&
212 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
214 "Incompatible ABI params, udata not cleared\n");
218 dev_attr = &dev->dev_attr;
220 memset(props, 0, sizeof(*props));
221 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
222 props->page_size_cap = dev_attr->page_size_cap;
223 props->vendor_id = dev->pdev->vendor;
224 props->vendor_part_id = dev->pdev->device;
225 props->hw_ver = dev->pdev->subsystem_device;
226 props->max_qp = dev_attr->max_qp;
227 props->max_cq = dev_attr->max_cq;
228 props->max_pd = dev_attr->max_pd;
229 props->max_mr = dev_attr->max_mr;
230 props->max_ah = dev_attr->max_ah;
231 props->max_cqe = dev_attr->max_cq_depth;
232 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
233 dev_attr->max_rq_depth);
234 props->max_send_sge = dev_attr->max_sq_sge;
235 props->max_recv_sge = dev_attr->max_rq_sge;
236 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
237 props->max_pkeys = 1;
239 if (udata && udata->outlen) {
240 resp.max_sq_sge = dev_attr->max_sq_sge;
241 resp.max_rq_sge = dev_attr->max_rq_sge;
242 resp.max_sq_wr = dev_attr->max_sq_depth;
243 resp.max_rq_wr = dev_attr->max_rq_depth;
244 resp.max_rdma_size = dev_attr->max_rdma_size;
246 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
247 if (EFA_DEV_CAP(dev, RDMA_READ))
248 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
250 if (EFA_DEV_CAP(dev, RNR_RETRY))
251 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
253 if (EFA_DEV_CAP(dev, DATA_POLLING_128))
254 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
256 if (EFA_DEV_CAP(dev, RDMA_WRITE))
257 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
260 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
262 err = ib_copy_to_udata(udata, &resp,
263 min(sizeof(resp), udata->outlen));
266 "Failed to copy udata for query_device\n");
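/*
 * Note (descriptive comment, not in the original source): EFA exposes a
 * single logical port that is always reported as active; the link attributes
 * below are static and the maximum message size equals the device MTU.
 */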
274 int efa_query_port(struct ib_device *ibdev, u32 port,
275 struct ib_port_attr *props)
277 struct efa_dev *dev = to_edev(ibdev);
281 props->state = IB_PORT_ACTIVE;
282 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
283 props->gid_tbl_len = 1;
284 props->pkey_tbl_len = 1;
285 props->active_speed = IB_SPEED_EDR;
286 props->active_width = IB_WIDTH_4X;
287 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
288 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
289 props->max_msg_sz = dev->dev_attr.mtu;
290 props->max_vl_num = 1;
295 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
297 struct ib_qp_init_attr *qp_init_attr)
299 struct efa_dev *dev = to_edev(ibqp->device);
300 struct efa_com_query_qp_params params = {};
301 struct efa_com_query_qp_result result;
302 struct efa_qp *qp = to_eqp(ibqp);
305 #define EFA_QUERY_QP_SUPP_MASK \
306 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
307 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
309 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
310 ibdev_dbg(&dev->ibdev,
311 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
312 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
316 memset(qp_attr, 0, sizeof(*qp_attr));
317 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
319 params.qp_handle = qp->qp_handle;
320 err = efa_com_query_qp(&dev->edev, &params, &result);
324 qp_attr->qp_state = result.qp_state;
325 qp_attr->qkey = result.qkey;
326 qp_attr->sq_psn = result.sq_psn;
327 qp_attr->sq_draining = result.sq_draining;
328 qp_attr->port_num = 1;
329 qp_attr->rnr_retry = result.rnr_retry;
331 qp_attr->cap.max_send_wr = qp->max_send_wr;
332 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
333 qp_attr->cap.max_send_sge = qp->max_send_sge;
334 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
335 qp_attr->cap.max_inline_data = qp->max_inline_data;
337 qp_init_attr->qp_type = ibqp->qp_type;
338 qp_init_attr->recv_cq = ibqp->recv_cq;
339 qp_init_attr->send_cq = ibqp->send_cq;
340 qp_init_attr->qp_context = ibqp->qp_context;
341 qp_init_attr->cap = qp_attr->cap;
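/*
 * Note (descriptive comment, not in the original source): the single GID
 * entry is built from the device address stored in dev_attr.
 */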
346 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
349 struct efa_dev *dev = to_edev(ibdev);
351 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
356 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
366 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
368 struct efa_com_dealloc_pd_params params = {
372 return efa_com_dealloc_pd(&dev->edev, &params);
375 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
377 struct efa_dev *dev = to_edev(ibpd->device);
378 struct efa_ibv_alloc_pd_resp resp = {};
379 struct efa_com_alloc_pd_result result;
380 struct efa_pd *pd = to_epd(ibpd);
384 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
385 ibdev_dbg(&dev->ibdev,
386 "Incompatible ABI params, udata not cleared\n");
391 err = efa_com_alloc_pd(&dev->edev, &result);
395 pd->pdn = result.pdn;
396 resp.pdn = result.pdn;
399 err = ib_copy_to_udata(udata, &resp,
400 min(sizeof(resp), udata->outlen));
402 ibdev_dbg(&dev->ibdev,
403 "Failed to copy udata for alloc_pd\n");
408 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
413 efa_pd_dealloc(dev, result.pdn);
415 atomic64_inc(&dev->stats.alloc_pd_err);
419 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
421 struct efa_dev *dev = to_edev(ibpd->device);
422 struct efa_pd *pd = to_epd(ibpd);
424 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
425 efa_pd_dealloc(dev, pd->pdn);
429 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
431 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
433 return efa_com_destroy_qp(&dev->edev, &params);
436 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
438 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
439 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
440 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
441 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
444 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
446 struct efa_dev *dev = to_edev(ibqp->pd->device);
447 struct efa_qp *qp = to_eqp(ibqp);
450 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
452 efa_qp_user_mmap_entries_remove(qp);
454 err = efa_destroy_qp_handle(dev, qp->qp_handle);
458 if (qp->rq_cpu_addr) {
459 ibdev_dbg(&dev->ibdev,
460 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
461 qp->rq_cpu_addr, qp->rq_size,
463 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
464 qp->rq_size, DMA_TO_DEVICE);
470 static struct rdma_user_mmap_entry*
471 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
472 u64 address, size_t length,
473 u8 mmap_flag, u64 *offset)
475 struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
481 entry->address = address;
482 entry->mmap_flag = mmap_flag;
484 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
490 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
492 return &entry->rdma_entry;
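/*
 * Note (descriptive comment, not in the original source): a QP exposes up to
 * four mappings to userspace: the SQ doorbell in the doorbell BAR, the LLQ
 * descriptor ring in the memory BAR, the RQ doorbell, and the kernel-allocated
 * RQ buffer itself (mapped as DMA pages).
 */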
495 static int qp_mmap_entries_setup(struct efa_qp *qp,
497 struct efa_ucontext *ucontext,
498 struct efa_com_create_qp_params *params,
499 struct efa_ibv_create_qp_resp *resp)
504 address = dev->db_bar_addr + resp->sq_db_offset;
505 qp->sq_db_mmap_entry =
506 efa_user_mmap_entry_insert(&ucontext->ibucontext,
508 PAGE_SIZE, EFA_MMAP_IO_NC,
509 &resp->sq_db_mmap_key);
510 if (!qp->sq_db_mmap_entry)
513 resp->sq_db_offset &= ~PAGE_MASK;
515 address = dev->mem_bar_addr + resp->llq_desc_offset;
516 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
517 (resp->llq_desc_offset & ~PAGE_MASK));
519 qp->llq_desc_mmap_entry =
520 efa_user_mmap_entry_insert(&ucontext->ibucontext,
523 &resp->llq_desc_mmap_key);
524 if (!qp->llq_desc_mmap_entry)
525 goto err_remove_mmap;
527 resp->llq_desc_offset &= ~PAGE_MASK;
530 address = dev->db_bar_addr + resp->rq_db_offset;
532 qp->rq_db_mmap_entry =
533 efa_user_mmap_entry_insert(&ucontext->ibucontext,
536 &resp->rq_db_mmap_key);
537 if (!qp->rq_db_mmap_entry)
538 goto err_remove_mmap;
540 resp->rq_db_offset &= ~PAGE_MASK;
542 address = virt_to_phys(qp->rq_cpu_addr);
544 efa_user_mmap_entry_insert(&ucontext->ibucontext,
545 address, qp->rq_size,
548 if (!qp->rq_mmap_entry)
549 goto err_remove_mmap;
551 resp->rq_mmap_size = qp->rq_size;
557 efa_qp_user_mmap_entries_remove(qp);
562 static int efa_qp_validate_cap(struct efa_dev *dev,
563 struct ib_qp_init_attr *init_attr)
565 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
566 ibdev_dbg(&dev->ibdev,
567 "qp: requested send wr[%u] exceeds the max[%u]\n",
568 init_attr->cap.max_send_wr,
569 dev->dev_attr.max_sq_depth);
572 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
573 ibdev_dbg(&dev->ibdev,
574 "qp: requested receive wr[%u] exceeds the max[%u]\n",
575 init_attr->cap.max_recv_wr,
576 dev->dev_attr.max_rq_depth);
579 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
580 ibdev_dbg(&dev->ibdev,
581 "qp: requested sge send[%u] exceeds the max[%u]\n",
582 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
585 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
586 ibdev_dbg(&dev->ibdev,
587 "qp: requested sge recv[%u] exceeds the max[%u]\n",
588 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
591 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
592 ibdev_dbg(&dev->ibdev,
593 "qp: requested inline data[%u] exceeds the max[%u]\n",
594 init_attr->cap.max_inline_data,
595 dev->dev_attr.inline_buf_size);
602 static int efa_qp_validate_attr(struct efa_dev *dev,
603 struct ib_qp_init_attr *init_attr)
605 if (init_attr->qp_type != IB_QPT_DRIVER &&
606 init_attr->qp_type != IB_QPT_UD) {
607 ibdev_dbg(&dev->ibdev,
608 "Unsupported qp type %d\n", init_attr->qp_type);
612 if (init_attr->srq) {
613 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
617 if (init_attr->create_flags) {
618 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
625 int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
626 struct ib_udata *udata)
628 struct efa_com_create_qp_params create_qp_params = {};
629 struct efa_com_create_qp_result create_qp_resp;
630 struct efa_dev *dev = to_edev(ibqp->device);
631 struct efa_ibv_create_qp_resp resp = {};
632 struct efa_ibv_create_qp cmd = {};
633 struct efa_qp *qp = to_eqp(ibqp);
634 struct efa_ucontext *ucontext;
637 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
640 err = efa_qp_validate_cap(dev, init_attr);
644 err = efa_qp_validate_attr(dev, init_attr);
648 if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
649 ibdev_dbg(&dev->ibdev,
650 "Incompatible ABI params, no input udata\n");
655 if (udata->inlen > sizeof(cmd) &&
656 !ib_is_udata_cleared(udata, sizeof(cmd),
657 udata->inlen - sizeof(cmd))) {
658 ibdev_dbg(&dev->ibdev,
659 "Incompatible ABI params, unknown fields in udata\n");
664 err = ib_copy_from_udata(&cmd, udata,
665 min(sizeof(cmd), udata->inlen));
667 ibdev_dbg(&dev->ibdev,
668 "Cannot copy udata for create_qp\n");
673 ibdev_dbg(&dev->ibdev,
674 "Incompatible ABI params, unknown fields in udata\n");
679 create_qp_params.uarn = ucontext->uarn;
680 create_qp_params.pd = to_epd(ibqp->pd)->pdn;
682 if (init_attr->qp_type == IB_QPT_UD) {
683 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
684 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
685 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
687 ibdev_dbg(&dev->ibdev,
688 "Unsupported qp type %d driver qp type %d\n",
689 init_attr->qp_type, cmd.driver_qp_type);
694 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
695 init_attr->qp_type, cmd.driver_qp_type);
696 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
697 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
698 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
699 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
701 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
702 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
703 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
705 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
706 qp->rq_size, DMA_TO_DEVICE);
707 if (!qp->rq_cpu_addr) {
712 ibdev_dbg(&dev->ibdev,
713 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
714 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
715 create_qp_params.rq_base_addr = qp->rq_dma_addr;
718 err = efa_com_create_qp(&dev->edev, &create_qp_params,
721 goto err_free_mapped;
723 resp.sq_db_offset = create_qp_resp.sq_db_offset;
724 resp.rq_db_offset = create_qp_resp.rq_db_offset;
725 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
726 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
727 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
729 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
734 qp->qp_handle = create_qp_resp.qp_handle;
735 qp->ibqp.qp_num = create_qp_resp.qp_num;
736 qp->max_send_wr = init_attr->cap.max_send_wr;
737 qp->max_recv_wr = init_attr->cap.max_recv_wr;
738 qp->max_send_sge = init_attr->cap.max_send_sge;
739 qp->max_recv_sge = init_attr->cap.max_recv_sge;
740 qp->max_inline_data = init_attr->cap.max_inline_data;
743 err = ib_copy_to_udata(udata, &resp,
744 min(sizeof(resp), udata->outlen));
746 ibdev_dbg(&dev->ibdev,
747 "Failed to copy udata for qp[%u]\n",
748 create_qp_resp.qp_num);
749 goto err_remove_mmap_entries;
753 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
757 err_remove_mmap_entries:
758 efa_qp_user_mmap_entries_remove(qp);
760 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
763 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
764 qp->rq_size, DMA_TO_DEVICE);
766 atomic64_inc(&dev->stats.create_qp_err);
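/*
 * Note (descriptive comment, not in the original source): state machine for
 * SRD (driver type) QPs. For every {cur_state, next_state} pair it records
 * whether the transition is valid and which attribute bits are required or
 * optional, mirroring the role ib_modify_qp_is_ok() plays for UD QPs.
 */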
770 static const struct {
772 enum ib_qp_attr_mask req_param;
773 enum ib_qp_attr_mask opt_param;
774 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
776 [IB_QPS_RESET] = { .valid = 1 },
779 .req_param = IB_QP_PKEY_INDEX |
785 [IB_QPS_RESET] = { .valid = 1 },
786 [IB_QPS_ERR] = { .valid = 1 },
789 .opt_param = IB_QP_PKEY_INDEX |
795 .opt_param = IB_QP_PKEY_INDEX |
800 [IB_QPS_RESET] = { .valid = 1 },
801 [IB_QPS_ERR] = { .valid = 1 },
804 .req_param = IB_QP_SQ_PSN,
805 .opt_param = IB_QP_CUR_STATE |
812 [IB_QPS_RESET] = { .valid = 1 },
813 [IB_QPS_ERR] = { .valid = 1 },
816 .opt_param = IB_QP_CUR_STATE |
821 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
825 [IB_QPS_RESET] = { .valid = 1 },
826 [IB_QPS_ERR] = { .valid = 1 },
829 .opt_param = IB_QP_CUR_STATE |
834 .opt_param = IB_QP_PKEY_INDEX |
839 [IB_QPS_RESET] = { .valid = 1 },
840 [IB_QPS_ERR] = { .valid = 1 },
843 .opt_param = IB_QP_CUR_STATE |
848 [IB_QPS_RESET] = { .valid = 1 },
849 [IB_QPS_ERR] = { .valid = 1 },
853 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
854 enum ib_qp_state next_state,
855 enum ib_qp_attr_mask mask)
857 enum ib_qp_attr_mask req_param, opt_param;
859 if (mask & IB_QP_CUR_STATE &&
860 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
861 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
864 if (!srd_qp_state_table[cur_state][next_state].valid)
867 req_param = srd_qp_state_table[cur_state][next_state].req_param;
868 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
870 if ((mask & req_param) != req_param)
873 if (mask & ~(req_param | opt_param | IB_QP_STATE))
879 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
880 struct ib_qp_attr *qp_attr, int qp_attr_mask,
881 enum ib_qp_state cur_state,
882 enum ib_qp_state new_state)
886 #define EFA_MODIFY_QP_SUPP_MASK \
887 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
888 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
891 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
892 ibdev_dbg(&dev->ibdev,
893 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
894 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
898 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
899 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
902 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
906 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
910 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
911 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
915 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
916 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
923 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
924 int qp_attr_mask, struct ib_udata *udata)
926 struct efa_dev *dev = to_edev(ibqp->device);
927 struct efa_com_modify_qp_params params = {};
928 struct efa_qp *qp = to_eqp(ibqp);
929 enum ib_qp_state cur_state;
930 enum ib_qp_state new_state;
933 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
937 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
938 ibdev_dbg(&dev->ibdev,
939 "Incompatible ABI params, udata not cleared\n");
943 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
945 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
947 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
952 params.qp_handle = qp->qp_handle;
954 if (qp_attr_mask & IB_QP_STATE) {
955 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
957 EFA_SET(&params.modify_mask,
958 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
959 params.cur_qp_state = cur_state;
960 params.qp_state = new_state;
963 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
964 EFA_SET(&params.modify_mask,
965 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
966 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
969 if (qp_attr_mask & IB_QP_QKEY) {
970 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
971 params.qkey = qp_attr->qkey;
974 if (qp_attr_mask & IB_QP_SQ_PSN) {
975 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
976 params.sq_psn = qp_attr->sq_psn;
979 if (qp_attr_mask & IB_QP_RNR_RETRY) {
980 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
982 params.rnr_retry = qp_attr->rnr_retry;
985 err = efa_com_modify_qp(&dev->edev, &params);
989 qp->state = new_state;
994 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
996 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
998 return efa_com_destroy_cq(&dev->edev, &params);
1001 static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
1003 rdma_user_mmap_entry_remove(cq->db_mmap_entry);
1004 rdma_user_mmap_entry_remove(cq->mmap_entry);
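/*
 * Note (descriptive comment, not in the original source): CQ teardown order —
 * drop the user mmap entries, destroy the CQ on the device, remove it from
 * the xarray used to look the CQ up on completion interrupts, let any
 * in-flight interrupt finish, and only then free the CQ buffer.
 */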
1007 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1009 struct efa_dev *dev = to_edev(ibcq->device);
1010 struct efa_cq *cq = to_ecq(ibcq);
1012 ibdev_dbg(&dev->ibdev,
1013 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
1014 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
1016 efa_cq_user_mmap_entries_remove(cq);
1017 efa_destroy_cq_idx(dev, cq->cq_idx);
1019 xa_erase(&dev->cqs_xa, cq->cq_idx);
1020 synchronize_irq(cq->eq->irq.irqn);
1022 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1027 static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
1029 return &dev->eqs[vec];
1032 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1033 struct efa_ibv_create_cq_resp *resp,
1036 resp->q_mmap_size = cq->size;
1037 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1038 virt_to_phys(cq->cpu_addr),
1039 cq->size, EFA_MMAP_DMA_PAGE,
1041 if (!cq->mmap_entry)
1046 efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1047 dev->db_bar_addr + resp->db_off,
1048 PAGE_SIZE, EFA_MMAP_IO_NC,
1049 &resp->db_mmap_key);
1050 if (!cq->db_mmap_entry) {
1051 rdma_user_mmap_entry_remove(cq->mmap_entry);
1055 resp->db_off &= ~PAGE_MASK;
1056 resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
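/*
 * Note (descriptive comment, not in the original source): CQ creation —
 * validate the user command, allocate and DMA-map the CQ buffer, create the
 * CQ through the admin queue (optionally bound to an EQ when a completion
 * channel was requested), then expose the buffer and doorbell to userspace
 * via mmap entries.
 */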
1062 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1063 struct ib_udata *udata)
1065 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1066 udata, struct efa_ucontext, ibucontext);
1067 struct efa_com_create_cq_params params = {};
1068 struct efa_ibv_create_cq_resp resp = {};
1069 struct efa_com_create_cq_result result;
1070 struct ib_device *ibdev = ibcq->device;
1071 struct efa_dev *dev = to_edev(ibdev);
1072 struct efa_ibv_create_cq cmd = {};
1073 struct efa_cq *cq = to_ecq(ibcq);
1074 int entries = attr->cqe;
1078 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1083 if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1085 "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1086 entries, dev->dev_attr.max_cq_depth);
1091 if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1093 "Incompatible ABI params, no input udata\n");
1098 if (udata->inlen > sizeof(cmd) &&
1099 !ib_is_udata_cleared(udata, sizeof(cmd),
1100 udata->inlen - sizeof(cmd))) {
1102 "Incompatible ABI params, unknown fields in udata\n");
1107 err = ib_copy_from_udata(&cmd, udata,
1108 min(sizeof(cmd), udata->inlen));
1110 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1114 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
1116 "Incompatible ABI params, unknown fields in udata\n");
1121 set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
1122 if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
1124 cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
1126 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1131 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1133 "Invalid number of sub cqs[%u] expected[%u]\n",
1134 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1139 cq->ucontext = ucontext;
1140 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1141 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1143 if (!cq->cpu_addr) {
1148 params.uarn = cq->ucontext->uarn;
1149 params.cq_depth = entries;
1150 params.dma_addr = cq->dma_addr;
1151 params.entry_size_in_bytes = cmd.cq_entry_size;
1152 params.num_sub_cqs = cmd.num_sub_cqs;
1153 params.set_src_addr = set_src_addr;
1154 if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
1155 cq->eq = efa_vec2eq(dev, attr->comp_vector);
1156 params.eqn = cq->eq->eeq.eqn;
1157 params.interrupt_mode_enabled = true;
1160 err = efa_com_create_cq(&dev->edev, &params, &result);
1162 goto err_free_mapped;
1164 resp.db_off = result.db_off;
1165 resp.cq_idx = result.cq_idx;
1166 cq->cq_idx = result.cq_idx;
1167 cq->ibcq.cqe = result.actual_depth;
1168 WARN_ON_ONCE(entries != result.actual_depth);
1170 err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
1172 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1174 goto err_destroy_cq;
1178 err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
1180 ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
1182 goto err_remove_mmap;
1186 if (udata->outlen) {
1187 err = ib_copy_to_udata(udata, &resp,
1188 min(sizeof(resp), udata->outlen));
1191 "Failed to copy udata for create_cq\n");
1196 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1197 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1203 xa_erase(&dev->cqs_xa, cq->cq_idx);
1205 efa_cq_user_mmap_entries_remove(cq);
1207 efa_destroy_cq_idx(dev, cq->cq_idx);
1209 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1213 atomic64_inc(&dev->stats.create_cq_err);
1217 static int umem_to_page_list(struct efa_dev *dev,
1218 struct ib_umem *umem,
1223 u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1224 struct ib_block_iter biter;
1225 unsigned int hp_idx = 0;
1227 ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1228 hp_cnt, pages_in_hp);
1230 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1231 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
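/*
 * Note (descriptive comment, not in the original source): the helper below
 * builds a scatterlist over the pages backing a vmalloc'ed PBL buffer so the
 * buffer can be DMA-mapped page by page.
 */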
1236 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1238 struct scatterlist *sglist;
1242 sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1245 sg_init_table(sglist, page_cnt);
1246 for (i = 0; i < page_cnt; i++) {
1247 pg = vmalloc_to_page(buf);
1250 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1251 buf += PAGE_SIZE / sizeof(*buf);
1261 /* create a chunk list of the DMA addresses of the physical pages from the
1262  * supplied scatter gather list */
1264 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1266 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1267 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1268 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1269 unsigned int chunk_list_size, chunk_idx, payload_idx;
1270 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1271 struct efa_com_ctrl_buff_info *ctrl_buf;
1272 u64 *cur_chunk_buf, *prev_chunk_buf;
1273 struct ib_block_iter biter;
1274 dma_addr_t dma_addr;
1277 /* allocate a chunk list that consists of 4KB chunks */
1278 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1280 chunk_list->size = chunk_list_size;
1281 chunk_list->chunks = kcalloc(chunk_list_size,
1282 sizeof(*chunk_list->chunks),
1284 if (!chunk_list->chunks)
1287 ibdev_dbg(&dev->ibdev,
1288 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1291 /* allocate chunk buffers: */
1292 for (i = 0; i < chunk_list_size; i++) {
1293 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1294 if (!chunk_list->chunks[i].buf)
1295 goto chunk_list_dealloc;
1297 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1299 chunk_list->chunks[chunk_list_size - 1].length =
1300 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1303 /* fill the dma addresses of sg list pages to chunks: */
1306 cur_chunk_buf = chunk_list->chunks[0].buf;
1307 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1308 EFA_CHUNK_PAYLOAD_SIZE) {
1309 cur_chunk_buf[payload_idx++] =
1310 rdma_block_iter_dma_address(&biter);
1312 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1314 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1319 /* map chunks to dma and fill chunks next ptrs */
1320 for (i = chunk_list_size - 1; i >= 0; i--) {
1321 dma_addr = dma_map_single(&dev->pdev->dev,
1322 chunk_list->chunks[i].buf,
1323 chunk_list->chunks[i].length,
1325 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1326 ibdev_err(&dev->ibdev,
1327 "chunk[%u] dma_map_failed\n", i);
1328 goto chunk_list_unmap;
1331 chunk_list->chunks[i].dma_addr = dma_addr;
1332 ibdev_dbg(&dev->ibdev,
1333 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1338 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1340 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1341 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1342 ctrl_buf->length = chunk_list->chunks[i].length;
1344 efa_com_set_dma_addr(dma_addr,
1345 &ctrl_buf->address.mem_addr_high,
1346 &ctrl_buf->address.mem_addr_low);
1352 for (; i < chunk_list_size; i++) {
1353 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1354 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1357 for (i = 0; i < chunk_list_size; i++)
1358 kfree(chunk_list->chunks[i].buf);
1360 kfree(chunk_list->chunks);
1364 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1366 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1369 for (i = 0; i < chunk_list->size; i++) {
1370 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1371 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1372 kfree(chunk_list->chunks[i].buf);
1375 kfree(chunk_list->chunks);
1378 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1379 static int pbl_continuous_initialize(struct efa_dev *dev,
1380 struct pbl_context *pbl)
1382 dma_addr_t dma_addr;
1384 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1385 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1386 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1387 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1391 pbl->phys.continuous.dma_addr = dma_addr;
1392 ibdev_dbg(&dev->ibdev,
1393 "pbl continuous - dma_addr = %pad, size[%u]\n",
1394 &dma_addr, pbl->pbl_buf_size_in_bytes);
1400 /* initialize pbl indirect mode:
1401  * create a chunk list out of the dma addresses of the physical pages of the pbl buffer. */
1404 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1406 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
1407 struct scatterlist *sgl;
1408 int sg_dma_cnt, err;
1410 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1411 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1415 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1421 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1422 pbl->phys.indirect.sgl = sgl;
1423 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1424 err = pbl_chunk_list_create(dev, pbl);
1426 ibdev_dbg(&dev->ibdev,
1427 "chunk_list creation failed[%d]\n", err);
1431 ibdev_dbg(&dev->ibdev,
1432 "pbl indirect - size[%u], chunks[%u]\n",
1433 pbl->pbl_buf_size_in_bytes,
1434 pbl->phys.indirect.chunk_list.size);
1439 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1445 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1447 pbl_chunk_list_destroy(dev, pbl);
1448 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1449 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1450 kfree(pbl->phys.indirect.sgl);
1453 /* create a page buffer list from a mapped user memory region */
1454 static int pbl_create(struct efa_dev *dev,
1455 struct pbl_context *pbl,
1456 struct ib_umem *umem,
1462 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1463 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1467 if (is_vmalloc_addr(pbl->pbl_buf)) {
1468 pbl->physically_continuous = 0;
1469 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1474 err = pbl_indirect_initialize(dev, pbl);
1478 pbl->physically_continuous = 1;
1479 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1484 err = pbl_continuous_initialize(dev, pbl);
1489 ibdev_dbg(&dev->ibdev,
1490 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1491 hp_cnt, pbl->physically_continuous);
1496 kvfree(pbl->pbl_buf);
1500 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1502 if (pbl->physically_continuous)
1503 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1504 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1506 pbl_indirect_terminate(dev, pbl);
1508 kvfree(pbl->pbl_buf);
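/*
 * Note (descriptive comment, not in the original source): when the number of
 * device pages fits in the admin command's inline array, the page addresses
 * are passed inline; otherwise a page buffer list is built and DMA-mapped
 * directly when physically contiguous, or turned into an indirect chunk list
 * when the PBL buffer was vmalloc'ed.
 */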
1511 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1512 struct efa_com_reg_mr_params *params)
1516 params->inline_pbl = 1;
1517 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1518 params->page_num, params->page_shift);
1522 ibdev_dbg(&dev->ibdev,
1523 "inline_pbl_array - pages[%u]\n", params->page_num);
1528 static int efa_create_pbl(struct efa_dev *dev,
1529 struct pbl_context *pbl,
1531 struct efa_com_reg_mr_params *params)
1535 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1536 params->page_shift);
1538 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1542 params->inline_pbl = 0;
1543 params->indirect = !pbl->physically_continuous;
1544 if (pbl->physically_continuous) {
1545 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1547 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1548 &params->pbl.pbl.address.mem_addr_high,
1549 &params->pbl.pbl.address.mem_addr_low);
1551 params->pbl.pbl.length =
1552 pbl->phys.indirect.chunk_list.chunks[0].length;
1554 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1555 &params->pbl.pbl.address.mem_addr_high,
1556 &params->pbl.pbl.address.mem_addr_low);
1562 static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
1563 struct ib_udata *udata)
1565 struct efa_dev *dev = to_edev(ibpd->device);
1566 int supp_access_flags;
1569 if (udata && udata->inlen &&
1570 !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
1571 ibdev_dbg(&dev->ibdev,
1572 "Incompatible ABI params, udata not cleared\n");
1573 return ERR_PTR(-EINVAL);
1577 IB_ACCESS_LOCAL_WRITE |
1578 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
1579 (EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
1581 access_flags &= ~IB_ACCESS_OPTIONAL;
1582 if (access_flags & ~supp_access_flags) {
1583 ibdev_dbg(&dev->ibdev,
1584 "Unsupported access flags[%#x], supported[%#x]\n",
1585 access_flags, supp_access_flags);
1586 return ERR_PTR(-EOPNOTSUPP);
1589 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1591 return ERR_PTR(-ENOMEM);
1596 static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
1597 u64 length, u64 virt_addr, int access_flags)
1599 struct efa_dev *dev = to_edev(ibpd->device);
1600 struct efa_com_reg_mr_params params = {};
1601 struct efa_com_reg_mr_result result = {};
1602 struct pbl_context pbl;
1607 params.pd = to_epd(ibpd)->pdn;
1608 params.iova = virt_addr;
1609 params.mr_length_in_bytes = length;
1610 params.permissions = access_flags;
1612 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1613 dev->dev_attr.page_size_cap,
1616 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1617 dev->dev_attr.page_size_cap);
1621 params.page_shift = order_base_2(pg_sz);
1622 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1624 ibdev_dbg(&dev->ibdev,
1625 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1626 start, length, params.page_shift, params.page_num);
1628 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1629 if (params.page_num <= inline_size) {
1630 err = efa_create_inline_pbl(dev, mr, &params);
1634 err = efa_com_register_mr(&dev->edev, &params, &result);
1638 err = efa_create_pbl(dev, &pbl, mr, &params);
1642 err = efa_com_register_mr(&dev->edev, &params, &result);
1643 pbl_destroy(dev, &pbl);
1649 mr->ibmr.lkey = result.l_key;
1650 mr->ibmr.rkey = result.r_key;
1651 mr->ibmr.length = length;
1652 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
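/*
 * Note (descriptive comment, not in the original source): dma-buf MR
 * registration uses the pinned umem dmabuf API, which keeps the dma-buf
 * attachment mapped for the lifetime of the MR; the rest of the registration
 * path is shared with regular user MRs.
 */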
1657 struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
1658 u64 length, u64 virt_addr,
1659 int fd, int access_flags,
1660 struct ib_udata *udata)
1662 struct efa_dev *dev = to_edev(ibpd->device);
1663 struct ib_umem_dmabuf *umem_dmabuf;
1667 mr = efa_alloc_mr(ibpd, access_flags, udata);
1673 umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
1675 if (IS_ERR(umem_dmabuf)) {
1676 err = PTR_ERR(umem_dmabuf);
1677 ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
1681 mr->umem = &umem_dmabuf->umem;
1682 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1689 ib_umem_release(mr->umem);
1693 atomic64_inc(&dev->stats.reg_mr_err);
1694 return ERR_PTR(err);
1697 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1698 u64 virt_addr, int access_flags,
1699 struct ib_udata *udata)
1701 struct efa_dev *dev = to_edev(ibpd->device);
1705 mr = efa_alloc_mr(ibpd, access_flags, udata);
1711 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1712 if (IS_ERR(mr->umem)) {
1713 err = PTR_ERR(mr->umem);
1714 ibdev_dbg(&dev->ibdev,
1715 "Failed to pin and map user space memory[%d]\n", err);
1719 err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
1726 ib_umem_release(mr->umem);
1730 atomic64_inc(&dev->stats.reg_mr_err);
1731 return ERR_PTR(err);
1734 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1736 struct efa_dev *dev = to_edev(ibmr->device);
1737 struct efa_com_dereg_mr_params params;
1738 struct efa_mr *mr = to_emr(ibmr);
1741 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1743 params.l_key = mr->ibmr.lkey;
1744 err = efa_com_dereg_mr(&dev->edev, &params);
1748 ib_umem_release(mr->umem);
1754 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1755 struct ib_port_immutable *immutable)
1757 struct ib_port_attr attr;
1760 err = ib_query_port(ibdev, port_num, &attr);
1762 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1766 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1767 immutable->gid_tbl_len = attr.gid_tbl_len;
1772 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1774 struct efa_com_dealloc_uar_params params = {
1778 return efa_com_dealloc_uar(&dev->edev, &params);
1781 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1782 (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? NULL : #_attr)
1785 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1786 const struct efa_ibv_alloc_ucontext_cmd *cmd)
1788 struct efa_dev *dev = to_edev(ibucontext->device);
1791 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1792 EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1795 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1796 EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1803 ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
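/*
 * Note (descriptive comment, not in the original source): the handshake above
 * fails ucontext allocation if the device reports an attribute (e.g.
 * max_tx_batch, min_sq_depth) that userspace did not declare it understands
 * via the command comp_mask.
 */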
1808 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1810 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1811 struct efa_dev *dev = to_edev(ibucontext->device);
1812 struct efa_ibv_alloc_ucontext_resp resp = {};
1813 struct efa_ibv_alloc_ucontext_cmd cmd = {};
1814 struct efa_com_alloc_uar_result result;
1818 /* it's fine if the driver does not know all request fields,
1819  * we will ack input fields in our response. */
1822 err = ib_copy_from_udata(&cmd, udata,
1823 min(sizeof(cmd), udata->inlen));
1825 ibdev_dbg(&dev->ibdev,
1826 "Cannot copy udata for alloc_ucontext\n");
1830 err = efa_user_comp_handshake(ibucontext, &cmd);
1834 err = efa_com_alloc_uar(&dev->edev, &result);
1838 ucontext->uarn = result.uarn;
1840 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1841 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1842 resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1843 resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1844 resp.max_llq_size = dev->dev_attr.max_llq_size;
1845 resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1846 resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1848 err = ib_copy_to_udata(udata, &resp,
1849 min(sizeof(resp), udata->outlen));
1851 goto err_dealloc_uar;
1856 efa_dealloc_uar(dev, result.uarn);
1858 atomic64_inc(&dev->stats.alloc_ucontext_err);
1862 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1864 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1865 struct efa_dev *dev = to_edev(ibucontext->device);
1867 efa_dealloc_uar(dev, ucontext->uarn);
1870 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1872 struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1877 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
1878 struct vm_area_struct *vma)
1880 struct rdma_user_mmap_entry *rdma_entry;
1881 struct efa_user_mmap_entry *entry;
1886 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1888 ibdev_dbg(&dev->ibdev,
1889 "pgoff[%#lx] does not have valid entry\n",
1891 atomic64_inc(&dev->stats.mmap_err);
1894 entry = to_emmap(rdma_entry);
1896 ibdev_dbg(&dev->ibdev,
1897 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1898 entry->address, rdma_entry->npages * PAGE_SIZE,
1901 pfn = entry->address >> PAGE_SHIFT;
1902 switch (entry->mmap_flag) {
1903 case EFA_MMAP_IO_NC:
1904 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1905 entry->rdma_entry.npages * PAGE_SIZE,
1906 pgprot_noncached(vma->vm_page_prot),
1909 case EFA_MMAP_IO_WC:
1910 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1911 entry->rdma_entry.npages * PAGE_SIZE,
1912 pgprot_writecombine(vma->vm_page_prot),
1915 case EFA_MMAP_DMA_PAGE:
1916 for (va = vma->vm_start; va < vma->vm_end;
1917 va += PAGE_SIZE, pfn++) {
1918 err = vm_insert_page(vma, va, pfn_to_page(pfn));
1930 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1931 entry->address, rdma_entry->npages * PAGE_SIZE,
1932 entry->mmap_flag, err);
1933 atomic64_inc(&dev->stats.mmap_err);
1936 rdma_user_mmap_entry_put(rdma_entry);
1940 int efa_mmap(struct ib_ucontext *ibucontext,
1941 struct vm_area_struct *vma)
1943 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1944 struct efa_dev *dev = to_edev(ibucontext->device);
1945 size_t length = vma->vm_end - vma->vm_start;
1947 ibdev_dbg(&dev->ibdev,
1948 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1949 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1951 return __efa_mmap(dev, ucontext, vma);
1954 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1956 struct efa_com_destroy_ah_params params = {
1958 .pdn = to_epd(ah->ibah.pd)->pdn,
1961 return efa_com_destroy_ah(&dev->edev, &params);
1964 int efa_create_ah(struct ib_ah *ibah,
1965 struct rdma_ah_init_attr *init_attr,
1966 struct ib_udata *udata)
1968 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
1969 struct efa_dev *dev = to_edev(ibah->device);
1970 struct efa_com_create_ah_params params = {};
1971 struct efa_ibv_create_ah_resp resp = {};
1972 struct efa_com_create_ah_result result;
1973 struct efa_ah *ah = to_eah(ibah);
1976 if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
1977 ibdev_dbg(&dev->ibdev,
1978 "Create address handle is not supported in atomic context\n");
1984 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1985 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1990 memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1991 sizeof(params.dest_addr));
1992 params.pdn = to_epd(ibah->pd)->pdn;
1993 err = efa_com_create_ah(&dev->edev, &params, &result);
1997 memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
2000 resp.efa_address_handle = result.ah;
2002 if (udata->outlen) {
2003 err = ib_copy_to_udata(udata, &resp,
2004 min(sizeof(resp), udata->outlen));
2006 ibdev_dbg(&dev->ibdev,
2007 "Failed to copy udata for create_ah response\n");
2008 goto err_destroy_ah;
2011 ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
2016 efa_ah_destroy(dev, ah);
2018 atomic64_inc(&dev->stats.create_ah_err);
2022 int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
2024 struct efa_dev *dev = to_edev(ibah->pd->device);
2025 struct efa_ah *ah = to_eah(ibah);
2027 ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
2029 if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
2030 ibdev_dbg(&dev->ibdev,
2031 "Destroy address handle is not supported in atomic context\n");
2035 efa_ah_destroy(dev, ah);
2039 struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
2042 return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
2043 ARRAY_SIZE(efa_port_stats_descs),
2044 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2047 struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
2049 return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
2050 ARRAY_SIZE(efa_device_stats_descs),
2051 RDMA_HW_STATS_DEFAULT_LIFESPAN);
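/*
 * Note (descriptive comment, not in the original source): device-wide stats
 * combine admin-queue command counters with the software error counters
 * incremented on the verbs error paths above.
 */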
2054 static int efa_fill_device_stats(struct efa_dev *dev,
2055 struct rdma_hw_stats *stats)
2057 struct efa_com_stats_admin *as = &dev->edev.aq.stats;
2058 struct efa_stats *s = &dev->stats;
2060 stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
2061 stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
2062 stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
2063 stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
2065 stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
2066 stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
2067 stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
2068 stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
2069 stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
2070 stats->value[EFA_ALLOC_UCONTEXT_ERR] =
2071 atomic64_read(&s->alloc_ucontext_err);
2072 stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
2073 stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
2075 return ARRAY_SIZE(efa_device_stats_descs);
2078 static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
2081 struct efa_com_get_stats_params params = {};
2082 union efa_com_get_stats_result result;
2083 struct efa_com_rdma_read_stats *rrs;
2084 struct efa_com_messages_stats *ms;
2085 struct efa_com_basic_stats *bs;
2088 params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
2089 params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
2091 err = efa_com_get_stats(&dev->edev, &params, &result);
2095 bs = &result.basic_stats;
2096 stats->value[EFA_TX_BYTES] = bs->tx_bytes;
2097 stats->value[EFA_TX_PKTS] = bs->tx_pkts;
2098 stats->value[EFA_RX_BYTES] = bs->rx_bytes;
2099 stats->value[EFA_RX_PKTS] = bs->rx_pkts;
2100 stats->value[EFA_RX_DROPS] = bs->rx_drops;
2102 params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
2103 err = efa_com_get_stats(&dev->edev, &params, &result);
2107 ms = &result.messages_stats;
2108 stats->value[EFA_SEND_BYTES] = ms->send_bytes;
2109 stats->value[EFA_SEND_WRS] = ms->send_wrs;
2110 stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
2111 stats->value[EFA_RECV_WRS] = ms->recv_wrs;
2113 params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
2114 err = efa_com_get_stats(&dev->edev, &params, &result);
2118 rrs = &result.rdma_read_stats;
2119 stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
2120 stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
2121 stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
2122 stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
2124 return ARRAY_SIZE(efa_port_stats_descs);
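/*
 * Note (descriptive comment, not in the original source): per the rdma
 * counters API, a zero port number requests the device-wide counters, while
 * any other value requests the per-port hardware counters.
 */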
2127 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
2128 u32 port_num, int index)
2131 return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
2133 return efa_fill_device_stats(to_edev(ibdev), stats);
2136 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
2139 return IB_LINK_LAYER_UNSPECIFIED;