Merge tag 'v4.18' into rdma.git for-next
[platform/kernel/linux-rpi.git] / drivers / infiniband / hw / qedr / verbs.c
index f07b8df..8cc3df2 100644 (file)
 #include <rdma/qedr-abi.h>
 #include "qedr_roce_cm.h"
 
+#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
+#define        RDMA_MAX_SGE_PER_SRQ    (4)
+#define RDMA_MAX_SRQ_WQE_SIZE  (RDMA_MAX_SGE_PER_SRQ + 1)
+
 #define DB_ADDR_SHIFT(addr)            ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
@@ -84,6 +88,19 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
        return 0;
 }
 
+/* qedr_query_srq() - report current SRQ attributes.
+ *
+ * srq_limit is the driver's cached limit (kept up to date by
+ * qedr_modify_srq()).  max_wr and max_sge report the device-wide
+ * capabilities from dev->attr rather than the values this SRQ was
+ * created with -- NOTE(review): presumably intentional, confirm
+ * against consumer expectations.
+ */
+int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+       struct qedr_device_attr *qattr = &dev->attr;
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+       srq_attr->srq_limit = srq->srq_limit;
+       srq_attr->max_wr = qattr->max_srq_wr;
+       srq_attr->max_sge = qattr->max_sge;
+
+       return 0;
+}
+
 int qedr_query_device(struct ib_device *ibdev,
                      struct ib_device_attr *attr, struct ib_udata *udata)
 {
@@ -112,7 +129,8 @@ int qedr_query_device(struct ib_device *ibdev,
            IB_DEVICE_RC_RNR_NAK_GEN |
            IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
 
-       attr->max_sge = qattr->max_sge;
+       attr->max_send_sge = qattr->max_sge;
+       attr->max_recv_sge = qattr->max_sge;
        attr->max_sge_rd = qattr->max_sge;
        attr->max_cq = qattr->max_cq;
        attr->max_cqe = qattr->max_cqe;
@@ -224,7 +242,7 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
        attr->lmc = 0;
        attr->sm_lid = 0;
        attr->sm_sl = 0;
-       attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+       attr->ip_gids = true;
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                attr->gid_tbl_len = 1;
                attr->pkey_tbl_len = 1;
@@ -1075,27 +1093,19 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
                                          struct qed_rdma_modify_qp_in_params
                                          *qp_params)
 {
+       const struct ib_gid_attr *gid_attr;
        enum rdma_network_type nw_type;
-       struct ib_gid_attr gid_attr;
        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
-       union ib_gid gid;
        u32 ipv4_addr;
-       int rc = 0;
        int i;
 
-       rc = ib_get_cached_gid(ibqp->device,
-                              rdma_ah_get_port_num(&attr->ah_attr),
-                              grh->sgid_index, &gid, &gid_attr);
-       if (rc)
-               return rc;
-
-       qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+       gid_attr = grh->sgid_attr;
+       qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
 
-       dev_put(gid_attr.ndev);
-       nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+       nw_type = rdma_gid_attr_network_type(gid_attr);
        switch (nw_type) {
        case RDMA_NETWORK_IPV6:
-               memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+               memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
                       sizeof(qp_params->sgid));
                memcpy(&qp_params->dgid.bytes[0],
                       &grh->dgid,
@@ -1105,7 +1115,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
                          QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
                break;
        case RDMA_NETWORK_IB:
-               memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+               memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
                       sizeof(qp_params->sgid));
                memcpy(&qp_params->dgid.bytes[0],
                       &grh->dgid,
@@ -1115,7 +1125,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
        case RDMA_NETWORK_IPV4:
                memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
                memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
-               ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+               ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
                qp_params->sgid.ipv4_addr = ipv4_addr;
                ipv4_addr =
                    qedr_get_ipv4_from_gid(grh->dgid.raw);
@@ -1189,6 +1199,21 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
        return 0;
 }
 
+/* Copy the create-SRQ response (the firmware-assigned srq_id) back to
+ * user space so the user library can address this SRQ.  Returns the
+ * ib_copy_to_udata() result.
+ */
+static int qedr_copy_srq_uresp(struct qedr_dev *dev,
+                              struct qedr_srq *srq, struct ib_udata *udata)
+{
+       struct qedr_create_srq_uresp uresp = {};
+       int rc;
+
+       uresp.srq_id = srq->srq_id;
+
+       rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+       if (rc)
+               DP_ERR(dev, "create srq: problem copying data to user space\n");
+
+       return rc;
+}
+
 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
                               struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
@@ -1255,13 +1280,18 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
        qp->state = QED_ROCE_QP_STATE_RESET;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
        qp->sq_cq = get_qedr_cq(attrs->send_cq);
-       qp->rq_cq = get_qedr_cq(attrs->recv_cq);
        qp->dev = dev;
-       qp->rq.max_sges = attrs->cap.max_recv_sge;
 
-       DP_DEBUG(dev, QEDR_MSG_QP,
-                "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
-                qp->rq.max_sges, qp->rq_cq->icid);
+       if (attrs->srq) {
+               qp->srq = get_qedr_srq(attrs->srq);
+       } else {
+               qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+               qp->rq.max_sges = attrs->cap.max_recv_sge;
+               DP_DEBUG(dev, QEDR_MSG_QP,
+                        "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+                        qp->rq.max_sges, qp->rq_cq->icid);
+       }
+
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
                 pd->pd_id, qp->qp_type, qp->max_inline_data,
@@ -1276,9 +1306,303 @@ static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
        qp->sq.db = dev->db_addr +
                    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
        qp->sq.db_data.data.icid = qp->icid + 1;
-       qp->rq.db = dev->db_addr +
-                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
-       qp->rq.db_data.data.icid = qp->icid;
+       if (!qp->srq) {
+               qp->rq.db = dev->db_addr +
+                           DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+               qp->rq.db_data.data.icid = qp->icid;
+       }
+}
+
+/* Validate requested SRQ sizes against device capabilities.
+ *
+ * Returns 0 when attr.max_wr and attr.max_sge fit within the limits
+ * advertised in dev->attr, -EINVAL otherwise.  ibpd and udata are
+ * currently unused.
+ */
+static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+                                struct ib_srq_init_attr *attrs,
+                                struct ib_udata *udata)
+{
+       struct qedr_device_attr *qattr = &dev->attr;
+
+       if (attrs->attr.max_wr > qattr->max_srq_wr) {
+               DP_ERR(dev,
+                      "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
+                      attrs->attr.max_wr, qattr->max_srq_wr);
+               return -EINVAL;
+       }
+
+       if (attrs->attr.max_sge > qattr->max_sge) {
+               DP_ERR(dev,
+                      "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
+                      attrs->attr.max_sge, qattr->max_sge);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Release user-SRQ resources: the queue PBL/umem and the producer-pair
+ * umem pinned by qedr_init_srq_user_params().
+ */
+static void qedr_free_srq_user_params(struct qedr_srq *srq)
+{
+       qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+       ib_umem_release(srq->usrq.umem);
+       ib_umem_release(srq->prod_umem);
+}
+
+/* Release kernel-SRQ resources: the qed chain backing the queue and
+ * the coherent DMA page holding the producer pair, both allocated by
+ * qedr_alloc_srq_kernel_params().
+ */
+static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
+{
+       struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+       struct qedr_dev *dev = srq->dev;
+
+       dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
+
+       dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+                         hw_srq->virt_prod_pair_addr,
+                         hw_srq->phy_prod_pair_addr);
+}
+
+/* Pin and map the user-space SRQ buffer and producer pair.
+ *
+ * On success srq->usrq describes the queue and
+ * srq->hw_srq.phy_prod_pair_addr holds the DMA address of the
+ * user-supplied producer structure.  On failure, everything acquired
+ * here is released before returning a negative errno.
+ */
+static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
+                                    struct qedr_srq *srq,
+                                    struct qedr_create_srq_ureq *ureq,
+                                    int access, int dmasync)
+{
+       struct scatterlist *sg;
+       int rc;
+
+       rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
+                                 ureq->srq_len, access, dmasync, 1);
+       if (rc)
+               return rc;
+
+       srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
+                                    sizeof(struct rdma_srq_producers),
+                                    access, dmasync);
+       if (IS_ERR(srq->prod_umem)) {
+               /* Undo qedr_init_user_queue() before bailing out */
+               qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
+               ib_umem_release(srq->usrq.umem);
+               DP_ERR(srq->dev,
+                      "create srq: failed ib_umem_get for producer, got %ld\n",
+                      PTR_ERR(srq->prod_umem));
+               return PTR_ERR(srq->prod_umem);
+       }
+
+       /* DMA address of the first (and only) page of the producer pair */
+       sg = srq->prod_umem->sg_head.sgl;
+       srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
+
+       return 0;
+}
+
+/* Allocate kernel-SRQ resources: a coherent producer-pair page and a
+ * PBL-mode qed chain sized for max_wr WQEs.  Each WQE occupies a
+ * header element plus up to RDMA_MAX_SGE_PER_SRQ SGE elements, hence
+ * RDMA_MAX_SRQ_WQE_SIZE chain elements per WQE.
+ */
+static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
+                                       struct qedr_dev *dev,
+                                       struct ib_srq_init_attr *init_attr)
+{
+       struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+       dma_addr_t phy_prod_pair_addr;
+       u32 num_elems;
+       void *va;
+       int rc;
+
+       va = dma_alloc_coherent(&dev->pdev->dev,
+                               sizeof(struct rdma_srq_producers),
+                               &phy_prod_pair_addr, GFP_KERNEL);
+       if (!va) {
+               DP_ERR(dev,
+                      "create srq: failed to allocate dma memory for producer\n");
+               return -ENOMEM;
+       }
+
+       hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
+       hw_srq->virt_prod_pair_addr = va;
+
+       num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
+       rc = dev->ops->common->chain_alloc(dev->cdev,
+                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                          QED_CHAIN_MODE_PBL,
+                                          QED_CHAIN_CNT_TYPE_U32,
+                                          num_elems,
+                                          QEDR_SRQ_WQE_ELEM_SIZE,
+                                          &hw_srq->pbl, NULL);
+       if (rc)
+               goto err0;
+
+       hw_srq->num_elems = num_elems;
+
+       return 0;
+
+err0:
+       /* chain_alloc failed - release the producer page */
+       dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
+                         va, phy_prod_pair_addr);
+       return rc;
+}
+
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+                       void *ptr, u32 id);
+static void qedr_idr_remove(struct qedr_dev *dev,
+                           struct qedr_idr *qidr, u32 id);
+
+/* Create a shared receive queue.
+ *
+ * For user SRQs the queue and producer pair are mapped from user
+ * memory; for kernel SRQs a qed chain and a coherent producer page
+ * are allocated.  On success the SRQ is registered in dev->srqidr so
+ * CQE processing can look it up by id.  On failure the real error
+ * code is propagated (previously every failure path returned
+ * ERR_PTR(-EFAULT), and a udata copy failure leaked rc == 0).
+ */
+struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
+                              struct ib_srq_init_attr *init_attr,
+                              struct ib_udata *udata)
+{
+       struct qed_rdma_destroy_srq_in_params destroy_in_params;
+       struct qed_rdma_create_srq_in_params in_params = {};
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qed_rdma_create_srq_out_params out_params;
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_create_srq_ureq ureq = {};
+       u64 pbl_base_addr, phy_prod_pair_addr;
+       struct ib_ucontext *ib_ctx = NULL;
+       struct qedr_srq_hwq_info *hw_srq;
+       struct qedr_ucontext *ctx = NULL;
+       u32 page_cnt, page_size;
+       struct qedr_srq *srq;
+       int rc = 0;
+
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "create SRQ called from %s (pd %p)\n",
+                (udata) ? "User lib" : "kernel", pd);
+
+       rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+       if (rc)
+               return ERR_PTR(-EINVAL);
+
+       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+       if (!srq)
+               return ERR_PTR(-ENOMEM);
+
+       srq->dev = dev;
+       hw_srq = &srq->hw_srq;
+       spin_lock_init(&srq->lock);
+
+       hw_srq->max_wr = init_attr->attr.max_wr;
+       hw_srq->max_sges = init_attr->attr.max_sge;
+
+       if (udata && ibpd->uobject && ibpd->uobject->context) {
+               ib_ctx = ibpd->uobject->context;
+               ctx = get_qedr_ucontext(ib_ctx);
+
+               if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+                       DP_ERR(dev,
+                              "create srq: problem copying data from user space\n");
+                       /* rc was still 0 here; make the error explicit */
+                       rc = -EFAULT;
+                       goto err0;
+               }
+
+               rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
+               if (rc)
+                       goto err0;
+
+               page_cnt = srq->usrq.pbl_info.num_pbes;
+               pbl_base_addr = srq->usrq.pbl_tbl->pa;
+               phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+               page_size = BIT(srq->usrq.umem->page_shift);
+       } else {
+               struct qed_chain *pbl;
+
+               rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
+               if (rc)
+                       goto err0;
+
+               pbl = &hw_srq->pbl;
+               page_cnt = qed_chain_get_page_cnt(pbl);
+               pbl_base_addr = qed_chain_get_pbl_phys(pbl);
+               phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
+               page_size = QED_CHAIN_PAGE_SIZE;
+       }
+
+       in_params.pd_id = pd->pd_id;
+       in_params.pbl_base_addr = pbl_base_addr;
+       in_params.prod_pair_addr = phy_prod_pair_addr;
+       in_params.num_pages = page_cnt;
+       in_params.page_size = page_size;
+
+       rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
+       if (rc)
+               goto err1;
+
+       srq->srq_id = out_params.srq_id;
+
+       if (udata) {
+               rc = qedr_copy_srq_uresp(dev, srq, udata);
+               if (rc)
+                       goto err2;
+       }
+
+       rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
+       if (rc)
+               goto err2;
+
+       DP_DEBUG(dev, QEDR_MSG_SRQ,
+                "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
+       return &srq->ibsrq;
+
+err2:
+       /* Firmware object exists - destroy it before unwinding further */
+       destroy_in_params.srq_id = srq->srq_id;
+
+       dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
+err1:
+       if (udata)
+               qedr_free_srq_user_params(srq);
+       else
+               qedr_free_srq_kernel_params(srq);
+err0:
+       kfree(srq);
+
+       /* Propagate the actual error instead of always -EFAULT */
+       return ERR_PTR(rc);
+}
+
+/* Tear down an SRQ: unregister it from the srq idr, destroy the
+ * firmware object, then release the user or kernel queue memory and
+ * free the SRQ itself.  Always returns 0.
+ */
+int qedr_destroy_srq(struct ib_srq *ibsrq)
+{
+       struct qed_rdma_destroy_srq_in_params in_params = {};
+       struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
+
+       qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
+       in_params.srq_id = srq->srq_id;
+       dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
+
+       /* User SRQs are identified by the pd's user object */
+       if (ibsrq->pd->uobject)
+               qedr_free_srq_user_params(srq);
+       else
+               qedr_free_srq_kernel_params(srq);
+
+       DP_DEBUG(dev, QEDR_MSG_SRQ,
+                "destroy srq: destroyed srq with srq_id=0x%0x\n",
+                srq->srq_id);
+       kfree(srq);
+
+       return 0;
+}
+
+/* Modify an SRQ.  Only IB_SRQ_LIMIT is supported; resizing via
+ * IB_SRQ_MAX_WR is rejected with -EINVAL.  A new limit arms the SRQ
+ * to raise a limit event once posted WQEs drop below it.
+ */
+int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+                   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+       struct qed_rdma_modify_srq_in_params in_params = {};
+       struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
+       int rc;
+
+       if (attr_mask & IB_SRQ_MAX_WR) {
+               DP_ERR(dev,
+                      "modify srq: invalid attribute mask=0x%x specified for %p\n",
+                      attr_mask, srq);
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_SRQ_LIMIT) {
+               if (attr->srq_limit >= srq->hw_srq.max_wr) {
+                       DP_ERR(dev,
+                              "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
+                              attr->srq_limit, srq->hw_srq.max_wr);
+                       return -EINVAL;
+               }
+
+               in_params.srq_id = srq->srq_id;
+               in_params.wqe_limit = attr->srq_limit;
+               rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
+               if (rc)
+                       return rc;
+
+               /* Cache the new limit only after the device accepted it
+                * and only when IB_SRQ_LIMIT was actually requested; the
+                * previous code overwrote srq_limit unconditionally with
+                * a possibly uninitialized attr->srq_limit.
+                */
+               srq->srq_limit = attr->srq_limit;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_SRQ,
+                "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
+
+       return 0;
+}
 
 static inline void
@@ -1299,9 +1623,17 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev,
        params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
        params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
        params->stats_queue = 0;
-       params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
        params->srq_id = 0;
        params->use_srq = false;
+
+       if (!qp->srq) {
+               params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+
+       } else {
+               params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+               params->srq_id = qp->srq->srq_id;
+               params->use_srq = true;
+       }
 }
 
 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1318,32 +1650,27 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
                 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
 }
 
-static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+/* Insert @ptr into @qidr at exactly @id (used for both the QP and SRQ
+ * lookup tables).  Returns 0 on success or a negative errno from
+ * idr_alloc().  The iWARP-only short-circuit moved to the call sites,
+ * which now decide whether registration is needed.
+ */
+static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
+                       void *ptr, u32 id)
 {
        int rc;
 
-       if (!rdma_protocol_iwarp(&dev->ibdev, 1))
-               return 0;
-
        idr_preload(GFP_KERNEL);
-       spin_lock_irq(&dev->idr_lock);
+       spin_lock_irq(&qidr->idr_lock);
 
-       rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+       rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
 
-       spin_unlock_irq(&dev->idr_lock);
+       spin_unlock_irq(&qidr->idr_lock);
        idr_preload_end();
 
        return rc < 0 ? rc : 0;
 }
 
-static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+/* Remove @id from @qidr; counterpart of qedr_idr_add(). */
+static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
 {
-       if (!rdma_protocol_iwarp(&dev->ibdev, 1))
-               return;
-
-       spin_lock_irq(&dev->idr_lock);
-       idr_remove(&dev->qpidr, id);
-       spin_unlock_irq(&dev->idr_lock);
+       spin_lock_irq(&qidr->idr_lock);
+       idr_remove(&qidr->idr, id);
+       spin_unlock_irq(&qidr->idr_lock);
 }
 
 static inline void
@@ -1356,9 +1683,10 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
 
        qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
                           &qp->usq.pbl_info, FW_PAGE_SHIFT);
-
-       qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
-       qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+       if (!qp->srq) {
+               qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+               qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+       }
 
        qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
                           &qp->urq.pbl_info, FW_PAGE_SHIFT);
@@ -1404,11 +1732,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        if (rc)
                return rc;
 
-       /* RQ - read access only (0), dma sync not required (0) */
-       rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
-                                 ureq.rq_len, 0, 0, alloc_and_init);
-       if (rc)
-               return rc;
+       if (!qp->srq) {
+               /* RQ - read access only (0), dma sync not required (0) */
+               rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+                                         ureq.rq_len, 0, 0, alloc_and_init);
+               if (rc)
+                       return rc;
+       }
 
        memset(&in_params, 0, sizeof(in_params));
        qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
@@ -1416,8 +1746,10 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        in_params.qp_handle_hi = ureq.qp_handle_hi;
        in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
        in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
-       in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
-       in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+       if (!qp->srq) {
+               in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+               in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+       }
 
        qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
                                              &in_params, &out_params);
@@ -1679,16 +2011,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
        if (rc)
                return ERR_PTR(rc);
 
-       if (attrs->srq)
-               return ERR_PTR(-EINVAL);
-
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
                 udata ? "user library" : "kernel", attrs->event_handler, pd,
                 get_qedr_cq(attrs->send_cq),
                 get_qedr_cq(attrs->send_cq)->icid,
                 get_qedr_cq(attrs->recv_cq),
-                get_qedr_cq(attrs->recv_cq)->icid);
+                attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
 
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
@@ -1715,9 +2044,11 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 
        qp->ibqp.qp_num = qp->qp_id;
 
-       rc = qedr_idr_add(dev, qp, qp->qp_id);
-       if (rc)
-               goto err;
+       if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+               rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
+               if (rc)
+                       goto err;
+       }
 
        return &qp->ibqp;
 
@@ -2289,8 +2620,9 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
 
        qedr_free_qp_resources(dev, qp);
 
-       if (atomic_dec_and_test(&qp->refcnt)) {
-               qedr_idr_remove(dev, qp->qp_id);
+       if (atomic_dec_and_test(&qp->refcnt) &&
+           rdma_protocol_iwarp(&dev->ibdev, 1)) {
+               qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
                kfree(qp);
        }
        return rc;
@@ -2305,7 +2637,7 @@ struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
        if (!ah)
                return ERR_PTR(-ENOMEM);
 
-       ah->attr = *attr;
+       rdma_copy_ah_attr(&ah->attr, attr);
 
        return &ah->ibah;
 }
@@ -2314,6 +2646,7 @@ int qedr_destroy_ah(struct ib_ah *ibah)
 {
        struct qedr_ah *ah = get_qedr_ah(ibah);
 
+       rdma_destroy_ah_attr(&ah->attr);
        kfree(ah);
        return 0;
 }
@@ -2705,9 +3038,9 @@ static void swap_wqe_data64(u64 *p)
 
 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
                                       struct qedr_qp *qp, u8 *wqe_size,
-                                      struct ib_send_wr *wr,
-                                      struct ib_send_wr **bad_wr, u8 *bits,
-                                      u8 bit)
+                                      const struct ib_send_wr *wr,
+                                      const struct ib_send_wr **bad_wr,
+                                      u8 *bits, u8 bit)
 {
        u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
        char *seg_prt, *wqe;
@@ -2790,7 +3123,7 @@ static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
        } while (0)
 
 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
-                               struct ib_send_wr *wr)
+                               const struct ib_send_wr *wr)
 {
        u32 data_size = 0;
        int i;
@@ -2814,8 +3147,8 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
                                     struct qedr_qp *qp,
                                     struct rdma_sq_rdma_wqe_1st *rwqe,
                                     struct rdma_sq_rdma_wqe_2nd *rwqe2,
-                                    struct ib_send_wr *wr,
-                                    struct ib_send_wr **bad_wr)
+                                    const struct ib_send_wr *wr,
+                                    const struct ib_send_wr **bad_wr)
 {
        rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
        DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
@@ -2837,8 +3170,8 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
                                     struct qedr_qp *qp,
                                     struct rdma_sq_send_wqe_1st *swqe,
                                     struct rdma_sq_send_wqe_2st *swqe2,
-                                    struct ib_send_wr *wr,
-                                    struct ib_send_wr **bad_wr)
+                                    const struct ib_send_wr *wr,
+                                    const struct ib_send_wr **bad_wr)
 {
        memset(swqe2, 0, sizeof(*swqe2));
        if (wr->send_flags & IB_SEND_INLINE) {
@@ -2854,7 +3187,7 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
 
 static int qedr_prepare_reg(struct qedr_qp *qp,
                            struct rdma_sq_fmr_wqe_1st *fwqe1,
-                           struct ib_reg_wr *wr)
+                           const struct ib_reg_wr *wr)
 {
        struct qedr_mr *mr = get_qedr_mr(wr->mr);
        struct rdma_sq_fmr_wqe_2nd *fwqe2;
@@ -2916,7 +3249,8 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        }
 }
 
-static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp,
+                                     const struct ib_send_wr *wr)
 {
        int wq_is_full, err_wr, pbl_is_full;
        struct qedr_dev *dev = qp->dev;
@@ -2953,8 +3287,8 @@ static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
        return true;
 }
 
-static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                    struct ib_send_wr **bad_wr)
+static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+                           const struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
        struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3168,8 +3502,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        return rc;
 }
 
-int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                  struct ib_send_wr **bad_wr)
+int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+                  const struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
        struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -3234,8 +3568,104 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        return rc;
 }
 
-int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                  struct ib_recv_wr **bad_wr)
+/* Number of WQE slots still free in the SRQ. */
+static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
+{
+       u32 used;
+
+       /* Calculate number of elements used based on producer
+        * count and consumer count and subtract it from max
+        * work request supported so that we get elements left.
+        * Unsigned subtraction keeps the result correct even when
+        * the counters wrap.
+        */
+       used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+
+       return hw_srq->max_wr - used;
+}
+
+/* Post a chain of receive work requests to an SRQ.
+ *
+ * Each WQE occupies a header chain element followed by one element per
+ * SGE.  After writing a WQE, the SGE and WQE producer indices are
+ * published to the device through the producer pair, with barriers
+ * ordering the WQE contents before the producer update.  On failure
+ * *bad_wr points at the first WR that could not be posted.
+ */
+int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+                      const struct ib_recv_wr **bad_wr)
+{
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
+       struct qedr_srq_hwq_info *hw_srq;
+       struct qedr_dev *dev = srq->dev;
+       struct qed_chain *pbl;
+       unsigned long flags;
+       int status = 0;
+       u32 num_sge;
+       u32 offset;
+
+       spin_lock_irqsave(&srq->lock, flags);
+
+       hw_srq = &srq->hw_srq;
+       pbl = &srq->hw_srq.pbl;
+       while (wr) {
+               struct rdma_srq_wqe_header *hdr;
+               int i;
+
+               if (!qedr_srq_elem_left(hw_srq) ||
+                   wr->num_sge > srq->hw_srq.max_sges) {
+                       DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
+                              hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+                              wr->num_sge, srq->hw_srq.max_sges);
+                       status = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+
+               hdr = qed_chain_produce(pbl);
+               num_sge = wr->num_sge;
+               /* Set number of sge and work request id in header */
+               SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
+
+               srq->hw_srq.wr_prod_cnt++;
+               hw_srq->wqe_prod++;
+               hw_srq->sge_prod++;
+
+               DP_DEBUG(dev, QEDR_MSG_SRQ,
+                        "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
+                        wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
+
+               for (i = 0; i < wr->num_sge; i++) {
+                       struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
+
+                       /* Set SGE length, lkey and address */
+                       SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
+                                   wr->sg_list[i].length, wr->sg_list[i].lkey);
+
+                       DP_DEBUG(dev, QEDR_MSG_SRQ,
+                                "[%d]: len %d key %x addr %x:%x\n",
+                                i, srq_sge->length, srq_sge->l_key,
+                                srq_sge->addr.hi, srq_sge->addr.lo);
+                       hw_srq->sge_prod++;
+               }
+
+               /* Flush WQE and SGE information before
+                * updating producer.
+                */
+               wmb();
+
+               /* SRQ producer is 8 bytes. Need to update SGE producer index
+                * in first 4 bytes and need to update WQE producer in
+                * next 4 bytes.
+                */
+               *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
+               offset = offsetof(struct rdma_srq_producers, wqe_prod);
+               /* Store the full 32-bit wqe_prod: the previous store went
+                * through a u8 lvalue and wrote only the low byte of the
+                * producer index.
+                */
+               *(u32 *)((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
+                       hw_srq->wqe_prod;
+
+               /* Flush producer after updating it. */
+               wmb();
+               wr = wr->next;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
+                qed_chain_get_elem_left(pbl));
+       spin_unlock_irqrestore(&srq->lock, flags);
+
+       return status;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+                  const struct ib_recv_wr **bad_wr)
 {
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qedr_dev *dev = qp->dev;
@@ -3625,6 +4055,31 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
        wc->wr_id = wr_id;
 }
 
+/* Generate a work completion for an SRQ responder CQE.
+ *
+ * The wr_id travels inside the CQE itself (unlike regular RQs, where
+ * it is recovered from the queue).  Flushed WQEs are completed with
+ * IB_WC_WR_FLUSH_ERR directly; everything else goes through
+ * __process_resp_one().  Always consumes one SRQ WQE and reports one
+ * completion.
+ */
+static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+                               struct qedr_cq *cq, struct ib_wc *wc,
+                               struct rdma_cqe_responder *resp)
+{
+       struct qedr_srq *srq = qp->srq;
+       u64 wr_id;
+
+       wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
+                        le32_to_cpu(resp->srq_wr_id.lo), u64);
+
+       if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+               wc->status = IB_WC_WR_FLUSH_ERR;
+               wc->vendor_err = 0;
+               wc->byte_len = 0;
+               wc->src_qp = qp->id;
+               wc->qp = &qp->ibqp;
+               wc->wr_id = wr_id;      /* was assigned twice; once is enough */
+       } else {
+               __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+       }
+       srq->hw_srq.wr_cons_cnt++;
+
+       return 1;
+}
+
 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
                            struct qedr_cq *cq, struct ib_wc *wc,
                            struct rdma_cqe_responder *resp)
@@ -3674,6 +4129,19 @@ static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
        }
 }
 
+/* Handle one SRQ responder CQE during poll: translate it into a work
+ * completion and consume the CQE.  Returns the number of completions
+ * produced (always 1).  num_entries is currently unused.
+ */
+static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+                                struct qedr_cq *cq, int num_entries,
+                                struct ib_wc *wc,
+                                struct rdma_cqe_responder *resp)
+{
+       int cnt;
+
+       cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
+       consume_cqe(cq);
+
+       return cnt;
+}
+
 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
                             struct qedr_cq *cq, int num_entries,
                             struct ib_wc *wc, struct rdma_cqe_responder *resp,
@@ -3751,6 +4219,11 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                        cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
                                                &cqe->resp, &update);
                        break;
+               case RDMA_CQE_TYPE_RESPONDER_SRQ:
+                       cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
+                                                   wc, &cqe->resp);
+                       update = 1;
+                       break;
                case RDMA_CQE_TYPE_INVALID:
                default:
                        DP_ERR(dev, "Error: invalid CQE type = %d\n",