atomic_set(&qp->skb_out, 0);
}

+static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
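+
+ /* the sge array and any inline data share the same space in the
+  * send wqe, so size it for whichever is larger; max_sge and
+  * max_inline are then both derived from that single size
+  */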
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
+
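+ /* the client posting work requests is the producer for this queue */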
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->sq.queue) {
+ rxe_err_qp(qp, "Unable to allocate send queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap send queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+ qp->sq.queue->buf, qp->sq.queue->buf_size,
+ &qp->sq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ return 0;
+
+err_free:
+ vfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
+err_out:
+ return err;
+}
+
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init, struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->req_pkts);

err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
qp->sk->sk->sk_user_data = qp;

/* pick a source UDP port number for this QP based on
 * the source QPN. this spreads traffic for different QPs
 * across different NIC RX queues (while using a single
 * flow label). The port number is between
 * (0xc000 - 0xffff).
 */
qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
- qp->sq.max_wr = init->cap.max_send_wr;
-
- /* These caps are limited by rxe_qp_chk_cap() done by the caller */
- wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
- init->cap.max_inline_data);
- qp->sq.max_sge = init->cap.max_send_sge =
- wqe_size / sizeof(struct ib_sge);
- qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
- wqe_size += sizeof(struct rxe_send_wqe);
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
- wqe_size, type);
- if (!qp->sq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
- qp->sq.queue->buf, qp->sq.queue->buf_size,
- &qp->sq.queue->ip);
-
- if (err) {
- vfree(qp->sq.queue->buf);
- kfree(qp->sq.queue);
- qp->sq.queue = NULL;
+ err = rxe_init_sq(qp, init, udata, uresp);
+ if (err)
return err;
- }
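
+ /* start the requester at the queue's current producer index */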
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
QUEUE_TYPE_FROM_CLIENT);
return 0;
}

+static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
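+ /* each recv wqe holds a fixed header followed by an array of max_sge sges */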
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ qp->rq.max_sge * sizeof(struct ib_sge);
+
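+ /* as for the send queue, the verbs client is the producer */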
+ qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->rq.queue) {
+ rxe_err_qp(qp, "Unable to allocate recv queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap recv queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+ qp->rq.queue->buf, qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_recv_wr = qp->rq.max_wr;
+
+ return 0;
+
+err_free:
+ vfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
+err_out:
+ return err;
+}
+
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->resp_pkts);

if (!qp->srq) {
- qp->rq.max_wr = init->cap.max_recv_wr;
- qp->rq.max_sge = init->cap.max_recv_sge;
-
- wqe_size = rcv_wqe_size(qp->rq.max_sge);
-
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
- wqe_size, type);
- if (!qp->rq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
- qp->rq.queue->buf, qp->rq.queue->buf_size,
- &qp->rq.queue->ip);
- if (err) {
- vfree(qp->rq.queue->buf);
- kfree(qp->rq.queue);
- qp->rq.queue = NULL;
+ err = rxe_init_rq(qp, init, udata, uresp);
+ if (err)
return err;
- }
}

rxe_init_task(&qp->resp.task, qp, rxe_responder);
if (srq)
rxe_get(srq);
- qp->pd			= pd;
- qp->rcq			= rcq;
- qp->scq			= scq;
- qp->srq			= srq;
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;

atomic_inc(&rcq->num_wq);
atomic_inc(&scq->num_wq);