unsigned long flags = 0;
void *wqe = NULL;
__le32 doorbell[2];
+ u32 wqe_idx = 0;
int nreq = 0;
- u32 ind = 0;
int ret = 0;
u8 *smac;
int loopback;
}
spin_lock_irqsave(&qp->sq.lock, flags);
- ind = qp->sq_next_wqe;
+
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
ret = -ENOMEM;
goto out;
}
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, qp->sq.max_gs);
goto out;
}
- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
- wr->wr_id;
+ wqe = get_send_wqe(qp, wqe_idx);
+ qp->sq.wrid[wqe_idx] = wr->wr_id;
/* Process GSI (UD) and RC type WQEs through separate paths */
if (ibqp->qp_type == IB_QPT_GSI) {
cpu_to_le32((wr->sg_list[1].addr) >> 32);
ud_sq_wqe->l_key1 =
cpu_to_le32(wr->sg_list[1].lkey);
- ind++;
} else if (ibqp->qp_type == IB_QPT_RC) {
u32 tmp_len = 0;
ctrl->flag |= cpu_to_le32(wr->num_sge <<
HNS_ROCE_WQE_SGE_NUM_BIT);
}
- ind++;
}
}
doorbell[1] = sq_db.u32_8;
hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
- qp->sq_next_wqe = ind;
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
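The hunk above is the core of the change: instead of carrying a cached
sq_next_wqe cursor across calls, the slot index is derived from the queue
head on every iteration. A minimal standalone sketch of that indexing
(illustrative names only, not driver code; it assumes the ring size is a
power of two, as qp->sq.wqe_cnt is here):

#include <stdint.h>

/* Slot used by the i-th request of the current batch: recomputed from
 * the producer head each time, so no separate "next wqe" counter has
 * to be kept in sync between posts. */
static inline uint32_t ring_slot(uint32_t head, uint32_t nreq, uint32_t ring_size)
{
	/* ring_size must be a power of two for the mask to work */
	return (head + nreq) & (ring_size - 1);
}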
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
- int ret = 0;
- int nreq = 0;
- int ind = 0;
- int i = 0;
- u32 reg_val;
- unsigned long flags = 0;
struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
struct hns_roce_wqe_data_seg *scat = NULL;
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_rq_db rq_db;
__le32 doorbell[2] = {0};
+ unsigned long flags = 0;
+ unsigned int wqe_idx;
+ int ret = 0;
+ int nreq = 0;
+ int i = 0;
+ u32 reg_val;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
goto out;
}
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
goto out;
}
- ctrl = get_recv_wqe(hr_qp, ind);
+ ctrl = get_recv_wqe(hr_qp, wqe_idx);
roce_set_field(ctrl->rwqe_byte_12,
RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
for (i = 0; i < wr->num_sge; i++)
set_data_seg(scat + i, wr->sg_list + i);
- hr_qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
}
kfree(context);
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
}
out:
kfree(context);
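With the cursor gone, the reset paths above only need to clear head and
tail. A hedged sketch of what that simplification amounts to (hypothetical
struct, not the driver's):

#include <stdint.h>

struct sketch_wq {
	uint32_t head;
	uint32_t tail;
	uint32_t wqe_cnt;	/* power of two */
};

/* Nothing besides head and tail needs resetting: the WQE index is
 * recomputed from head on every post, so there is no cached cursor
 * that could go stale. */
static void sketch_wq_reset(struct sketch_wq *wq)
{
	wq->head = 0;
	wq->tail = 0;
}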
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
- unsigned int sge_ind;
unsigned int owner_bit;
+ unsigned int sge_idx;
+ unsigned int wqe_idx;
unsigned long flags;
- unsigned int ind;
void *wqe = NULL;
bool loopback;
int attr_mask;
}
spin_lock_irqsave(&qp->sq.lock, flags);
- ind = qp->sq_next_wqe;
- sge_ind = qp->next_sge;
+ sge_idx = qp->next_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
goto out;
}
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, qp->sq.max_gs);
goto out;
}
- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
- wr->wr_id;
-
+ wqe = get_send_wqe(qp, wqe_idx);
+ qp->sq.wrid[wqe_idx] = wr->wr_id;
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
tmp_len = 0;
roce_set_field(ud_sq_wqe->byte_20,
V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
+ sge_idx & (qp->sge.sge_cnt - 1));
roce_set_field(ud_sq_wqe->byte_24,
V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
GID_LEN_V2);
- set_extend_sge(qp, wr, &sge_ind);
- ind++;
+ set_extend_sge(qp, wr, &sge_idx);
} else if (ibqp->qp_type == IB_QPT_RC) {
rc_sq_wqe = wqe;
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
wr->num_sge);
} else if (wr->opcode != IB_WR_REG_MR) {
ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
- wqe, &sge_ind, bad_wr);
+ wqe, &sge_idx, bad_wr);
if (ret)
goto out;
}
-
- ind++;
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
spin_unlock_irqrestore(&qp->sq.lock, flags);
hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
- qp->sq_next_wqe = ind;
- qp->next_sge = sge_ind;
+ qp->next_sge = sge_idx;
if (qp->state == IB_QPS_ERR) {
attr_mask = IB_QP_STATE;
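Note that, unlike wqe_idx, the extended-SGE cursor still has to persist
across calls (it is written back to qp->next_sge above), because extended
SGEs are consumed at a variable rate per WQE. A rough sketch of how such a
cursor advances, with hypothetical names and the same power-of-two masking
assumption:

#include <stdint.h>

/* Each data segment takes one slot in the extended-SGE ring; the raw
 * running count is advanced and handed back to the caller, and only
 * the masked value selects a slot. */
static void sketch_fill_ext_sge(uint32_t *sge_cursor, uint32_t sge_ring_size, int nsge)
{
	int i;

	for (i = 0; i < nsge; i++) {
		uint32_t slot = *sge_cursor & (sge_ring_size - 1);

		/* ... copy the i-th segment into 'slot' ... */
		(void)slot;
		(*sge_cursor)++;
	}
}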
unsigned long flags;
void *wqe = NULL;
int attr_mask;
+ u32 wqe_idx;
int ret = 0;
int nreq;
- int ind;
int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
if (hr_qp->state == IB_QPS_RESET) {
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
goto out;
}
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
goto out;
}
- wqe = get_recv_wqe(hr_qp, ind);
+ wqe = get_recv_wqe(hr_qp, wqe_idx);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
/* RQ supports inline data */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
- sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
- hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+ sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr =
}
}
- hr_qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
hr_qp->rq.tail = 0;
hr_qp->sq.head = 0;
hr_qp->sq.tail = 0;
- hr_qp->sq_next_wqe = 0;
hr_qp->next_sge = 0;
if (hr_qp->rq.wqe_cnt)
*hr_qp->rdb.db_record = 0;
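On the receive side the same derived index ties the WQE, the wr_id
bookkeeping and, when HNS_ROCE_CAP_FLAG_RQ_INLINE is set, the inline
sg_list to a single slot. A compact sketch with hypothetical types:

#include <stdint.h>

struct sketch_rq {
	uint32_t head;
	uint32_t wqe_cnt;	/* power of two */
	uint64_t *wrid;
};

static void sketch_post_one_recv(struct sketch_rq *rq, uint32_t nreq, uint64_t wr_id)
{
	uint32_t wqe_idx = (rq->head + nreq) & (rq->wqe_cnt - 1);

	rq->wrid[wqe_idx] = wr_id;
	/* ... fill the data segments of the WQE at wqe_idx ... */
}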