int i;
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
- if (le32_to_cpu(rc_sq_wqe->msg_len) >
- hr_dev->caps.max_sq_inline) {
+ if (unlikely(le32_to_cpu(rc_sq_wqe->msg_len) >
+ hr_dev->caps.max_sq_inline)) {
ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len,
hr_dev->caps.max_sq_inline);
return -EINVAL;
}
- if (wr->opcode == IB_WR_RDMA_READ) {
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
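For reference, the likely()/unlikely() hints being added throughout these paths are the kernel's standard branch-prediction annotations. Simplified (ignoring the instrumented variants built under branch profiling), they expand as in include/linux/compiler.h:

/* Simplified definitions: __builtin_expect() tells the compiler which
 * way the branch is expected to go, so the error path can be laid out
 * off the hot path. Functional behaviour is unchanged.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)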
spin_lock_irqsave(&qp->sq.lock, flags);
ret = check_send_valid(hr_dev, qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
else if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
goto out;
}
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ret = check_recv_valid(hr_dev, hr_qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
}
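As a rough illustration of why the overflow test sits under unlikely() (a hedged sketch, not hns_roce_wq_overflow() itself; the head/tail/max_post names are assumptions), a producer-side ring check only fails when the caller has already posted more work requests than the queue depth allows:

/* Hypothetical ring-fullness test; wrap-safe with unsigned arithmetic.
 * Returning true is the exceptional case, hence unlikely() at the call
 * site.
 */
static inline bool wq_would_overflow(u32 head, u32 tail, u32 max_post,
				     u32 nreq)
{
	u32 outstanding = head - tail;

	return outstanding + nreq >= max_post;
}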
wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (wqe_idx < 0) {
+ if (unlikely(wqe_idx < 0)) {
ret = -ENOMEM;
*bad_wr = wr;
break;
}
wqe_buf += size;
}
- if (data_len) {
+ if (unlikely(data_len)) {
wc->status = IB_WC_LOC_LEN_ERR;
return -EAGAIN;
}
break;
}
- if (wc->status == IB_WC_SUCCESS || wc->status == IB_WC_WR_FLUSH_ERR)
+ if (likely(wc->status == IB_WC_SUCCESS ||
+ wc->status == IB_WC_WR_FLUSH_ERR))
return;
ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
}
get_cqe_status(hr_dev, *cur_qp, cqe, wc);
- if (wc->status != IB_WC_SUCCESS)
+ if (unlikely(wc->status != IB_WC_SUCCESS))
return 0;
if (is_send) {
opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
(roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (ret)
+ if (unlikely(ret))
return -EAGAIN;
}