return skb;
}
+/* Reserve the next slot in the qp's responder resource ring and
+ * initialize it to track an RDMA read reply for the request in @pkt.
+ * Whatever previously occupied the slot is released first.
+ *
+ * NOTE(review): unlike the code this patch replaces, the MR is not
+ * cached in the resource — only the rkey is stored (res->read.rkey),
+ * so replay paths must revalidate the MR by rkey (see rxe_recheck_mr).
+ *
+ * Returns the initialized resource (never NULL).
+ */
+static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ /* claim the slot at res_head and recycle its previous contents */
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(qp, res);
+
+ res->type = RXE_READ_MASK;
+ res->replay = 0;
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ /* one response packet per MTU of the requested length, rounded up;
+ * at least one packet even for a zero-length read
+ */
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+
+ return res;
+}
+
+/**
+ * rxe_recheck_mr - revalidate MR from rkey and get a reference
+ * @qp: the qp
+ * @rkey: the rkey
+ *
+ * This code allows the MR to be invalidated or deregistered or
+ * the MW if one was used to be invalidated or deallocated.
+ * It is assumed that the access permissions if originally good
+ * are OK and the mappings to be unchanged.
+ *
+ * The caller owns the returned reference and must release it with
+ * rxe_drop_ref() when done with the MR.
+ *
+ * Return: mr on success else NULL
+ */
+static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mr *mr;
+ struct rxe_mw *mw;
+
+ if (rkey_is_mw(rkey)) {
+ mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
+ if (!mw)
+ return NULL;
+
+ /* Validate the MW and its bound MR while the MW reference
+ * is still held; an unbound MW has mw->mr == NULL. Take a
+ * reference on the MR before dropping the MW reference so
+ * the MR cannot go away underneath us — the caller will
+ * drop this MR reference when it is done.
+ */
+ mr = mw->mr;
+ if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
+ !mr || mr->state != RXE_MR_STATE_VALID) {
+ rxe_drop_ref(mw);
+ return NULL;
+ }
+
+ rxe_add_ref(mr);
+ rxe_drop_ref(mw);
+
+ return mr;
+ }
+
+ mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ if (!mr)
+ return NULL;
+
+ /* drop the lookup reference on any mismatch — returning NULL
+ * without dropping it would leak the reference
+ */
+ if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
+ rxe_drop_ref(mr);
+ return NULL;
+ }
+
+ return mr;
+}
+
/* RDMA read response. If res is not NULL, then we have a current RDMA request
* being processed or replayed.
*/
int opcode;
int err;
struct resp_res *res = qp->resp.res;
+ struct rxe_mr *mr;
if (!res) {
- /* This is the first time we process that request. Get a
- * resource
- */
- res = &qp->resp.resources[qp->resp.res_head];
-
- free_rd_atomic_resource(qp, res);
- rxe_advance_resp_resource(qp);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
-
- res->read.va = qp->resp.va +
- qp->resp.offset;
- res->read.va_org = qp->resp.va +
- qp->resp.offset;
-
- res->first_psn = req_pkt->psn;
-
- if (reth_len(req_pkt)) {
- res->last_psn = (req_pkt->psn +
- (reth_len(req_pkt) + mtu - 1) /
- mtu - 1) & BTH_PSN_MASK;
- } else {
- res->last_psn = res->first_psn;
- }
- res->cur_psn = req_pkt->psn;
-
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- /* note res inherits the reference to mr from qp */
- res->read.mr = qp->resp.mr;
- qp->resp.mr = NULL;
-
- qp->resp.res = res;
- res->state = rdatm_res_state_new;
+ res = rxe_prepare_read_res(qp, req_pkt);
+ qp->resp.res = res;
}
if (res->state == rdatm_res_state_new) {
+ mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+
if (res->read.resid <= mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
else
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
} else {
+ mr = rxe_recheck_mr(qp, res->read.rkey);
+ if (!mr)
+ return RESPST_ERR_RKEY_VIOLATION;
+
if (res->read.resid > mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
else
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
payload, RXE_FROM_MR_OBJ);
if (err)
pr_err("Failed copying memory\n");
+ if (mr)
+ rxe_drop_ref(mr);
if (bth_pad(&ack_pkt)) {
u8 *pad = payload_addr(&ack_pkt) + payload;