// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

/*
 * The rdma_rxe driver supports type 1 or type 2B memory windows.
 * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by
 * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw()
 * but bound by bind_mw work requests. The ibv_bind_mw() call is converted
 * by libibverbs to a bind_mw work request.
 */

#include "rxe.h"
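/*
 * For illustration only, a rough sketch of the userspace flow described
 * above, using standard libibverbs calls (pd, qp, mr, buf and len are
 * assumed to exist in the caller and all error handling is omitted):
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
 *	struct ibv_send_wr wr = {}, *bad_wr;
 *
 *	wr.opcode = IBV_WR_BIND_MW;
 *	wr.bind_mw.mw = mw;
 *	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
 *	wr.bind_mw.bind_info.mr = mr;
 *	wr.bind_mw.bind_info.addr = (uintptr_t)buf;
 *	wr.bind_mw.bind_info.length = len;
 *	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;
 *	ibv_post_send(qp, &wr, &bad_wr);
 *
 * A type 1 MW would instead pass IBV_MW_TYPE_1 to ibv_alloc_mw() and be
 * bound with ibv_bind_mw(qp, mw, &mw_bind), which libibverbs converts to
 * the same bind_mw work request before it reaches this driver.
 */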
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	/* rkey: pool index in the upper 24 bits, variable 8-bit key below */
	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	return 0;
}
int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);

	rxe_cleanup(mw);

	return 0;
}
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr, int access)
{
	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		if (unlikely((access & IB_ZERO_BASED))) {
			rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			rxe_dbg_mw(mw,
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			rxe_dbg_mw(mw,
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	/* remaining checks only apply to a nonzero MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		rxe_dbg_mw(mw,
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	if (unlikely((access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		rxe_dbg_mw(mw,
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	if (access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
			rxe_dbg_mw(mw,
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->ibmr.iova + mr->ibmr.length)))) {
			rxe_dbg_mw(mw,
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}
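/*
 * Called by rxe_bind_mw() with mw->lock held, once rxe_check_bind_mw()
 * has accepted the bind request.
 */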
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr, int access)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	/* release any MR bound by a previous bind of this MW */
	if (mw->mr) {
		atomic_dec(&mw->mr->num_mw);
		rxe_put(mw->mr);
		mw->mr = NULL;
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
	int access = wqe->wr.wr.mw.access;
	struct rxe_mw *mw;
	struct rxe_mr *mr = NULL;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw))
		return -EINVAL;

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}
		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	}

	if (access & ~RXE_ACCESS_SUPPORTED_MW) {
		rxe_err_mw(mw, "access %#x not supported", access);
		ret = -EOPNOTSUPP;
		goto err_drop_mr;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
	if (!ret)
		rxe_do_bind_mw(qp, wqe, mw, mr, access);

	spin_unlock_bh(&mw->lock);

err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);

	return ret;
}
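/*
 * Invalidating a memory window returns a bound type 2 MW to the free
 * state so that it can be bound again; type 1 MWs and MWs that are
 * already invalid are rejected.
 */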
static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* a type 1 MW cannot be invalidated */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}
static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_put(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw)
		return -EINVAL;

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);

	return ret;
}
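/*
 * Look up and take a reference on the MW matching rkey; returns NULL
 * unless the rkey, PD, bound QP (for a type 2 MW), access rights and
 * MW state all check out.
 */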
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) || ((access & mw->access) != access) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}
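/*
 * Pool cleanup callback; releases the PD reference taken at allocation
 * and any MR and QP references still held by the MW.
 */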
void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
	struct rxe_pd *pd = to_rpd(mw->ibmw.pd);

	rxe_put(pd);

	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}