// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
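
/* Illustrative call sequence (a sketch, not part of the transport code
 * below): the FMR lifecycle described above, assuming a protection
 * domain, an already DMA-mapped page list, and a starting address.
 * All identifiers here (pd, fmr_attr, page_list, npages, iova,
 * fmr_list) are placeholders.
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE |
 *			   IB_ACCESS_REMOTE_READ, &fmr_attr);
 *	rc = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	... advertise fmr->rkey to the peer, wait for RDMA to complete ...
 *	rc = ib_unmap_fmr(&fmr_list);
 *	rc = ib_dealloc_fmr(fmr);
 */
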
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};
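
/* These MRs expose RPC payload for direct access by the remote peer:
 * the server performs RDMA Read of Call data (REMOTE_READ) and RDMA
 * Write of Reply data (REMOTE_WRITE), which is why only remote access
 * rights are requested when each FMR is allocated.
 */
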
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mr->fmr.fm_physaddrs)
		goto out_free;

	mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_free;

	sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

	mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mr->fmr.fm_mr))
		goto out_fmr_err;

	INIT_LIST_HEAD(&mr->mr_list);
	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mr->fmr.fm_mr));

out_free:
	kfree(mr->mr_sg);
	kfree(mr->fmr.fm_physaddrs);
	return -ENOMEM;
}

static int
__fmr_unmap(struct rpcrdma_mr *mr)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mr->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mr->fmr.fm_mr->list);
	return rc;
}

static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
	LIST_HEAD(unmap_list);
	int rc;

	kfree(mr->fmr.fm_physaddrs);
	kfree(mr->mr_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(mr);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       mr, rc);

	rc = ib_dealloc_fmr(mr->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       mr, rc);

	kfree(mr);
}

/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mr);
	if (rc)
		goto out_release;

	/* ORDER: then DMA unmap */
	rpcrdma_mr_unmap_and_put(mr);

	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	/* Unmapping failed: DMA unmap the MR, unlink it from the
	 * transport's MR list, and destroy it.
	 */
	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	fmr_op_release_mr(mr);
}

/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 */
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	int max_qp_wr;

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}
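
/* Worked example with a hypothetical device limit: if the device
 * reports attrs.max_qp_wr = 16384, the slot budget becomes
 * 16384 - RPCRDMA_BACKWARD_WRS - 1, and cdata->max_requests is clamped
 * to that value. The send and receive queue depths are then each sized
 * to max_requests plus RPCRDMA_BACKWARD_WRS plus one extra WR reserved
 * for ib_drain_sq() or ib_drain_rq().
 */
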
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
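
/* For example, with 4KB pages a single FMR covers up to
 * RPCRDMA_MAX_FMR_SGES (64) pages, or 256KB of payload per chunk
 * segment. The value returned above is therefore the smaller of the
 * transport's overall data-segment limit and the number of header
 * segments times 64 pages.
 */
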
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mr *mr;
	u64 *dma_pages;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return ERR_PTR(-EAGAIN);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: an FMR addresses one logically
		 * contiguous range of pages, so stop coalescing at the
		 * first segment that would leave a gap.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;
	trace_xprtrdma_dma_map(mr);

	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
	rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mr->mr_handle = mr->fmr.fm_mr->rkey;
	mr->mr_length = len;
	mr->mr_offset = dma_pages[0] + pageoff;

	*out = mr;
	return seg;

264 pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
267 return ERR_PTR(-EIO);
270 pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
271 len, (unsigned long long)dma_pages[0],
272 pageoff, mr->mr_nents, rc);
273 rpcrdma_mr_unmap_and_put(mr);
274 return ERR_PTR(-EIO);
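
/* Usage note: ->ro_map handles at most RPCRDMA_MAX_FMR_SGES segments
 * per call and returns a pointer just past the last segment it
 * consumed, so the chunk encoders invoked from rpcrdma_marshal_req()
 * are expected to call it repeatedly until every segment of a chunk
 * has been registered.
 */
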
/* Post Send WR containing the RPC Call message.
 */
static int
fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *bad_wr;

	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mr, mrs, mr_list) {
		dprintk("RPC: %s: unmapping fmr %p\n",
			__func__, &mr->fmr);
		trace_xprtrdma_localinv(mr);
		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MR list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_unmap_and_put(mr);
	}

	return;

330 pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
332 while (!list_empty(mrs)) {
333 mr = rpcrdma_mr_pop(mrs);
334 list_del(&mr->fmr.fm_mr->list);
335 fmr_op_recover_mr(mr);
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_send			= fmr_op_send,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};
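
/* Selection sketch: connection setup picks a registration strategy
 * before creating the QP. A minimal sketch of that decision, assuming
 * the support checks used by this transport (frwr_is_supported() and
 * fmr_is_supported()) and a struct rpcrdma_ia *ia whose ri_ops field
 * points at the chosen ops table:
 *
 *	if (frwr_is_supported(ia))
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *	else if (fmr_is_supported(ia))
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *	else
 *		return -EINVAL;
 */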