// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
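
/* For orientation, a rough sketch of one registration cycle as the
 * generic rpcrdma code drives it. This is illustrative only: the
 * real call sites go through the rpcrdma_memreg_ops vtable defined
 * at the bottom of this file.
 *
 *	seg = fmr_op_map(r_xprt, seg, nsegs, writing, &mr);
 *		... post the RPC Call; the server performs
 *		... RDMA READ or WRITE against mr->mr_handle
 *	fmr_op_unmap_sync(r_xprt, &req->rl_registered);
 */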

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
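
/* With the common 4KB PAGE_SIZE (an assumption; PAGE_SHIFT is
 * architecture-dependent), one FMR can therefore map up to
 * 64 * 4096 = 256KB of physically discontiguous payload.
 */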

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	/* The device must provide the FMR verbs for this mode to work */
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mr->fmr.fm_physaddrs)
		goto out_free;

	mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_free;

	sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

	mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mr->fmr.fm_mr))
		goto out_fmr_err;

	INIT_LIST_HEAD(&mr->mr_list);
	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mr->fmr.fm_mr));

out_free:
	kfree(mr->mr_sg);
	kfree(mr->fmr.fm_physaddrs);
	return -ENOMEM;
}
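
/* Note that fm_physaddrs holds RPCRDMA_MAX_FMR_SGES u64 entries,
 * matching fmr_attr.max_pages above, so fmr_op_map() can copy one
 * DMA address per mapped page without bounds checks.
 */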

static int
__fmr_unmap(struct rpcrdma_mr *mr)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mr->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mr->fmr.fm_mr->list);
	return rc;
}
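
/* ib_unmap_fmr() takes a list of FMRs rather than a single one, so
 * __fmr_unmap() places its one FMR on a temporary on-stack list for
 * the duration of the call.
 */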

static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
	LIST_HEAD(unmap_list);
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&mr->mr_list))
		list_del(&mr->mr_list);

	kfree(mr->fmr.fm_physaddrs);
	kfree(mr->mr_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(mr);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       mr, rc);

	rc = ib_dealloc_fmr(mr->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       mr, rc);

	kfree(mr);
}

/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mr);
	if (rc)
		goto out_release;

	/* ORDER: then DMA unmap */
	rpcrdma_mr_unmap_and_put(mr);

	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	fmr_op_release_mr(mr);
}
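
/* Within this file, fmr_op_recover_mr() is reached from
 * fmr_op_unmap_sync() when a batched ib_unmap_fmr() call fails:
 * each affected MR is either invalidated and returned to the free
 * list, or torn down and counted as orphaned.
 */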

static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}
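
/* ri_max_segs is the number of FMR-sized chunks needed to convey
 * RPCRDMA_MAX_DATA_SEGS pages. The max_t() guards against a zero
 * result when RPCRDMA_MAX_DATA_SEGS is smaller than one FMR's
 * capacity.
 */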

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
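
/* The min_t() reflects two independent ceilings: the transport
 * cannot convey more pages than the generic layer supports
 * (RPCRDMA_MAX_DATA_SEGS), nor more than it can describe in a
 * transport header (RPCRDMA_MAX_HDR_SEGS chunk segments, each
 * carrying up to RPCRDMA_MAX_FMR_SGES pages).
 */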

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mr *mr;
	u64 *dma_pages;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return ERR_PTR(-EAGAIN);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: an FMR maps a single virtually
		 * contiguous region, so stop at the first unaligned
		 * boundary.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
	rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mr->mr_handle = mr->fmr.fm_mr->rkey;
	mr->mr_length = len;
	mr->mr_offset = dma_pages[0] + pageoff;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mr->mr_nents, rc);
	rpcrdma_mr_unmap_and_put(mr);
	return ERR_PTR(-EIO);
}
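
/* On success fmr_op_map() returns a pointer to the next unmapped
 * segment and fills in *out; the caller advertises mr->mr_handle,
 * mr->mr_length and mr->mr_offset as one chunk in the
 * RPC-over-RDMA transport header.
 */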

/* Post Send WR containing the RPC Call message.
 */
static int
fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *bad_wr;

	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mr, mrs, mr_list) {
		dprintk("RPC:       %s: unmapping fmr %p\n",
			__func__, &mr->fmr);
		trace_xprtrdma_localinv(mr);
		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_unmap_and_put(mr);
	}

	return;

out_release:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		fmr_op_recover_mr(mr);
	}
}
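
/* Batching all of a request's FMRs into one ib_unmap_fmr() call
 * amortizes the tens-of-microseconds deregistration cost noted at
 * the top of this file across every MR the request used.
 */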

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_send			= fmr_op_send,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};
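
/* A rough sketch of how a transport ends up on this vtable (the
 * exact selection logic lives in verbs.c and may differ by kernel
 * version): when the preferred FRWR mode is not supported by the
 * device, connection setup can fall back to these ops after
 * fmr_is_supported() returns true, and all registration calls then
 * dispatch through the transport's ri_ops pointer.
 */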