/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
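
/* A rough sketch of the FRWR life cycle as implemented in this file
 * (a reader's summary, not text from the original source):
 *
 *   1. frwr_op_map() posts a FAST_REG_MR Work Request to register a
 *      page list for remote access and records the resulting rkey,
 *      which is then advertised to the server in the RPC call.
 *   2. The server moves data with RDMA READ or RDMA WRITE.
 *   3. frwr_op_unmap() posts a LOCAL_INV Work Request to revoke the
 *      rkey before the pages are returned for reuse.
 */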

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
	if (IS_ERR(f->fr_mr)) {
		rc = PTR_ERR(f->fr_mr);
		dprintk("RPC:       %s: ib_alloc_fast_reg_mr status %i\n",
			__func__, rc);
		return rc;
	}
	f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
	if (IS_ERR(f->fr_pgl)) {
		rc = PTR_ERR(f->fr_pgl);
		dprintk("RPC:       %s: ib_alloc_fast_reg_page_list status %i\n",
			__func__, rc);
		ib_dereg_mr(f->fr_mr);
		return rc;
	}
	return 0;
}
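
/* __frwr_init() is called from frwr_op_init() below, when the
 * transport's MW list is first populated, and again from
 * frwr_op_reset(), which re-creates the MR and page list of any
 * FRMR left in a non-INVALID state by a transport disconnect.
 */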

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr status %i\n",
			__func__, rc);
	ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      devattr->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 * (a worked example of this accounting follows the loop below)
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
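
	/* Worked example (illustrative numbers, not from the original
	 * source): assume RPCRDMA_MAX_DATA_SEGS is 64 and the device
	 * reports a max_fast_reg_page_list_len of 16, so
	 * ri_max_frmr_depth is 16. Then delta starts at 64 - 16 = 48
	 * and the loop above runs three times (48 -> 32 -> 16 -> 0),
	 * adding one reg + invalidate pair per pass:
	 * depth = 7 + 3 * 2 = 13 send queue slots per RPC request.
	 */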

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
		cdata->max_requests = devattr->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}
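
/* Continuing the illustrative numbers above: with depth = 13 and a
 * credit limit of 32 requests, max_send_wr would be 32 * 13 = 416.
 * If the device's max_qp_wr is only 256, max_requests is clamped to
 * 256 / 13 = 19 and max_send_wr to 19 * 13 = 247, so the send queue
 * always fits within what the hardware can allocate.
 */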

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}
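
/* The product above can exceed what one RPC can actually carry: for
 * example (illustrative numbers again), 8 chunk segments times a
 * page list depth of 16 is 128 pages, which min_t() caps at
 * RPCRDMA_MAX_DATA_SEGS (64 under the same assumption). The cap
 * keeps the advertised payload limit consistent with the size of
 * the transport's own segment array.
 */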

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i, rc;

	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC:       %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;
		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_mr *mr = frmr->fr_mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	int i, rc, len, pageoff, seg_len, page_no;
	u64 pa;
	u8 key;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
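
	/* A "hole" here is a segment boundary that is not
	 * page-aligned. The FRMR describes one virtually contiguous
	 * range of pages, so mapping must stop at the first interior
	 * segment that does not start, or whose predecessor does not
	 * end, on a page boundary.
	 */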
	dprintk("RPC:       %s: Using frmr %p to map %d segments (%d bytes)\n",
		__func__, mw, i, len);

	frmr->fr_state = FRMR_IS_VALID;

	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.length = len;
	fastreg_wr.wr.fast_reg.access_flags = writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ;
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
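
	/* Bumping the rkey's low-order byte just above gives each
	 * reuse of this FRMR a distinct rkey (the usual motivation
	 * for fast_reg key rotation): a peer still holding the rkey
	 * from an earlier registration cannot match the new one.
	 */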

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_senderr:
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	ib_update_fast_reg_key(mr, --key);
	frmr->fr_state = FRMR_IS_INVALID;
	while (i--)
		rpcrdma_unmap_one(ia, --seg);
	return rc;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	/* ri_qplock keeps the QP from being replaced (e.g. by the
	 * connect worker) while the invalidate is posted.
	 */
	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;
	return nsegs;

out_err:
	/* Force rpcrdma_buffer_get() to retry */
	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	return nsegs;
}

/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
 * an unusable state. Find FRMRs in this state and dereg / reg
 * each. FRMRs that are VALID and attached to an rpcrdma_req are
 * also torn down and re-initialized.
 *
 * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
 *
 * This is invoked only in the transport connect worker in order
 * to serialize with rpcrdma_register_frmr_external().
 */
static void
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int rc;

	list_for_each_entry(r, &buf->rb_all, mw_all) {
		if (r->r.frmr.fr_state == FRMR_IS_INVALID)
			continue;

		__frwr_release(r);
		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			dprintk("RPC:       %s: mw %p left %s\n",
				__func__, r,
				(r->r.frmr.fr_state == FRMR_IS_STALE ?
					"stale" : "valid"));
			continue;
		}

		r->r.frmr.fr_state = FRMR_IS_INVALID;
	}
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap			= frwr_op_unmap,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_reset			= frwr_op_reset,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};
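
/* The transport selects this vector at connect time when the device
 * supports fast registration; the rest of xprtrdma then reaches
 * these methods indirectly through ia->ri_ops (for example,
 * ia->ri_ops->ro_map() from the marshaling code). The selection
 * site itself is not part of this file.
 */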