// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWARP devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
        if (rdma_protocol_iwarp(dev, port_num))
        if (dev->attrs.max_sgl_rd)
        if (unlikely(rdma_rw_force_mr))
/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise. Additionally we have a debug option to force usage
 * of MRs to help test this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
                enum dma_data_direction dir, int dma_nents)
        if (dir == DMA_FROM_DEVICE) {
                if (rdma_protocol_iwarp(dev, port_num))
                if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
        if (unlikely(rdma_rw_force_mr))
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
        max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
        max_pages = dev->attrs.max_fast_reg_page_list_len;

        /* arbitrary limit to avoid allocating gigantic resources */
        return min_t(u32, max_pages, 256);
static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
        if (reg->mr->need_inval) {
                reg->inv_wr.opcode = IB_WR_LOCAL_INV;
                reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
                reg->inv_wr.next = &reg->reg_wr.wr;
                reg->inv_wr.next = NULL;
/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
                struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
                u32 sg_cnt, u32 offset)
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
        u32 nents = min(sg_cnt, pages_per_mr);

        reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);

        count += rdma_rw_inv_key(reg);

        ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
        if (ret < 0 || ret < nents) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);

        reg->reg_wr.wr.opcode = IB_WR_REG_MR;
        reg->reg_wr.mr = reg->mr;
        reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;

        reg->sge.addr = reg->mr->iova;
        reg->sge.length = reg->mr->length;
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
        struct rdma_rw_reg_ctx *prev = NULL;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
        int i, j, ret = 0, count = 0;
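        /*
         * One MR registration (and one RDMA READ/WRITE WR) is built per
         * pages_per_mr worth of S/G entries, so nr_ops below is sg_cnt
         * divided by pages_per_mr, rounded up.
         */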
        ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
        ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);

        for (i = 0; i < ctx->nr_ops; i++) {
                struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
                u32 nents = min(sg_cnt, pages_per_mr);

                ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,

                if (reg->mr->need_inval)
                        prev->wr.wr.next = &reg->inv_wr;
                        prev->wr.wr.next = &reg->reg_wr.wr;

                reg->reg_wr.wr.next = &reg->wr.wr;

                reg->wr.wr.sg_list = &reg->sge;
                reg->wr.wr.num_sge = 1;
                reg->wr.remote_addr = remote_addr;

                if (dir == DMA_TO_DEVICE) {
                        reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
                } else if (!rdma_cap_read_inv(qp->device, port_num)) {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ;
                        reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                        reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;

                remote_addr += reg->sge.length;

                for (j = 0; j < nents; j++)

        prev->wr.wr.next = NULL;

        ctx->type = RDMA_RW_MR;

        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
        u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
        u32 total_len = 0, i, j;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

        ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);

        ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);

        for (i = 0; i < ctx->nr_ops; i++) {
                struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
                u32 nr_sge = min(sg_cnt, max_sge);

                if (dir == DMA_TO_DEVICE)
                        rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                        rdma_wr->wr.opcode = IB_WR_RDMA_READ;
                rdma_wr->remote_addr = remote_addr + total_len;
                rdma_wr->rkey = rkey;
                rdma_wr->wr.num_sge = nr_sge;
                rdma_wr->wr.sg_list = sge;
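                /* Build one local SGE per mapped S/G entry covered by this WR. */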
                for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
                        sge->addr = sg_dma_address(sg) + offset;
                        sge->length = sg_dma_len(sg) - offset;
                        sge->lkey = qp->pd->local_dma_lkey;

                        total_len += sge->length;

                rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
                        &ctx->map.wrs[i + 1].wr : NULL;

        ctx->type = RDMA_RW_MULTI_WR;

        kfree(ctx->map.sges);
static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
                enum dma_data_direction dir)
        struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

        ctx->single.sge.lkey = qp->pd->local_dma_lkey;
        ctx->single.sge.addr = sg_dma_address(sg) + offset;
        ctx->single.sge.length = sg_dma_len(sg) - offset;

        memset(rdma_wr, 0, sizeof(*rdma_wr));
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        rdma_wr->wr.sg_list = &ctx->single.sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;

        ctx->type = RDMA_RW_SINGLE_WR;
static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
                u32 sg_cnt, enum dma_data_direction dir)
        if (is_pci_p2pdma_page(sg_page(sg)))
                pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
                ib_dma_unmap_sg(dev, sg, sg_cnt, dir);

static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
                u32 sg_cnt, enum dma_data_direction dir)
        if (is_pci_p2pdma_page(sg_page(sg)))
                return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
        return ib_dma_map_sg(dev, sg, sg_cnt, dir);
/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @sg_offset: current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
        struct ib_device *dev = qp->pd->device;

        ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);

        /*
         * Skip to the S/G entry that sg_offset falls into:
         */
                u32 len = sg_dma_len(sg);

        if (WARN_ON_ONCE(sg_cnt == 0))

        if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
                ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
                                sg_offset, remote_addr, rkey, dir);
        } else if (sg_cnt > 1) {
                ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
                                remote_addr, rkey, dir);
                ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
                                remote_addr, rkey, dir);

        rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);

EXPORT_SYMBOL(rdma_rw_ctx_init);
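
/*
 * Illustrative sketch (not part of the upstream file): how a ULP might drive
 * a single RDMA READ through this interface.  The function name, request
 * layout and done_cqe completion handler are hypothetical; only the
 * rdma_rw_ctx_* calls reflect the API implemented above.  The context is
 * assumed to be embedded in a longer-lived request structure so it is still
 * valid when the completion fires and rdma_rw_ctx_destroy() runs.
 */
#if 0
static int example_read_from_peer(struct ib_qp *qp, u8 port_num,
                struct rdma_rw_ctx *ctx, struct scatterlist *sgl, u32 sg_cnt,
                u64 remote_addr, u32 rkey, struct ib_cqe *done_cqe)
{
        int ret;

        /* Map the local buffers and build the READ (and any MR) WRs. */
        ret = rdma_rw_ctx_init(ctx, qp, port_num, sgl, sg_cnt, 0,
                        remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* Post the whole chain; done_cqe->done() runs on the last WR. */
        ret = rdma_rw_ctx_post(ctx, qp, port_num, done_cqe, NULL);
        if (ret) {
                rdma_rw_ctx_destroy(ctx, qp, port_num, sgl, sg_cnt,
                                DMA_FROM_DEVICE);
                return ret;
        }

        /*
         * On completion the ULP calls rdma_rw_ctx_destroy() from its CQE
         * handler to return MRs to the pool and unmap the scatterlist.
         */
        return 0;
}
#endif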
/**
 * rdma_rw_ctx_signature_init - initialize an RW context with signature offload
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs: signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                struct ib_sig_attrs *sig_attrs,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
        struct ib_device *dev = qp->pd->device;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
        struct ib_rdma_wr *rdma_wr;

        if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
                pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
                       sg_cnt, prot_sg_cnt, pages_per_mr);

        ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

        ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);

        ctx->type = RDMA_RW_SIG_MR;

        ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
                goto out_unmap_prot_sg;

        ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);

        count += rdma_rw_inv_key(ctx->reg);

        memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

        ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
                        prot_sg_cnt, NULL, SZ_4K);
                pr_err("failed to map PI sg (%u)\n", sg_cnt + prot_sg_cnt);
                goto out_destroy_sig_mr;
        ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
        ctx->reg->reg_wr.wr.wr_cqe = NULL;
        ctx->reg->reg_wr.wr.num_sge = 0;
        ctx->reg->reg_wr.wr.send_flags = 0;
        ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
        ctx->reg->reg_wr.mr = ctx->reg->mr;
        ctx->reg->reg_wr.key = ctx->reg->mr->lkey;

        ctx->reg->sge.addr = ctx->reg->mr->iova;
        ctx->reg->sge.length = ctx->reg->mr->length;
        if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
                ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

        rdma_wr = &ctx->reg->wr;
        rdma_wr->wr.sg_list = &ctx->reg->sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        ctx->reg->reg_wr.wr.next = &rdma_wr->wr;

        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);

        ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);

        ib_dma_unmap_sg(dev, sg, sg_cnt, dir);

EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
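
/*
 * Illustrative sketch (not part of the upstream file): a ULP issuing an RDMA
 * READ with protection-information offload.  The "req" structure and its
 * already-populated sig_attrs are hypothetical; the call sequence mirrors
 * rdma_rw_ctx_init() usage, with the protection scatterlist and signature
 * attributes added.
 */
#if 0
        /* req->sig_attrs was filled in earlier by the ULP's PI setup code. */
        ret = rdma_rw_ctx_signature_init(&req->rw_ctx, qp, port_num,
                        req->sgl, req->sg_cnt, req->prot_sgl, req->prot_sg_cnt,
                        &req->sig_attrs, remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        ret = rdma_rw_ctx_post(&req->rw_ctx, qp, port_num, &req->done_cqe, NULL);
#endif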
/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
        reg->mr->need_inval = need_inval;
        ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
        reg->reg_wr.key = reg->mr->lkey;
        reg->sge.lkey = reg->mr->lkey;
/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
        struct ib_send_wr *first_wr, *last_wr;

                for (i = 0; i < ctx->nr_ops; i++) {
                        rdma_rw_update_lkey(&ctx->reg[i],
                                        ctx->reg[i].wr.wr.opcode !=
                                                IB_WR_RDMA_READ_WITH_INV);

                if (ctx->reg[0].inv_wr.next)
                        first_wr = &ctx->reg[0].inv_wr;
                        first_wr = &ctx->reg[0].reg_wr.wr;
                last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
        case RDMA_RW_MULTI_WR:
                first_wr = &ctx->map.wrs[0].wr;
                last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
        case RDMA_RW_SINGLE_WR:
                first_wr = &ctx->single.wr.wr;
                last_wr = &ctx->single.wr.wr;

                last_wr->next = chain_wr;
                last_wr->wr_cqe = cqe;
                last_wr->send_flags |= IB_SEND_SIGNALED;

EXPORT_SYMBOL(rdma_rw_ctx_wrs);
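
/*
 * Illustrative sketch (not part of the upstream file): chaining a ULP's own
 * send WR behind the RDMA WRITE chain, as a storage target might do when it
 * writes data to the initiator and then sends its response in a single post.
 * "req" and its members are hypothetical; rdma_rw_ctx_wrs() and
 * ib_post_send() are the real entry points.
 */
#if 0
        struct ib_send_wr *first_wr;

        /* rsp_wr carries its own wr_cqe, so no extra cqe is passed here. */
        first_wr = rdma_rw_ctx_wrs(&req->rw_ctx, qp, port_num, NULL,
                        &req->rsp_wr);
        ret = ib_post_send(qp, first_wr, NULL);
#endif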
/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
        struct ib_send_wr *first_wr;

        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
        return ib_post_send(qp, first_wr, NULL);
EXPORT_SYMBOL(rdma_rw_ctx_post);
/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
                struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
                for (i = 0; i < ctx->nr_ops; i++)
                        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
        case RDMA_RW_MULTI_WR:
                kfree(ctx->map.sges);
        case RDMA_RW_SINGLE_WR:

        rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u8 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                enum dma_data_direction dir)
        if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))

        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);

        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
        ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device: device handling the connection
 * @port_num: port num to which the connection is bound
 * @maxpages: maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move a payload of
 * @maxpages pages. The returned value is used during transport creation to
 * compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
                unsigned int maxpages)
        unsigned int mr_pages;

        if (rdma_rw_can_use_mr(device, port_num))
                mr_pages = rdma_rw_fr_page_list_len(device, false);
                mr_pages = device->attrs.max_sge_rd;
        return DIV_ROUND_UP(maxpages, mr_pages);
EXPORT_SYMBOL(rdma_rw_mr_factor);
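
/*
 * Illustrative sketch (not part of the upstream file): using
 * rdma_rw_mr_factor() while sizing a queue pair at transport setup.  The
 * queue depth and page count below are hypothetical ULP parameters; the core
 * then grows max_send_wr as needed in rdma_rw_init_qp() during QP creation.
 */
#if 0
        struct ib_qp_init_attr init_attr = { };
        unsigned int factor;

        factor = rdma_rw_mr_factor(dev, port_num, max_payload_pages);
        init_attr.cap.max_rdma_ctxs = queue_depth * factor;
        init_attr.cap.max_send_wr = queue_depth;        /* ULP's own SENDs */
        init_attr.port_num = port_num;
        /* ... fill in send_cq, recv_cq, qp_type, etc., then create the QP. */
        qp = ib_create_qp(pd, &init_attr);
#endif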
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
        WARN_ON_ONCE(attr->port_num == 0);

        /*
         * Each context needs at least one RDMA READ or WRITE WR.
         *
         * For some hardware we might need more; eventually we should ask the
         * HCA driver for a multiplier here.
         */

        /*
         * If the device needs MRs to perform RDMA READ or WRITE operations,
         * we'll need two additional MRs for the registrations and the
         * invalidation.
         */
        if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
            rdma_rw_can_use_mr(dev, attr->port_num))
                factor += 2;    /* inv + reg */

        attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

        /*
         * But maybe we were just too high in the sky and the device doesn't
         * even support all we need, and we'll have to live with what we get...
         */
        attr->cap.max_send_wr =
                min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
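
/*
 * Worked example with illustrative numbers: with MR-backed READ/WRITE the
 * factor above becomes 1 + 2 = 3 WRs per context (read/write + reg + inv),
 * so a ULP requesting max_rdma_ctxs = 128 adds 3 * 128 = 384 entries to
 * max_send_wr on top of its own SENDs, subject to the max_qp_wr clamp.
 */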
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
        struct ib_device *dev = qp->pd->device;
        u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;

        if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
                nr_sig_mrs = attr->cap.max_rdma_ctxs;
                nr_mrs = attr->cap.max_rdma_ctxs;
                max_num_sg = rdma_rw_fr_page_list_len(dev, true);
        } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
                nr_mrs = attr->cap.max_rdma_ctxs;
                max_num_sg = rdma_rw_fr_page_list_len(dev, false);

        ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
                pr_err("%s: failed to allocate %u MRs\n",

        ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
                        IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
                pr_err("%s: failed to allocate %u SIG MRs\n",
                       __func__, nr_sig_mrs);
                goto out_free_rdma_mrs;

        ib_mr_pool_destroy(qp, &qp->rdma_mrs);

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
        ib_mr_pool_destroy(qp, &qp->sig_mrs);
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);