// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
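
/* An illustrative sketch of the ordering the comments above rely on
 * (not an additional code path):
 *
 *   sendto:       post Write WR 1 ... post Write WR N ... post Send WR
 *   completions:  Write 1 done    ... Write N done    ... Send done
 *                 (DMA-unmap only)                        (pages released)
 *
 * Because the Send WR is queued after every Write WR on the same Send
 * Queue, its completion implies all of the Writes have completed, so
 * releasing the Reply's pages in the Send completion handler is safe.
 */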

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;
fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
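
/* A send_ctxt obtained here returns to sc_send_ctxts by one of two
 * routes: the Send completion handler calls svc_rdma_send_ctxt_put()
 * once the posted WR completes, or the caller puts it back directly
 * when the WR is never posted (see the error paths in svc_rdma_sendto).
 */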

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

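	/* This completion frees one Send Queue slot: credit it back and
	 * wake any sender parked in svc_rdma_send() waiting for room.
	 */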
	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
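	/* The accounting is optimistic: sc_sq_avail is decremented first;
	 * if that would overcommit the Send Queue, the slot is given back
	 * and the task sleeps until a Send completion makes room, then
	 * the whole attempt is retried.
	 */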
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		trace_svcrdma_post_send(wr);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_put(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}
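
/* Each RDMA segment in a Write chunk is an XDR triple (see RFC 8166):
 * a 32-bit handle (the registered memory R_key), a 32-bit length, and
 * a 64-bit offset into the registered region. The encoder below copies
 * a segment from the Call's Write chunk and rewrites only its length
 * field, so the client learns how many bytes were written there.
 */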

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	handle = be32_to_cpup(src++);
	length = be32_to_cpup(src++);
	xdr_decode_hyper(src, &offset);

	*p++ = cpu_to_be32(handle);
	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	*p++ = cpu_to_be32(length);
	xdr_encode_hyper(p, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}
	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -EIO;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping. */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can
	 * fit in a single RDMA Send.
	 */
	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}
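
/* Three cases are handled when loading the Send SGE array below:
 *
 *  - The client provided a Reply chunk: the whole RPC message moves via
 *    RDMA Write, so only the transport header is Sent inline.
 *  - The xdr_buf is small, or has more elements than the device's SGE
 *    limit allows: the message is pulled up into the header buffer.
 *  - Otherwise the head, any page data not moved by a Write chunk, and
 *    the tail are each DMA-mapped and added as separate SGEs.
 */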

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);
		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}
		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;
		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

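	/* If the client offered an R_key it wants invalidated remotely,
	 * piggy-back the invalidation on this Send; otherwise use a
	 * plain Send.
	 */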
	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
			NULL);

	p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_ERR);
	if (!p)
		return -ENOMSG;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rdma_error;
	*p = err_chunk;
	trace_svcrdma_err_chunk(*rdma_argp);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.num_sge = 1;
	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err0;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err0;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

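	/* Error label conventions:
	 * err2: a Write or Reply chunk transfer failed; for -E2BIG or
	 *       -EINVAL (the provided chunks cannot carry this reply),
	 *       attempt an RDMA_ERROR reply before giving up.
	 * err1: return the prepared send_ctxt to the free list.
	 * err0: no reply was posted; flag the connection to be closed.
	 */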
 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}