// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}
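
/* Worked example (a sketch; the real constants live in
 * <linux/sunrpc/rpc_rdma.h>): each Read segment on the wire is a
 * discriminator, a position, and an HLOO -- six XDR words, or 24
 * octets. Assuming maxsegs = 8, the Read list alone can consume
 * 8 * 24 = 192 octets on top of the fixed header fields and the
 * minimal Reply chunk.
 */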

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_rdma_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ep.rep_attr.cap.max_send_sge)
				return false;
		}
	}

	return true;
}
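
/* Example of the SGE accounting above (hypothetical numbers): a
 * page list of three pages whose page_base falls mid-page spans up
 * to four pages, so the Send would need RPCRDMA_MIN_SEND_SGES + 4
 * SGEs. If the device's max_send_sge is smaller than that, the
 * arguments move to a Read chunk even though they fit inline.
 */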

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a Write list or a Reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
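
/* For instance, assuming 4KB pages, a 6000-byte kvec whose iov_base
 * begins 100 bytes into a page splits into two segments here:
 * 3996 bytes to the end of the first page, then the remaining
 * 2004 bytes starting at offset zero of the next page.
 */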

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
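
/* On the wire, then, a complete Read segment occupies six XDR words
 * (24 octets):
 *
 *	1 (present) | position | handle | length | offset (2 words)
 *
 * which is exactly the 6 * sizeof(*p) reserved above.
 */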

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		trace_xprtrdma_mr_get(req);
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs(req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	rpcrdma_mrs_refresh(r_xprt);
	return ERR_PTR(-EAGAIN);
}
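
/* Note: the ERR_PTR(-EAGAIN) above propagates through the chunk
 * encoders to rpcrdma_marshal_req(), whose caller retries the
 * request once xprt_wait_for_buffer_space() wakes it and fresh MRs
 * have been made available.
 */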

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	return xdr_stream_encode_item_absent(xdr);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	return xdr_stream_encode_item_absent(xdr);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych)
		return xdr_stream_encode_item_absent(xdr);

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
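
/* Taken together, the three encoders above emit at most one Read
 * list and either a Write list or a Reply chunk; the combinations
 * this implementation supports are spelled out just before the
 * encoder calls in rpcrdma_marshal_req() below.
 */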

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
}

/* The head iovec is straightforward, as it is usually already
 * DMA-mapped. Sync the content that has changed.
 */
static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return false;

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	return true;
}

/* If there is a page list present, DMA map and prepare an
 * SGE for each page to be sent.
 */
static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
				     struct xdr_buf *xdr)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	unsigned int page_base, len, remaining;
	struct page **ppages;
	struct ib_sge *sge;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		sge = &sc->sc_sges[req->rl_wr.num_sge++];
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
					    page_base, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
			goto out_mapping_err;

		sge->length = len;
		sge->lkey = rdmab_lkey(rb);

		sc->sc_unmap_count++;
		ppages++;
		remaining -= len;
		page_base = 0;
	}

	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* The tail iovec may include an XDR pad for the page list,
 * as well as additional content, and may not reside in the
 * same page as the head iovec.
 */
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     unsigned int page_base, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct page *page = virt_to_page(xdr->tail[0].iov_base);

	sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
		goto out_mapping_err;

	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	++sc->sc_unmap_count;
	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* Copy the tail to the end of the head buffer.
 */
static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned char *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len + xdr->page_len;
	memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
	r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
}

/* Copy pagelist content into the head buffer.
 */
static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned int len, page_base, remaining;
	struct page **ppages;
	unsigned char *src, *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len;
	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		src = page_address(*ppages);
		src += page_base;
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		memcpy(dst, src, len);
		r_xprt->rx_stats.pullup_copy_count += len;

		ppages++;
		dst += len;
		remaining -= len;
		page_base = 0;
	}
}

/* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
 * When the head, pagelist, and tail are small, a pull-up copy
 * is considerably less costly than DMA mapping the components
 * of @xdr.
 *
 * Assumptions:
 *  - the caller has already verified that the total length
 *    of the RPC Call body will fit into @rl_sendbuf.
 */
static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	if (unlikely(xdr->tail[0].iov_len))
		rpcrdma_pullup_tail_iov(r_xprt, req, xdr);

	if (unlikely(xdr->page_len))
		rpcrdma_pullup_pagelist(r_xprt, req, xdr);

	/* The whole RPC message resides in the head iovec now */
	return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
}

static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	struct kvec *tail = &xdr->tail[0];

	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;
	if (xdr->page_len)
		if (!rpcrdma_prepare_pagelist(req, xdr))
			return false;
	if (tail->iov_len)
		if (!rpcrdma_prepare_tail_iov(req, xdr,
					      offset_in_page(tail->iov_base),
					      tail->iov_len))
			return false;

	if (req->rl_sendctx->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;
}

static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
				   struct rpcrdma_req *req,
				   struct xdr_buf *xdr)
{
	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here.
	 */

	/* Do not include the tail if it is only an XDR pad */
	if (xdr->tail[0].iov_len > 3) {
		unsigned int page_base, len;

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() adds a pad at the beginning of
		 * the tail iovec. Force the tail's non-pad content to
		 * land at the next XDR position in the Send message.
		 */
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;
		page_base += len & 3;
		len -= len & 3;
		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
			return false;
		kref_get(&req->rl_kref);
	}

	return true;
}
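
/* Example of the pad math above: a tail holding 3 XDR pad bytes
 * followed by an 8-byte item has len = 11; len & 3 == 3, so the
 * pad is skipped and only the 8 aligned bytes that follow it are
 * mapped for the Send.
 */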

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, u32 hdrlen,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto out_nosc;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);
	req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
	req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
	req->rl_wr.num_sge = 0;
	req->rl_wr.opcode = IB_WR_SEND;

	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);

	ret = -EIO;
	switch (rtype) {
	case rpcrdma_noch_pullup:
		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_noch_mapped:
		if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_readch:
		if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_areadch:
		break;
	default:
		goto out_unmap;
	}

	return 0;

out_unmap:
	rpcrdma_sendctx_unmap(req->rl_sendctx);
out_nosc:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	struct xdr_buf *buf = &rqst->rq_snd_buf;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = r_xprt->rx_buf.rb_max_requests;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
			rpcrdma_noch_pullup : rpcrdma_noch_mapped;
	} else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}
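
/* For a small, chunk-less request the header encoded above is just
 * seven XDR words: xid, version, credit request, rdma_msg, then
 * three empty chunk-list discriminators -- RPCRDMA_HDRLEN_MIN.
 */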

static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
					 struct rpcrdma_buffer *buf,
					 u32 grant)
{
	buf->rb_credits = grant;
	xprt->cwnd = grant << RPC_CWNDSHIFT;
}

static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
	spin_unlock(&xprt->transport_lock);
}

/**
 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
 * @r_xprt: controlling transport instance
 *
 * Prepare @r_xprt for the next connection by reinitializing
 * its credit grant to one (see RFC 8166, Section 3.3.3).
 */
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	xprt->cong = 0;
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
	spin_unlock(&xprt->transport_lock);
}
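
/* Example of the credit accounting: a server grant of 128 credits
 * sets cwnd to 128 << RPC_CWNDSHIFT, which lets the RPC congestion
 * logic keep up to 128 requests in flight on this connection.
 */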

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	if (fixup_copy_count)
		trace_xprtrdma_fixup(rqst, fixup_copy_count);
	return fixup_copy_count;
}
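
/* Example (hypothetical sizes): a 1000-byte inline reply with
 * head.iov_len = 128 and page_len >= 872 redirects the head in
 * place (no copy), memcopies the remaining 872 bytes into the page
 * list, and returns fixup_copy_count = 872.
 */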

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
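
/* Decode example: a Write list holding one chunk of two segments
 * arrives as 1 (present), 2 (segcount), HLOO, HLOO, 0 (terminator);
 * decode_write_list() returns 0 with *length set to the sum of the
 * two segment lengths.
 */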

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC:       %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC:       %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC:       %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

	/* If the incoming reply terminated a pending RPC, the next
	 * RPC call will post a replacement receive buffer as it is
	 * being marshaled.
	 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * then we don't need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_ep.rep_max_requests)
		credits = r_xprt->rx_ep.rep_max_requests;
	if (buf->rb_credits != credits)
		rpcrdma_update_cwnd(r_xprt, credits);
	rpcrdma_post_recvs(r_xprt, false);

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}