[tomoyo/tomoyo-test1.git] net/sunrpc/xprtrdma/rpc_rdma.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
4  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the BSD-type
10  * license below:
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  *      Redistributions of source code must retain the above copyright
17  *      notice, this list of conditions and the following disclaimer.
18  *
19  *      Redistributions in binary form must reproduce the above
20  *      copyright notice, this list of conditions and the following
21  *      disclaimer in the documentation and/or other materials provided
22  *      with the distribution.
23  *
24  *      Neither the name of the Network Appliance, Inc. nor the names of
25  *      its contributors may be used to endorse or promote products
26  *      derived from this software without specific prior written
27  *      permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41
42 /*
43  * rpc_rdma.c
44  *
45  * This file contains the guts of the RPC-over-RDMA protocol:
46  * marshaling and unmarshaling of RPC messages, and the interface
47  * to the Linux RPC framework.
48  */
49
50 #include <linux/highmem.h>
51
52 #include <linux/sunrpc/svc_rdma.h>
53
54 #include "xprt_rdma.h"
55 #include <trace/events/rpcrdma.h>
56
57 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
58 # define RPCDBG_FACILITY        RPCDBG_TRANS
59 #endif
60
61 /* Returns size of largest RPC-over-RDMA header in a Call message
62  *
63  * The largest Call header contains a full-size Read list and a
64  * minimal Reply chunk.
65  */
66 static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
67 {
68         unsigned int size;
69
70         /* Fixed header fields and list discriminators */
71         size = RPCRDMA_HDRLEN_MIN;
72
73         /* Maximum Read list size */
74         size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
75
76         /* Minimal Reply chunk size */
77         size += sizeof(__be32); /* segment count */
78         size += rpcrdma_segment_maxsz * sizeof(__be32);
79         size += sizeof(__be32); /* list discriminator */
80
81         return size;
82 }
83
84 /* Returns size of largest RPC-over-RDMA header in a Reply message
85  *
86  * There is only one Write list or one Reply chunk per Reply
87  * message.  The larger list is the Write list.
88  */
89 static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
90 {
91         unsigned int size;
92
93         /* Fixed header fields and list discriminators */
94         size = RPCRDMA_HDRLEN_MIN;
95
96         /* Maximum Write list size */
97         size += sizeof(__be32);         /* segment count */
98         size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
99         size += sizeof(__be32); /* list discriminator */
100
101         return size;
102 }
103
104 /**
105  * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
106  * @r_xprt: transport instance to initialize
107  *
108  * The max_inline fields contain the maximum size of an RPC message
109  * so the marshaling code doesn't have to repeat this calculation
110  * for every RPC.
111  */
112 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
113 {
114         unsigned int maxsegs = r_xprt->rx_ia.ri_max_rdma_segs;
115         struct rpcrdma_ep *ep = &r_xprt->rx_ep;
116
117         ep->rep_max_inline_send =
118                 ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
119         ep->rep_max_inline_recv =
120                 ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
121 }
122
123 /* The client can send a request inline as long as the RPCRDMA header
124  * plus the RPC call fit under the transport's inline limit. If the
125  * combined call message size exceeds that limit, the client must use
126  * a Read chunk for this operation.
127  *
128  * A Read chunk is also required if sending the RPC call inline would
129  * exceed this device's max_sge limit.
130  */
131 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
132                                 struct rpc_rqst *rqst)
133 {
134         struct xdr_buf *xdr = &rqst->rq_snd_buf;
135         unsigned int count, remaining, offset;
136
137         if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
138                 return false;
139
140         if (xdr->page_len) {
141                 remaining = xdr->page_len;
142                 offset = offset_in_page(xdr->page_base);
143                 count = RPCRDMA_MIN_SEND_SGES;
144                 while (remaining) {
145                         remaining -= min_t(unsigned int,
146                                            PAGE_SIZE - offset, remaining);
147                         offset = 0;
148                         if (++count > r_xprt->rx_ep.rep_attr.cap.max_send_sge)
149                                 return false;
150                 }
151         }
152
153         return true;
154 }
155
156 /* The client can't know how large the actual reply will be. Thus it
157  * plans for the largest possible reply for that particular ULP
158  * operation. If the maximum combined reply message size exceeds the
159  * inline threshold, the client must provide a Write list or a Reply
160  * chunk for this request.
161  */
162 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
163                                    struct rpc_rqst *rqst)
164 {
165         return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
166 }
167
168 /* The client is required to provide a Reply chunk if the maximum
169  * size of the non-payload part of the RPC Reply is larger than
170  * the inline threshold.
171  */
172 static bool
173 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
174                           const struct rpc_rqst *rqst)
175 {
176         const struct xdr_buf *buf = &rqst->rq_rcv_buf;
177
178         return (buf->head[0].iov_len + buf->tail[0].iov_len) <
179                 r_xprt->rx_ep.rep_max_inline_recv;
180 }
181
182 /* Split @vec on page boundaries into SGEs. FMR registers pages, not
183  * a byte range. Other modes coalesce these SGEs into a single MR
184  * when they can.
185  *
186  * Returns pointer to next available SGE, and bumps the total number
187  * of SGEs consumed.
188  */
189 static struct rpcrdma_mr_seg *
190 rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
191                      unsigned int *n)
192 {
193         u32 remaining, page_offset;
194         char *base;
195
196         base = vec->iov_base;
197         page_offset = offset_in_page(base);
198         remaining = vec->iov_len;
199         while (remaining) {
200                 seg->mr_page = NULL;
201                 seg->mr_offset = base;
202                 seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
203                 remaining -= seg->mr_len;
204                 base += seg->mr_len;
205                 ++seg;
206                 ++(*n);
207                 page_offset = 0;
208         }
209         return seg;
210 }
211
212 /* Convert @xdrbuf into SGEs no larger than a page each. As they
213  * are registered, these SGEs are then coalesced into RDMA segments
214  * when the selected memreg mode supports it.
215  *
216  * Returns positive number of SGEs consumed, or a negative errno.
217  */
218
219 static int
220 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
221                      unsigned int pos, enum rpcrdma_chunktype type,
222                      struct rpcrdma_mr_seg *seg)
223 {
224         unsigned long page_base;
225         unsigned int len, n;
226         struct page **ppages;
227
228         n = 0;
229         if (pos == 0)
230                 seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
231
232         len = xdrbuf->page_len;
233         ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
234         page_base = offset_in_page(xdrbuf->page_base);
235         while (len) {
236                 /* ACL likes to be lazy in allocating pages - ACLs
237                  * are small by default but can get huge.
238                  */
239                 if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
240                         if (!*ppages)
241                                 *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
242                         if (!*ppages)
243                                 return -ENOBUFS;
244                 }
245                 seg->mr_page = *ppages;
246                 seg->mr_offset = (char *)page_base;
247                 seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
248                 len -= seg->mr_len;
249                 ++ppages;
250                 ++seg;
251                 ++n;
252                 page_base = 0;
253         }
254
255         /* When encoding a Read chunk, the tail iovec contains an
256          * XDR pad and may be omitted.
257          */
258         if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
259                 goto out;
260
261         /* When encoding a Write chunk, some servers need to see an
262          * extra segment for non-XDR-aligned Write chunks. The upper
263          * layer provides space in the tail iovec that may be used
264          * for this purpose.
265          */
266         if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
267                 goto out;
268
269         if (xdrbuf->tail[0].iov_len)
270                 seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
271
272 out:
273         if (unlikely(n > RPCRDMA_MAX_SEGS))
274                 return -EIO;
275         return n;
276 }
277
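/* Encode one plain RDMA segment: handle (32 bits), length (32 bits),
 * and offset (64 bits). This is the "HLOO" quad referenced in the
 * chunk encoders below.
 */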
278 static void
279 xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
280 {
281         *iptr++ = cpu_to_be32(mr->mr_handle);
282         *iptr++ = cpu_to_be32(mr->mr_length);
283         xdr_encode_hyper(iptr, mr->mr_offset);
284 }
285
286 static int
287 encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
288 {
289         __be32 *p;
290
291         p = xdr_reserve_space(xdr, 4 * sizeof(*p));
292         if (unlikely(!p))
293                 return -EMSGSIZE;
294
295         xdr_encode_rdma_segment(p, mr);
296         return 0;
297 }
298
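/* Encode one Read segment: an item-present discriminator, the XDR
 * position where the chunk's payload begins in the Call message,
 * then the HLOO quad.
 */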
299 static int
300 encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
301                     u32 position)
302 {
303         __be32 *p;
304
305         p = xdr_reserve_space(xdr, 6 * sizeof(*p));
306         if (unlikely(!p))
307                 return -EMSGSIZE;
308
309         *p++ = xdr_one;                 /* Item present */
310         *p++ = cpu_to_be32(position);
311         xdr_encode_rdma_segment(p, mr);
312         return 0;
313 }
314
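/* Set up an MR to register the next portion of @seg. An MR is taken
 * from the req's free list, or allocated fresh if that list is empty.
 * On success the MR is moved to req->rl_registered and a pointer to
 * the next unregistered segment is returned. If no MR is available,
 * ERR_PTR(-EAGAIN) is returned so marshaling can be retried after
 * MRs have been replenished.
 */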
315 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
316                                                  struct rpcrdma_req *req,
317                                                  struct rpcrdma_mr_seg *seg,
318                                                  int nsegs, bool writing,
319                                                  struct rpcrdma_mr **mr)
320 {
321         *mr = rpcrdma_mr_pop(&req->rl_free_mrs);
322         if (!*mr) {
323                 *mr = rpcrdma_mr_get(r_xprt);
324                 if (!*mr)
325                         goto out_getmr_err;
326                 trace_xprtrdma_mr_get(req);
327                 (*mr)->mr_req = req;
328         }
329
330         rpcrdma_mr_push(*mr, &req->rl_registered);
331         return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
332
333 out_getmr_err:
334         trace_xprtrdma_nomrs(req);
335         xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
336         rpcrdma_mrs_refresh(r_xprt);
337         return ERR_PTR(-EAGAIN);
338 }
339
340 /* Register and XDR encode the Read list. Supports encoding a list of read
341  * segments that belong to a single read chunk.
342  *
343  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
344  *
345  *  Read chunklist (a linked list):
346  *   N elements, position P (same P for all chunks of same arg!):
347  *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
348  *
349  * Returns zero on success, or a negative errno if a failure occurred.
350  * @xdr is advanced to the next position in the stream.
351  *
352  * Only a single @pos value is currently supported.
353  */
354 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
355                                     struct rpcrdma_req *req,
356                                     struct rpc_rqst *rqst,
357                                     enum rpcrdma_chunktype rtype)
358 {
359         struct xdr_stream *xdr = &req->rl_stream;
360         struct rpcrdma_mr_seg *seg;
361         struct rpcrdma_mr *mr;
362         unsigned int pos;
363         int nsegs;
364
365         if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
366                 goto done;
367
368         pos = rqst->rq_snd_buf.head[0].iov_len;
369         if (rtype == rpcrdma_areadch)
370                 pos = 0;
371         seg = req->rl_segments;
372         nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
373                                      rtype, seg);
374         if (nsegs < 0)
375                 return nsegs;
376
377         do {
378                 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
379                 if (IS_ERR(seg))
380                         return PTR_ERR(seg);
381
382                 if (encode_read_segment(xdr, mr, pos) < 0)
383                         return -EMSGSIZE;
384
385                 trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
386                 r_xprt->rx_stats.read_chunk_count++;
387                 nsegs -= mr->mr_nents;
388         } while (nsegs);
389
390 done:
391         return xdr_stream_encode_item_absent(xdr);
392 }
393
394 /* Register and XDR encode the Write list. Supports encoding a list
395  * containing one array of plain segments that belong to a single
396  * write chunk.
397  *
398  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
399  *
400  *  Write chunklist (a list of (one) counted array):
401  *   N elements:
402  *    1 - N - HLOO - HLOO - ... - HLOO - 0
403  *
404  * Returns zero on success, or a negative errno if a failure occurred.
405  * @xdr is advanced to the next position in the stream.
406  *
407  * Only a single Write chunk is currently supported.
408  */
409 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
410                                      struct rpcrdma_req *req,
411                                      struct rpc_rqst *rqst,
412                                      enum rpcrdma_chunktype wtype)
413 {
414         struct xdr_stream *xdr = &req->rl_stream;
415         struct rpcrdma_mr_seg *seg;
416         struct rpcrdma_mr *mr;
417         int nsegs, nchunks;
418         __be32 *segcount;
419
420         if (wtype != rpcrdma_writech)
421                 goto done;
422
423         seg = req->rl_segments;
424         nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
425                                      rqst->rq_rcv_buf.head[0].iov_len,
426                                      wtype, seg);
427         if (nsegs < 0)
428                 return nsegs;
429
430         if (xdr_stream_encode_item_present(xdr) < 0)
431                 return -EMSGSIZE;
432         segcount = xdr_reserve_space(xdr, sizeof(*segcount));
433         if (unlikely(!segcount))
434                 return -EMSGSIZE;
435         /* Actual value encoded below */
436
437         nchunks = 0;
438         do {
439                 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
440                 if (IS_ERR(seg))
441                         return PTR_ERR(seg);
442
443                 if (encode_rdma_segment(xdr, mr) < 0)
444                         return -EMSGSIZE;
445
446                 trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
447                 r_xprt->rx_stats.write_chunk_count++;
448                 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
449                 nchunks++;
450                 nsegs -= mr->mr_nents;
451         } while (nsegs);
452
453         /* Update count of segments in this Write chunk */
454         *segcount = cpu_to_be32(nchunks);
455
456 done:
457         return xdr_stream_encode_item_absent(xdr);
458 }
459
460 /* Register and XDR encode the Reply chunk. Supports encoding an array
461  * of plain segments that belong to a single write (reply) chunk.
462  *
463  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
464  *
465  *  Reply chunk (a counted array):
466  *   N elements:
467  *    1 - N - HLOO - HLOO - ... - HLOO
468  *
469  * Returns zero on success, or a negative errno if a failure occurred.
470  * @xdr is advanced to the next position in the stream.
471  */
472 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
473                                       struct rpcrdma_req *req,
474                                       struct rpc_rqst *rqst,
475                                       enum rpcrdma_chunktype wtype)
476 {
477         struct xdr_stream *xdr = &req->rl_stream;
478         struct rpcrdma_mr_seg *seg;
479         struct rpcrdma_mr *mr;
480         int nsegs, nchunks;
481         __be32 *segcount;
482
483         if (wtype != rpcrdma_replych)
484                 return xdr_stream_encode_item_absent(xdr);
485
486         seg = req->rl_segments;
487         nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
488         if (nsegs < 0)
489                 return nsegs;
490
491         if (xdr_stream_encode_item_present(xdr) < 0)
492                 return -EMSGSIZE;
493         segcount = xdr_reserve_space(xdr, sizeof(*segcount));
494         if (unlikely(!segcount))
495                 return -EMSGSIZE;
496         /* Actual value encoded below */
497
498         nchunks = 0;
499         do {
500                 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
501                 if (IS_ERR(seg))
502                         return PTR_ERR(seg);
503
504                 if (encode_rdma_segment(xdr, mr) < 0)
505                         return -EMSGSIZE;
506
507                 trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
508                 r_xprt->rx_stats.reply_chunk_count++;
509                 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
510                 nchunks++;
511                 nsegs -= mr->mr_nents;
512         } while (nsegs);
513
514         /* Update count of segments in the Reply chunk */
515         *segcount = cpu_to_be32(nchunks);
516
517         return 0;
518 }
519
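/* kref release callback, invoked when the Send completes after the
 * Reply has already arrived: finish RPC completion now and count a
 * reply that had to wait for its Send.
 */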
520 static void rpcrdma_sendctx_done(struct kref *kref)
521 {
522         struct rpcrdma_req *req =
523                 container_of(kref, struct rpcrdma_req, rl_kref);
524         struct rpcrdma_rep *rep = req->rl_reply;
525
526         rpcrdma_complete_rqst(rep);
527         rep->rr_rxprt->rx_stats.reply_waits_for_send++;
528 }
529
530 /**
531  * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
532  * @sc: sendctx containing SGEs to unmap
533  *
534  */
535 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
536 {
537         struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
538         struct ib_sge *sge;
539
540         if (!sc->sc_unmap_count)
541                 return;
542
543         /* The first two SGEs contain the transport header and
544          * the inline buffer. These are always left mapped so
545          * they can be cheaply re-used.
546          */
547         for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
548              ++sge, --sc->sc_unmap_count)
549                 ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
550                                   DMA_TO_DEVICE);
551
552         kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
553 }
554
555 /* Prepare an SGE for the RPC-over-RDMA transport header.
556  */
557 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
558                                     struct rpcrdma_req *req, u32 len)
559 {
560         struct rpcrdma_sendctx *sc = req->rl_sendctx;
561         struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
562         struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
563
564         sge->addr = rdmab_addr(rb);
565         sge->length = len;
566         sge->lkey = rdmab_lkey(rb);
567
568         ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
569                                       DMA_TO_DEVICE);
570 }
571
572 /* The head iovec is straightforward, as it is usually already
573  * DMA-mapped. Sync the content that has changed.
574  */
575 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
576                                      struct rpcrdma_req *req, unsigned int len)
577 {
578         struct rpcrdma_sendctx *sc = req->rl_sendctx;
579         struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
580         struct rpcrdma_regbuf *rb = req->rl_sendbuf;
581
582         if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
583                 return false;
584
585         sge->addr = rdmab_addr(rb);
586         sge->length = len;
587         sge->lkey = rdmab_lkey(rb);
588
589         ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
590                                       DMA_TO_DEVICE);
591         return true;
592 }
593
594 /* If there is a page list present, DMA map and prepare an
595  * SGE for each page to be sent.
596  */
597 static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
598                                      struct xdr_buf *xdr)
599 {
600         struct rpcrdma_sendctx *sc = req->rl_sendctx;
601         struct rpcrdma_regbuf *rb = req->rl_sendbuf;
602         unsigned int page_base, len, remaining;
603         struct page **ppages;
604         struct ib_sge *sge;
605
606         ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
607         page_base = offset_in_page(xdr->page_base);
608         remaining = xdr->page_len;
609         while (remaining) {
610                 sge = &sc->sc_sges[req->rl_wr.num_sge++];
611                 len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
612                 sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
613                                             page_base, len, DMA_TO_DEVICE);
614                 if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
615                         goto out_mapping_err;
616
617                 sge->length = len;
618                 sge->lkey = rdmab_lkey(rb);
619
620                 sc->sc_unmap_count++;
621                 ppages++;
622                 remaining -= len;
623                 page_base = 0;
624         }
625
626         return true;
627
628 out_mapping_err:
629         trace_xprtrdma_dma_maperr(sge->addr);
630         return false;
631 }
632
633 /* The tail iovec may include an XDR pad for the page list,
634  * as well as additional content, and may not reside in the
635  * same page as the head iovec.
636  */
637 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
638                                      struct xdr_buf *xdr,
639                                      unsigned int page_base, unsigned int len)
640 {
641         struct rpcrdma_sendctx *sc = req->rl_sendctx;
642         struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
643         struct rpcrdma_regbuf *rb = req->rl_sendbuf;
644         struct page *page = virt_to_page(xdr->tail[0].iov_base);
645
646         sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
647                                     DMA_TO_DEVICE);
648         if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
649                 goto out_mapping_err;
650
651         sge->length = len;
652         sge->lkey = rdmab_lkey(rb);
653         ++sc->sc_unmap_count;
654         return true;
655
656 out_mapping_err:
657         trace_xprtrdma_dma_maperr(sge->addr);
658         return false;
659 }
660
661 /* Copy the tail to the end of the head buffer.
662  */
663 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
664                                     struct rpcrdma_req *req,
665                                     struct xdr_buf *xdr)
666 {
667         unsigned char *dst;
668
669         dst = (unsigned char *)xdr->head[0].iov_base;
670         dst += xdr->head[0].iov_len + xdr->page_len;
671         memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
672         r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
673 }
674
675 /* Copy pagelist content into the head buffer.
676  */
677 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
678                                     struct rpcrdma_req *req,
679                                     struct xdr_buf *xdr)
680 {
681         unsigned int len, page_base, remaining;
682         struct page **ppages;
683         unsigned char *src, *dst;
684
685         dst = (unsigned char *)xdr->head[0].iov_base;
686         dst += xdr->head[0].iov_len;
687         ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
688         page_base = offset_in_page(xdr->page_base);
689         remaining = xdr->page_len;
690         while (remaining) {
691                 src = page_address(*ppages);
692                 src += page_base;
693                 len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
694                 memcpy(dst, src, len);
695                 r_xprt->rx_stats.pullup_copy_count += len;
696
697                 ppages++;
698                 dst += len;
699                 remaining -= len;
700                 page_base = 0;
701         }
702 }
703
704 /* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
705  * When the head, pagelist, and tail are small, a pull-up copy
706  * is considerably less costly than DMA mapping the components
707  * of @xdr.
708  *
709  * Assumptions:
710  *  - the caller has already verified that the total length
711  *    of the RPC Call body will fit into @rl_sendbuf.
712  */
713 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
714                                         struct rpcrdma_req *req,
715                                         struct xdr_buf *xdr)
716 {
717         if (unlikely(xdr->tail[0].iov_len))
718                 rpcrdma_pullup_tail_iov(r_xprt, req, xdr);
719
720         if (unlikely(xdr->page_len))
721                 rpcrdma_pullup_pagelist(r_xprt, req, xdr);
722
723         /* The whole RPC message resides in the head iovec now */
724         return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
725 }
726
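/* DMA map the head, each page of the page list, and the tail of @xdr
 * as separate Send SGEs; no pull-up copy is performed.
 */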
727 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
728                                         struct rpcrdma_req *req,
729                                         struct xdr_buf *xdr)
730 {
731         struct kvec *tail = &xdr->tail[0];
732
733         if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
734                 return false;
735         if (xdr->page_len)
736                 if (!rpcrdma_prepare_pagelist(req, xdr))
737                         return false;
738         if (tail->iov_len)
739                 if (!rpcrdma_prepare_tail_iov(req, xdr,
740                                               offset_in_page(tail->iov_base),
741                                               tail->iov_len))
742                         return false;
743
744         if (req->rl_sendctx->sc_unmap_count)
745                 kref_get(&req->rl_kref);
746         return true;
747 }
748
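/* Prepare Send SGEs for a Call that uses a Read chunk: only the head
 * iovec, and any tail content that is not just an XDR pad, are sent
 * inline. The page list is moved by the responder via RDMA Read.
 */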
749 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
750                                    struct rpcrdma_req *req,
751                                    struct xdr_buf *xdr)
752 {
753         if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
754                 return false;
755
756         /* If there is a Read chunk, the page list is being handled
757          * via explicit RDMA, and thus is skipped here.
758          */
759
760         /* Do not include the tail if it is only an XDR pad */
761         if (xdr->tail[0].iov_len > 3) {
762                 unsigned int page_base, len;
763
764                 /* If the content in the page list is an odd length,
765                  * xdr_write_pages() adds a pad at the beginning of
766                  * the tail iovec. Force the tail's non-pad content to
767                  * land at the next XDR position in the Send message.
768                  */
769                 page_base = offset_in_page(xdr->tail[0].iov_base);
770                 len = xdr->tail[0].iov_len;
771                 page_base += len & 3;
772                 len -= len & 3;
773                 if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
774                         return false;
775                 kref_get(&req->rl_kref);
776         }
777
778         return true;
779 }
780
781 /**
782  * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
783  * @r_xprt: controlling transport
784  * @req: context of RPC Call being marshalled
785  * @hdrlen: size of transport header, in bytes
786  * @xdr: xdr_buf containing RPC Call
787  * @rtype: chunk type being encoded
788  *
789  * Returns 0 on success; otherwise a negative errno is returned.
790  */
791 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
792                                      struct rpcrdma_req *req, u32 hdrlen,
793                                      struct xdr_buf *xdr,
794                                      enum rpcrdma_chunktype rtype)
795 {
796         int ret;
797
798         ret = -EAGAIN;
799         req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
800         if (!req->rl_sendctx)
801                 goto out_nosc;
802         req->rl_sendctx->sc_unmap_count = 0;
803         req->rl_sendctx->sc_req = req;
804         kref_init(&req->rl_kref);
805         req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
806         req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
807         req->rl_wr.num_sge = 0;
808         req->rl_wr.opcode = IB_WR_SEND;
809
810         rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
811
812         ret = -EIO;
813         switch (rtype) {
814         case rpcrdma_noch_pullup:
815                 if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
816                         goto out_unmap;
817                 break;
818         case rpcrdma_noch_mapped:
819                 if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
820                         goto out_unmap;
821                 break;
822         case rpcrdma_readch:
823                 if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
824                         goto out_unmap;
825                 break;
826         case rpcrdma_areadch:
827                 break;
828         default:
829                 goto out_unmap;
830         }
831
832         return 0;
833
834 out_unmap:
835         rpcrdma_sendctx_unmap(req->rl_sendctx);
836 out_nosc:
837         trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
838         return ret;
839 }
840
841 /**
842  * rpcrdma_marshal_req - Marshal and send one RPC request
843  * @r_xprt: controlling transport
844  * @rqst: RPC request to be marshaled
845  *
846  * For the RPC in "rqst", this function:
847  *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
848  *  - Registers Read, Write, and Reply chunks
849  *  - Constructs the transport header
850  *  - Posts a Send WR to send the transport header and request
851  *
852  * Returns:
853  *      %0 if the RPC was sent successfully,
854  *      %-ENOTCONN if the connection was lost,
855  *      %-EAGAIN if the caller should call again with the same arguments,
856  *      %-ENOBUFS if the caller should call again after a delay,
857  *      %-EMSGSIZE if the transport header is too small,
858  *      %-EIO if a permanent problem occurred while marshaling.
859  */
860 int
861 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
862 {
863         struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
864         struct xdr_stream *xdr = &req->rl_stream;
865         enum rpcrdma_chunktype rtype, wtype;
866         struct xdr_buf *buf = &rqst->rq_snd_buf;
867         bool ddp_allowed;
868         __be32 *p;
869         int ret;
870
871         rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
872         xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
873                         rqst);
874
875         /* Fixed header fields */
876         ret = -EMSGSIZE;
877         p = xdr_reserve_space(xdr, 4 * sizeof(*p));
878         if (!p)
879                 goto out_err;
880         *p++ = rqst->rq_xid;
881         *p++ = rpcrdma_version;
882         *p++ = r_xprt->rx_buf.rb_max_requests;
883
884         /* When the ULP employs a GSS flavor that guarantees integrity
885          * or privacy, direct data placement of individual data items
886          * is not allowed.
887          */
888         ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
889                                                 RPCAUTH_AUTH_DATATOUCH);
890
891         /*
892          * Chunks needed for results?
893          *
894          * o If the expected result is under the inline threshold, all ops
895          *   return as inline.
896          * o Large read ops return data as write chunk(s), header as
897          *   inline.
898          * o Large non-read ops return as a single reply chunk.
899          */
900         if (rpcrdma_results_inline(r_xprt, rqst))
901                 wtype = rpcrdma_noch;
902         else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
903                  rpcrdma_nonpayload_inline(r_xprt, rqst))
904                 wtype = rpcrdma_writech;
905         else
906                 wtype = rpcrdma_replych;
907
908         /*
909          * Chunks needed for arguments?
910          *
911          * o If the total request is under the inline threshold, all ops
912          *   are sent as inline.
913          * o Large write ops transmit data as read chunk(s), header as
914          *   inline.
915          * o Large non-write ops are sent with the entire message as a
916          *   single read chunk (protocol 0-position special case).
917          *
918          * This assumes that the upper layer does not present a request
919          * that both has a data payload, and whose non-data arguments
920          * by themselves are larger than the inline threshold.
921          */
922         if (rpcrdma_args_inline(r_xprt, rqst)) {
923                 *p++ = rdma_msg;
924                 rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
925                         rpcrdma_noch_pullup : rpcrdma_noch_mapped;
926         } else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
927                 *p++ = rdma_msg;
928                 rtype = rpcrdma_readch;
929         } else {
930                 r_xprt->rx_stats.nomsg_call_count++;
931                 *p++ = rdma_nomsg;
932                 rtype = rpcrdma_areadch;
933         }
934
935         /* This implementation supports the following combinations
936          * of chunk lists in one RPC-over-RDMA Call message:
937          *
938          *   - Read list
939          *   - Write list
940          *   - Reply chunk
941          *   - Read list + Reply chunk
942          *
943          * It might not yet support the following combinations:
944          *
945          *   - Read list + Write list
946          *
947          * It does not support the following combinations:
948          *
949          *   - Write list + Reply chunk
950          *   - Read list + Write list + Reply chunk
951          *
952          * This implementation supports only a single chunk in each
953          * Read or Write list. Thus for example the client cannot
954          * send a Call message with a Position Zero Read chunk and a
955          * regular Read chunk at the same time.
956          */
957         ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
958         if (ret)
959                 goto out_err;
960         ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
961         if (ret)
962                 goto out_err;
963         ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
964         if (ret)
965                 goto out_err;
966
967         ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
968                                         buf, rtype);
969         if (ret)
970                 goto out_err;
971
972         trace_xprtrdma_marshal(req, rtype, wtype);
973         return 0;
974
975 out_err:
976         trace_xprtrdma_marshal_failed(rqst, ret);
977         r_xprt->rx_stats.failed_marshal_count++;
978         frwr_reset(req);
979         return ret;
980 }
981
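/* Apply a credit grant received from the server: cache the grant and
 * convert it to an RPC congestion window so that at most @grant RPCs
 * are outstanding at once.
 */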
982 static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
983                                          struct rpcrdma_buffer *buf,
984                                          u32 grant)
985 {
986         buf->rb_credits = grant;
987         xprt->cwnd = grant << RPC_CWNDSHIFT;
988 }
989
990 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
991 {
992         struct rpc_xprt *xprt = &r_xprt->rx_xprt;
993
994         spin_lock(&xprt->transport_lock);
995         __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
996         spin_unlock(&xprt->transport_lock);
997 }
998
999 /**
1000  * rpcrdma_reset_cwnd - Reset the xprt's congestion window
1001  * @r_xprt: controlling transport instance
1002  *
1003  * Prepare @r_xprt for the next connection by reinitializing
1004  * its credit grant to one (see RFC 8166, Section 3.3.3).
1005  */
1006 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
1007 {
1008         struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1009
1010         spin_lock(&xprt->transport_lock);
1011         xprt->cong = 0;
1012         __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
1013         spin_unlock(&xprt->transport_lock);
1014 }
1015
1016 /**
1017  * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
1018  * @rqst: controlling RPC request
1019  * @srcp: points to RPC message payload in receive buffer
1020  * @copy_len: remaining length of receive buffer content
1021  * @pad: Write chunk pad bytes needed (zero for pure inline)
1022  *
1023  * The upper layer has set the maximum number of bytes it can
1024  * receive in each component of rq_rcv_buf. These values are set in
1025  * the head.iov_len, page_len, tail.iov_len, and buflen fields.
1026  *
1027  * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
1028  * many cases this function simply updates iov_base pointers in
1029  * rq_rcv_buf to point directly to the received reply data, to
1030  * avoid copying reply data.
1031  *
1032  * Returns the count of bytes which had to be memcopied.
1033  */
1034 static unsigned long
1035 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
1036 {
1037         unsigned long fixup_copy_count;
1038         int i, npages, curlen;
1039         char *destp;
1040         struct page **ppages;
1041         int page_base;
1042
1043         /* The head iovec is redirected to the RPC reply message
1044          * in the receive buffer, to avoid a memcopy.
1045          */
1046         rqst->rq_rcv_buf.head[0].iov_base = srcp;
1047         rqst->rq_private_buf.head[0].iov_base = srcp;
1048
1049         /* The contents of the receive buffer that follow
1050          * head.iov_len bytes are copied into the page list.
1051          */
1052         curlen = rqst->rq_rcv_buf.head[0].iov_len;
1053         if (curlen > copy_len)
1054                 curlen = copy_len;
1055         srcp += curlen;
1056         copy_len -= curlen;
1057
1058         ppages = rqst->rq_rcv_buf.pages +
1059                 (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
1060         page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
1061         fixup_copy_count = 0;
1062         if (copy_len && rqst->rq_rcv_buf.page_len) {
1063                 int pagelist_len;
1064
1065                 pagelist_len = rqst->rq_rcv_buf.page_len;
1066                 if (pagelist_len > copy_len)
1067                         pagelist_len = copy_len;
1068                 npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
1069                 for (i = 0; i < npages; i++) {
1070                         curlen = PAGE_SIZE - page_base;
1071                         if (curlen > pagelist_len)
1072                                 curlen = pagelist_len;
1073
1074                         destp = kmap_atomic(ppages[i]);
1075                         memcpy(destp + page_base, srcp, curlen);
1076                         flush_dcache_page(ppages[i]);
1077                         kunmap_atomic(destp);
1078                         srcp += curlen;
1079                         copy_len -= curlen;
1080                         fixup_copy_count += curlen;
1081                         pagelist_len -= curlen;
1082                         if (!pagelist_len)
1083                                 break;
1084                         page_base = 0;
1085                 }
1086
1087                 /* Implicit padding for the last segment in a Write
1088                  * chunk is inserted inline at the front of the tail
1089                  * iovec. The upper layer ignores the content of
1090                  * the pad. Simply ensure inline content in the tail
1091                  * that follows the Write chunk is properly aligned.
1092                  */
1093                 if (pad)
1094                         srcp -= pad;
1095         }
1096
1097         /* The tail iovec is redirected to the remaining data
1098          * in the receive buffer, to avoid a memcopy.
1099          */
1100         if (copy_len || pad) {
1101                 rqst->rq_rcv_buf.tail[0].iov_base = srcp;
1102                 rqst->rq_private_buf.tail[0].iov_base = srcp;
1103         }
1104
1105         if (fixup_copy_count)
1106                 trace_xprtrdma_fixup(rqst, fixup_copy_count);
1107         return fixup_copy_count;
1108 }
1109
1110 /* By convention, backchannel calls arrive via rdma_msg type
1111  * messages, and never populate the chunk lists. This makes
1112  * the RPC/RDMA header small and fixed in size, so it is
1113  * straightforward to check the RPC header's direction field.
1114  */
1115 static bool
1116 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1117 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1118 {
1119         struct xdr_stream *xdr = &rep->rr_stream;
1120         __be32 *p;
1121
1122         if (rep->rr_proc != rdma_msg)
1123                 return false;
1124
1125         /* Peek at stream contents without advancing. */
1126         p = xdr_inline_decode(xdr, 0);
1127
1128         /* Chunk lists */
1129         if (*p++ != xdr_zero)
1130                 return false;
1131         if (*p++ != xdr_zero)
1132                 return false;
1133         if (*p++ != xdr_zero)
1134                 return false;
1135
1136         /* RPC header */
1137         if (*p++ != rep->rr_xid)
1138                 return false;
1139         if (*p != cpu_to_be32(RPC_CALL))
1140                 return false;
1141
1142         /* Now that we are sure this is a backchannel call,
1143          * advance to the RPC header.
1144          */
1145         p = xdr_inline_decode(xdr, 3 * sizeof(*p));
1146         if (unlikely(!p))
1147                 goto out_short;
1148
1149         rpcrdma_bc_receive_call(r_xprt, rep);
1150         return true;
1151
1152 out_short:
1153         pr_warn("RPC/RDMA short backward direction call\n");
1154         return true;
1155 }
1156 #else   /* CONFIG_SUNRPC_BACKCHANNEL */
1157 {
1158         return false;
1159 }
1160 #endif  /* CONFIG_SUNRPC_BACKCHANNEL */
1161
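/* Decode one plain RDMA segment (handle, length, offset) from the
 * transport header, returning the segment length in @length.
 */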
1162 static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
1163 {
1164         u32 handle;
1165         u64 offset;
1166         __be32 *p;
1167
1168         p = xdr_inline_decode(xdr, 4 * sizeof(*p));
1169         if (unlikely(!p))
1170                 return -EIO;
1171
1172         handle = be32_to_cpup(p++);
1173         *length = be32_to_cpup(p++);
1174         xdr_decode_hyper(p, &offset);
1175
1176         trace_xprtrdma_decode_seg(handle, *length, offset);
1177         return 0;
1178 }
1179
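/* Decode one Write chunk: a counted array of plain segments.
 * @length returns the sum of the segment lengths.
 */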
1180 static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
1181 {
1182         u32 segcount, seglength;
1183         __be32 *p;
1184
1185         p = xdr_inline_decode(xdr, sizeof(*p));
1186         if (unlikely(!p))
1187                 return -EIO;
1188
1189         *length = 0;
1190         segcount = be32_to_cpup(p);
1191         while (segcount--) {
1192                 if (decode_rdma_segment(xdr, &seglength))
1193                         return -EIO;
1194                 *length += seglength;
1195         }
1196
1197         return 0;
1198 }
1199
1200 /* In RPC-over-RDMA Version One replies, a Read list is never
1201  * expected. This decoder is a stub that returns an error if
1202  * a Read list is present.
1203  */
1204 static int decode_read_list(struct xdr_stream *xdr)
1205 {
1206         __be32 *p;
1207
1208         p = xdr_inline_decode(xdr, sizeof(*p));
1209         if (unlikely(!p))
1210                 return -EIO;
1211         if (unlikely(*p != xdr_zero))
1212                 return -EIO;
1213         return 0;
1214 }
1215
1216 /* Supports only one Write chunk in the Write list
1217  */
1218 static int decode_write_list(struct xdr_stream *xdr, u32 *length)
1219 {
1220         u32 chunklen;
1221         bool first;
1222         __be32 *p;
1223
1224         *length = 0;
1225         first = true;
1226         do {
1227                 p = xdr_inline_decode(xdr, sizeof(*p));
1228                 if (unlikely(!p))
1229                         return -EIO;
1230                 if (*p == xdr_zero)
1231                         break;
1232                 if (!first)
1233                         return -EIO;
1234
1235                 if (decode_write_chunk(xdr, &chunklen))
1236                         return -EIO;
1237                 *length += chunklen;
1238                 first = false;
1239         } while (true);
1240         return 0;
1241 }
1242
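/* The Reply chunk is either absent (xdr_zero) or a single counted
 * array of segments; @length returns its total length in bytes.
 */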
1243 static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
1244 {
1245         __be32 *p;
1246
1247         p = xdr_inline_decode(xdr, sizeof(*p));
1248         if (unlikely(!p))
1249                 return -EIO;
1250
1251         *length = 0;
1252         if (*p != xdr_zero)
1253                 if (decode_write_chunk(xdr, length))
1254                         return -EIO;
1255         return 0;
1256 }
1257
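/* Handle an RDMA_MSG reply: the RPC message itself is carried inline,
 * immediately following the transport header. The Read list and Reply
 * chunk must be empty; at most one Write chunk may carry bulk data.
 */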
1258 static int
1259 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1260                    struct rpc_rqst *rqst)
1261 {
1262         struct xdr_stream *xdr = &rep->rr_stream;
1263         u32 writelist, replychunk, rpclen;
1264         char *base;
1265
1266         /* Decode the chunk lists */
1267         if (decode_read_list(xdr))
1268                 return -EIO;
1269         if (decode_write_list(xdr, &writelist))
1270                 return -EIO;
1271         if (decode_reply_chunk(xdr, &replychunk))
1272                 return -EIO;
1273
1274         /* RDMA_MSG sanity checks */
1275         if (unlikely(replychunk))
1276                 return -EIO;
1277
1278         /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1279         base = (char *)xdr_inline_decode(xdr, 0);
1280         rpclen = xdr_stream_remaining(xdr);
1281         r_xprt->rx_stats.fixup_copy_count +=
1282                 rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
1283
1284         r_xprt->rx_stats.total_rdma_reply += writelist;
1285         return rpclen + xdr_align_size(writelist);
1286 }
1287
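/* Handle an RDMA_NOMSG reply: no inline RPC message follows the
 * transport header; the entire reply was placed in the Reply chunk.
 * A Write list is not expected here.
 */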
1288 static noinline int
1289 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1290 {
1291         struct xdr_stream *xdr = &rep->rr_stream;
1292         u32 writelist, replychunk;
1293
1294         /* Decode the chunk lists */
1295         if (decode_read_list(xdr))
1296                 return -EIO;
1297         if (decode_write_list(xdr, &writelist))
1298                 return -EIO;
1299         if (decode_reply_chunk(xdr, &replychunk))
1300                 return -EIO;
1301
1302         /* RDMA_NOMSG sanity checks */
1303         if (unlikely(writelist))
1304                 return -EIO;
1305         if (unlikely(!replychunk))
1306                 return -EIO;
1307
1308         /* Reply chunk buffer already is the reply vector */
1309         r_xprt->rx_stats.total_rdma_reply += replychunk;
1310         return replychunk;
1311 }
1312
1313 static noinline int
1314 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1315                      struct rpc_rqst *rqst)
1316 {
1317         struct xdr_stream *xdr = &rep->rr_stream;
1318         __be32 *p;
1319
1320         p = xdr_inline_decode(xdr, sizeof(*p));
1321         if (unlikely(!p))
1322                 return -EIO;
1323
1324         switch (*p) {
1325         case err_vers:
1326                 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1327                 if (!p)
1328                         break;
1329                 dprintk("RPC:       %s: server reports "
1330                         "version error (%u-%u), xid %08x\n", __func__,
1331                         be32_to_cpup(p), be32_to_cpu(*(p + 1)),
1332                         be32_to_cpu(rep->rr_xid));
1333                 break;
1334         case err_chunk:
1335                 dprintk("RPC:       %s: server reports "
1336                         "header decoding error, xid %08x\n", __func__,
1337                         be32_to_cpu(rep->rr_xid));
1338                 break;
1339         default:
1340                 dprintk("RPC:       %s: server reports "
1341                         "unrecognized error %d, xid %08x\n", __func__,
1342                         be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
1343         }
1344
1345         r_xprt->rx_stats.bad_reply_count++;
1346         return -EREMOTEIO;
1347 }
1348
1349 /* Perform XID lookup, reconstruction of the RPC reply, and
1350  * RPC completion while holding the transport lock to ensure
1351  * the rep, rqst, and rq_task pointers remain stable.
1352  */
1353 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1354 {
1355         struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1356         struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1357         struct rpc_rqst *rqst = rep->rr_rqst;
1358         int status;
1359
1360         switch (rep->rr_proc) {
1361         case rdma_msg:
1362                 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1363                 break;
1364         case rdma_nomsg:
1365                 status = rpcrdma_decode_nomsg(r_xprt, rep);
1366                 break;
1367         case rdma_error:
1368                 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1369                 break;
1370         default:
1371                 status = -EIO;
1372         }
1373         if (status < 0)
1374                 goto out_badheader;
1375
1376 out:
1377         spin_lock(&xprt->queue_lock);
1378         xprt_complete_rqst(rqst->rq_task, status);
1379         xprt_unpin_rqst(rqst);
1380         spin_unlock(&xprt->queue_lock);
1381         return;
1382
1383 /* If the incoming reply terminated a pending RPC, the next
1384  * RPC call will post a replacement receive buffer as it is
1385  * being marshaled.
1386  */
1387 out_badheader:
1388         trace_xprtrdma_reply_hdr(rep);
1389         r_xprt->rx_stats.bad_reply_count++;
1390         goto out;
1391 }
1392
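/* kref release callback, invoked when the Reply arrives after the
 * Send has already completed: RPC completion can proceed immediately.
 */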
1393 static void rpcrdma_reply_done(struct kref *kref)
1394 {
1395         struct rpcrdma_req *req =
1396                 container_of(kref, struct rpcrdma_req, rl_kref);
1397
1398         rpcrdma_complete_rqst(req->rl_reply);
1399 }
1400
1401 /**
1402  * rpcrdma_reply_handler - Process received RPC/RDMA messages
1403  * @rep: Incoming rpcrdma_rep object to process
1404  *
1405  * Errors must result in the RPC task either being awakened, or
1406  * allowed to timeout, to discover the errors at that time.
1407  */
1408 void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
1409 {
1410         struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1411         struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1412         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1413         struct rpcrdma_req *req;
1414         struct rpc_rqst *rqst;
1415         u32 credits;
1416         __be32 *p;
1417
1418         /* Any data means we had a useful conversation, so
1419          * we don't need to delay the next reconnect.
1420          */
1421         if (xprt->reestablish_timeout)
1422                 xprt->reestablish_timeout = 0;
1423
1424         /* Fixed transport header fields */
1425         xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
1426                         rep->rr_hdrbuf.head[0].iov_base, NULL);
1427         p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
1428         if (unlikely(!p))
1429                 goto out_shortreply;
1430         rep->rr_xid = *p++;
1431         rep->rr_vers = *p++;
1432         credits = be32_to_cpu(*p++);
1433         rep->rr_proc = *p++;
1434
1435         if (rep->rr_vers != rpcrdma_version)
1436                 goto out_badversion;
1437
1438         if (rpcrdma_is_bcall(r_xprt, rep))
1439                 return;
1440
1441         /* Match incoming rpcrdma_rep to an rpcrdma_req to
1442          * get context for handling any incoming chunks.
1443          */
1444         spin_lock(&xprt->queue_lock);
1445         rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
1446         if (!rqst)
1447                 goto out_norqst;
1448         xprt_pin_rqst(rqst);
1449         spin_unlock(&xprt->queue_lock);
1450
1451         if (credits == 0)
1452                 credits = 1;    /* don't deadlock */
1453         else if (credits > r_xprt->rx_ep.rep_max_requests)
1454                 credits = r_xprt->rx_ep.rep_max_requests;
1455         if (buf->rb_credits != credits)
1456                 rpcrdma_update_cwnd(r_xprt, credits);
1457         rpcrdma_post_recvs(r_xprt, false);
1458
1459         req = rpcr_to_rdmar(rqst);
1460         if (req->rl_reply) {
1461                 trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
1462                 rpcrdma_recv_buffer_put(req->rl_reply);
1463         }
1464         req->rl_reply = rep;
1465         rep->rr_rqst = rqst;
1466
1467         trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
1468
1469         if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
1470                 frwr_reminv(rep, &req->rl_registered);
1471         if (!list_empty(&req->rl_registered))
1472                 frwr_unmap_async(r_xprt, req);
1473                 /* LocalInv completion will complete the RPC */
1474         else
1475                 kref_put(&req->rl_kref, rpcrdma_reply_done);
1476         return;
1477
1478 out_badversion:
1479         trace_xprtrdma_reply_vers(rep);
1480         goto out;
1481
1482 out_norqst:
1483         spin_unlock(&xprt->queue_lock);
1484         trace_xprtrdma_reply_rqst(rep);
1485         goto out;
1486
1487 out_shortreply:
1488         trace_xprtrdma_reply_short(rep);
1489
1490 out:
1491         rpcrdma_recv_buffer_put(rep);
1492 }