
svcrdma: Do not send Write chunk XDR pad with inline content
author Chuck Lever <chuck.lever@oracle.com>
Tue, 1 Mar 2016 18:05:54 +0000 (13:05 -0500)
committer J. Bruce Fields <bfields@redhat.com>
Tue, 1 Mar 2016 21:06:34 +0000 (13:06 -0800)
The NFS server's XDR encoder adds an XDR pad for content in the
xdr_buf page list at the beginning of the xdr_buf's tail buffer.

On RDMA transports, Write chunks are sent separately and without an
XDR pad.

If a Write chunk is being sent, strip off the pad in the tail buffer
so that inline content following the Write chunk remains XDR-aligned
when it is sent to the client.
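
For reference, XDR rounds the page-list payload up to a four-byte
boundary. A minimal sketch of that pad calculation (mirroring the
xdr_padsize() helper already present in
net/sunrpc/xprtrdma/svc_rdma_sendto.c; the example values below are
illustrative, not taken from this patch):

	/* Pad bytes needed to bring len up to the next XDR four-byte
	 * boundary; 0 when len is already aligned.
	 */
	static u32 xdr_padsize(u32 len)
	{
		return (len & 3) ? (4 - (len & 3)) : 0;
	}

	/* e.g. a 3-byte page-list payload yields a 1-byte pad, which
	 * the encoder places at the start of the tail buffer; when a
	 * Write chunk is present, svc_rdma_map_xdr() now skips it.
	 */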

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=294
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea..40b6785 100644
@@ -224,7 +224,7 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 
 /* svc_rdma_sendto.c */
 extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
-                           struct svc_rdma_req_map *);
+                           struct svc_rdma_req_map *, bool);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
        svc_rdma_get_read_chunk(struct rpcrdma_msg *);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 65a7c23..de39196 100644
@@ -107,7 +107,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
        int ret;
 
        vec = svc_rdma_get_req_map(rdma);
-       ret = svc_rdma_map_xdr(rdma, sndbuf, vec);
+       ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
        if (ret)
                goto out_err;
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 86fea5c..a8fab99 100644
@@ -57,7 +57,8 @@ static u32 xdr_padsize(u32 len)
 
 int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
                     struct xdr_buf *xdr,
-                    struct svc_rdma_req_map *vec)
+                    struct svc_rdma_req_map *vec,
+                    bool write_chunk_present)
 {
        int sge_no;
        u32 sge_bytes;
@@ -97,9 +98,20 @@ int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 
        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
-               vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
-               vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
-               sge_no++;
+               unsigned char *base = xdr->tail[0].iov_base;
+               size_t len = xdr->tail[0].iov_len;
+               u32 xdr_pad = xdr_padsize(xdr->page_len);
+
+               if (write_chunk_present && xdr_pad) {
+                       base += xdr_pad;
+                       len -= xdr_pad;
+               }
+
+               if (len) {
+                       vec->sge[sge_no].iov_base = base;
+                       vec->sge[sge_no].iov_len = len;
+                       sge_no++;
+               }
        }
 
        dprintk("svcrdma: %s: sge_no %d page_no %d "
@@ -594,7 +606,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map(rdma);
-       ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
+       ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;