// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
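
/* Allocate the rpc_rqst/rpcrdma_req pairs that carry backchannel calls.
 * Twice @count pairs are provisioned; see the comment in
 * xprt_rdma_bc_setup() below for the rationale. Each rqst is parked on
 * the transport's bc_pa_list until a backchannel call arrives.
 */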
static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
                                 unsigned int count)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        unsigned int i;

        for (i = 0; i < (count << 1); i++) {
                struct rpcrdma_regbuf *rb;
                size_t size;

                req = rpcrdma_create_req(r_xprt);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                rqst = &req->rl_slot;

                rqst->rq_xprt = xprt;
                INIT_LIST_HEAD(&rqst->rq_bc_list);
                __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
                spin_lock(&xprt->bc_pa_lock);
                list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);

                size = r_xprt->rx_data.inline_rsize;
                rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
                if (IS_ERR(rb))
                        goto out_fail;
                req->rl_sendbuf = rb;
                xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
                             min_t(size_t, size, PAGE_SIZE));
        }
        return 0;

out_fail:
        rpcrdma_req_destroy(req);
        return -ENOMEM;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        int rc;

        /* The backchannel reply path returns each rpc_rqst to the
         * bc_pa_list _after_ the reply is sent. If the server is
         * faster than the client, it can send another backward
         * direction request before the rpc_rqst is returned to the
         * list. The client rejects the request in this case.
         *
         * Twice as many rpc_rqsts are prepared to ensure there is
         * always an rpc_rqst available as soon as a reply is sent.
         */
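        /* Since twice @reqs rqsts are provisioned, @reqs may use at
         * most half of RPCRDMA_BACKWARD_WRS, the number of Work
         * Requests the transport reserves for the backward direction.
         */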
        if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
                goto out_err;

        rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
        if (rc)
                goto out_free;

        r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
        trace_xprtrdma_cb_setup(r_xprt, reqs);
        return 0;

out_free:
        xprt_rdma_bc_destroy(xprt, reqs);
out_err:
        pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
        return -ENOMEM;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        size_t maxmsg;

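        /* A backchannel call and its reply must each fit in a single
         * inline buffer, so the payload limit is the smaller of the
         * inline receive and send sizes, less the transport header.
         */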
        maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
        maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
        return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        __be32 *p;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
                        req->rl_rdmabuf->rg_base);

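        /* A backchannel reply goes out as an RDMA_MSG with no chunks.
         * Its transport header is seven XDR words (28 octets): XID,
         * RPC/RDMA version, credits, RDMA_MSG, and an empty read
         * list, write list, and reply chunk.
         */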
        p = xdr_reserve_space(&req->rl_stream, 28);
        if (unlikely(!p))
                return -EIO;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        *p++ = rdma_msg;
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
                                      &rqst->rq_snd_buf, rpcrdma_noch))
                return -EIO;

        trace_xprtrdma_cb_reply(rqst);
        return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *      %0 if the RPC message has been sent
 *      %-ENOTCONN if the caller should reconnect and call again
 *      %-EIO if a permanent error occurred and the request was not
 *              sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        int rc;

        if (!xprt_connected(xprt))
                return -ENOTCONN;

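        /* Sending a backchannel reply consumes a congestion window
         * slot, just as a forward direction call does; fail the send
         * if no slot is available right now.
         */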
        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;

        rc = rpcrdma_bc_marshal_reply(rqst);
        if (rc < 0)
                goto failed_marshal;

        if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
                goto drop_connection;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);
        return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpc_rqst *rqst, *tmp;

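        /* bc_pa_lock is released around each rpcrdma_req_destroy()
         * call, presumably so that request teardown never runs under
         * the spinlock. The entry is unlinked before the lock is
         * dropped, so the walk resumes safely afterward.
         */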
        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);

                rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

                spin_lock(&xprt->bc_pa_lock);
        }
        spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpc_xprt *xprt = rqst->rq_xprt;

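        /* Return the receive buffer that carried the call to the
         * transport's buffer pool, then park this rqst back on
         * bc_pa_list so it can host the next backchannel call.
         */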
        rpcrdma_recv_buffer_put(req->rl_reply);
        req->rl_reply = NULL;

        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

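        /* A zero-length inline decode returns the current position
         * in the receive buffer (the XID of the callback request)
         * without advancing the stream; what remains in the stream
         * is the complete inlined RPC call message.
         */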
        p = xdr_inline_decode(&rep->rr_stream, 0);
        size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC: %s: callback XID %08x, length=%u\n",
                __func__, be32_to_cpup(p), size);
        pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

        /* Grab a free bc rqst */
        spin_lock(&xprt->bc_pa_lock);
        if (list_empty(&xprt->bc_pa_list)) {
                spin_unlock(&xprt->bc_pa_lock);
                goto out_overflow;
        }
        rqst = list_first_entry(&xprt->bc_pa_list,
                                struct rpc_rqst, rq_bc_pa_list);
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);

        /* Prepare rqst */
        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_bytes_sent = 0;
        rqst->rq_xid = *p;

        rqst->rq_private_buf.len = size;

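        /* The call message is not copied: rq_rcv_buf's head iovec
         * points directly into the posted receive buffer in which
         * the RPC/RDMA call arrived.
         */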
        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it is not released while the req is pointing
         * to its buffer, and so that it can be reposted after
         * the Upper Layer is done decoding it.
         */
        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        trace_xprtrdma_cb_call(rqst);

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

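        /* Wake a callback service thread waiting on sv_cb_waitq
         * (for NFSv4.1, nfs41_callback_svc) to dequeue the rqst
         * from sv_cb_list and process the call.
         */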
        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_force_disconnect(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
}