// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);
}

static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst;
	unsigned int i;

	for (i = 0; i < (count << 1); i++) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_req *req;
		size_t size;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req))
			return PTR_ERR(req);
		rqst = &req->rl_slot;

		rqst->rq_xprt = xprt;
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
		spin_lock(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

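		/* Backchannel replies go out as a single inline Send,
		 * so each reply buffer is sized to the connection's
		 * inline threshold and capped at one page below.
		 */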
		size = r_xprt->rx_data.inline_rsize;
		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
		if (IS_ERR(rb))
			goto out_fail;
		req->rl_sendbuf = rb;
		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
			     min_t(size_t, size, PAGE_SIZE));
	}
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

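	/* Example, with illustrative numbers: a ULP that asks for
	 * reqs = 4 concurrent backchannel requests gets 8 rpc_rqsts
	 * from rpcrdma_bc_setup_reqs() below; the doubled count must
	 * fit the QP's extra backchannel WRs, hence the cap above.
	 */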
	rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
	if (rc)
		goto out_free;

	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);
out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
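	/* Worked example, assuming the common 4096-byte inline
	 * thresholds on a 4KB-page system: maxmsg = 4096, so the
	 * payload limit is 4096 - 28 (RPCRDMA_HDRLEN_MIN) = 4068.
	 */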
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

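	/* Reserve 7 XDR words (28 bytes), enough for a complete
	 * RPC-over-RDMA transport header that carries no chunks:
	 * XID, version, credit grant, RDMA_MSG, and three empty
	 * chunk lists.
	 */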
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(rqst->rq_xprt))
		goto drop_connection;

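	/* Each backchannel reply consumes one RPC congestion-window
	 * slot, like a forward-direction call would.
	 */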
	if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	rpcrdma_post_recvs(r_xprt, true);
	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_disconnect_done(rqst->rq_xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
		__func__, rqst, req);

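	/* Return the rpcrdma_rep that was hooked up by
	 * rpcrdma_bc_receive_call(); the Upper Layer has finished
	 * decoding it.
	 */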
	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

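	/* An inline decode of zero bytes yields a pointer to the
	 * start of the backchannel call; the bytes remaining in
	 * the stream are the call's total length.
	 */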
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
}