// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#define BC_MAX_SLOTS	64U

unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
{
        return BC_MAX_SLOTS;
}

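/*
 * Note: BC_MAX_SLOTS caps both the preallocated pool and any slots added
 * on demand in xprt_get_bc_request() below.  Transports advertise this
 * limit through their bc_num_slots op, which for the socket transports
 * is believed to be wired directly to xprt_bc_max_slots().
 */
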
/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
        return xprt->bc_alloc_count < xprt->bc_alloc_max;
}

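/*
 * Note that a freed slot is only requeued while the pool is below
 * bc_alloc_max.  xprt_destroy_bc() lowers bc_alloc_max first, so a
 * request still in use when its session goes away fails this test on
 * release and is freed outright rather than returned to the list.
 */
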
/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
        struct xdr_buf *xbufp;

        dprintk("RPC: free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
}

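/*
 * Reset an xdr_buf to its just-allocated state before the rpc_rqst is
 * requeued: the previous callback may have consumed or resized the
 * buffer, so the head is restored to span the whole page, the tail and
 * page array are emptied, and buflen is set back to PAGE_SIZE.
 */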
static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
{
        buf->head[0].iov_len = PAGE_SIZE;
        buf->tail[0].iov_len = 0;
        buf->pages = NULL;
        buf->page_len = 0;
        buf->flags = 0;
        buf->len = 0;
        buf->buflen = PAGE_SIZE;
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
        struct page *page;

        /* Preallocate one page-sized XDR buffer */
        page = alloc_page(gfp_flags);
        if (page == NULL)
                return -ENOMEM;
        xdr_buf_init(buf, page_address(page), PAGE_SIZE);
        return 0;
}

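/*
 * The GFP flags below make the allocation opportunistic: with
 * __GFP_NORETRY | __GFP_NOWARN a failure neither triggers the OOM
 * killer nor logs a warning.  That is acceptable because both callers
 * can cope: xprt_setup_bc() unwinds and returns -ENOMEM, and
 * xprt_lookup_bc_request() simply fails the lookup.
 */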
static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
        struct rpc_rqst *req;

        /* Pre-allocate one backchannel rpc_rqst */
        req = kzalloc(sizeof(*req), gfp_flags);
        if (req == NULL)
                return NULL;

        req->rq_xprt = xprt;
        INIT_LIST_HEAD(&req->rq_bc_list);

        /* Preallocate one XDR receive buffer */
        if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc receive xbuf\n");
                goto out_free;
        }
        req->rq_rcv_buf.len = PAGE_SIZE;

        /* Preallocate one XDR send buffer */
        if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc snd xbuf\n");
                goto out_free;
        }
        return req;
out_free:
        xprt_free_allocation(req);
        return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        if (!xprt->ops->bc_setup)
                return 0;
        return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

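/*
 * Example (a sketch, not taken from this file): an NFSv4.1 client
 * reserves its callback slots once per session, roughly as in
 *
 *	rc = xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
 *	if (rc)
 *		return rc;
 *
 * where NFS41_BC_MIN_CALLBACKS is the NFS client's own minimum-slot
 * constant (named here from memory).  Whether anything happens depends
 * entirely on the transport's bc_setup op; transports without
 * backchannel support simply return 0 above.
 */
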
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        struct rpc_rqst *req;
        struct list_head tmp_list;
        int i;

        dprintk("RPC: setup backchannel transport\n");

        if (min_reqs > BC_MAX_SLOTS)
                min_reqs = BC_MAX_SLOTS;

        /*
         * We use a temporary list to keep track of the preallocated
         * buffers.  Once we're done building the list we splice it
         * into the backchannel preallocation list off of the rpc_xprt
         * struct.  This helps minimize the amount of time the list
         * lock is held on the rpc_xprt struct.  It also makes cleanup
         * easier in case of memory allocation errors.
         */
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
                req = xprt_alloc_bc_req(xprt);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
                }

                /* Add the allocated buffer to the tmp list */
                dprintk("RPC: adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);
        }

        /*
         * Add the temporary list to the backchannel preallocation list
         */
        spin_lock(&xprt->bc_pa_lock);
        list_splice(&tmp_list, &xprt->bc_pa_list);
        xprt->bc_alloc_count += min_reqs;
        xprt->bc_alloc_max += min_reqs;
        atomic_add(min_reqs, &xprt->bc_slot_count);
        spin_unlock(&xprt->bc_pa_lock);

        dprintk("RPC: setup backchannel transport done\n");
        return 0;

out_free:
        /*
         * Memory allocation failed, free the temporary list
         */
        while (!list_empty(&tmp_list)) {
                req = list_first_entry(&tmp_list,
                                       struct rpc_rqst,
                                       rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }

        dprintk("RPC: setup backchannel transport failed\n");
        return -ENOMEM;
}

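/*
 * xprt_setup_bc() above and xprt_destroy_bc() below are generic
 * implementations of the bc_setup/bc_destroy ops; the socket-based
 * transports are believed to plug them in directly, while e.g. the
 * RDMA transport provides its own.  Note the all-or-nothing error
 * path above: on allocation failure nothing is added to the
 * transport's pool.
 */
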
/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        if (xprt->ops->bc_destroy)
                xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        struct rpc_rqst *req = NULL, *tmp = NULL;

        dprintk("RPC: destroy backchannel transport\n");

        if (max_reqs == 0)
                goto out;

        spin_lock_bh(&xprt->bc_pa_lock);
        xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC: req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
                xprt->bc_alloc_count--;
                atomic_dec(&xprt->bc_slot_count);
                if (--max_reqs == 0)
                        break;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);

out:
        dprintk("RPC: backchannel list empty= %s\n",
                list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

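/*
 * Claim a free backchannel slot for the given XID.  If the
 * preallocation list is empty, the caller may pass in a freshly
 * allocated rpc_rqst ("new"); it is only added to the pool if the
 * dynamic slot count is still below BC_MAX_SLOTS.  On success the
 * oldest free entry is reinitialized and bound to this connection.
 */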
static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
                struct rpc_rqst *new)
{
        struct rpc_rqst *req = NULL;

        dprintk("RPC: allocate a backchannel request\n");
        if (list_empty(&xprt->bc_pa_list)) {
                if (!new)
                        goto not_found;
                if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
                        goto not_found;
                list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
                atomic_inc(&xprt->bc_slot_count);
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                               rq_bc_pa_list);
        req->rq_reply_bytes_recvd = 0;
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
               sizeof(req->rq_private_buf));
        req->rq_xid = xid;
        req->rq_connect_cookie = xprt->connect_cookie;
        dprintk("RPC: backchannel req=%p\n", req);
not_found:
        return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        xprt->ops->bc_free_rqst(req);
}

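/*
 * For the socket transports the bc_free_rqst op is believed to point
 * at xprt_free_bc_rqst() below; other transports may substitute their
 * own release path here.
 */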
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: free backchannel req=%p\n", req);

        req->rq_connect_cookie = xprt->connect_cookie - 1;
        smp_mb__before_atomic();
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_atomic();
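
        /*
         * Full barriers around clear_bit() above: the writes that
         * retire this request (notably the rq_connect_cookie update)
         * must be visible before RPC_BC_PA_IN_USE is seen clear, and
         * the flag must be seen clear before the request is requeued
         * below.  clear_bit() itself is not a memory barrier, hence
         * the explicit smp_mb__before_atomic()/smp_mb__after_atomic()
         * pair.
         */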

        /*
         * Return it to the list of preallocations so that it
         * may be reused by a new callback request.
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        if (xprt_need_to_requeue(xprt)) {
                xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
                xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
                req->rq_rcv_buf.len = PAGE_SIZE;
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
                atomic_inc(&xprt->bc_slot_count);
                req = NULL;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
        if (req != NULL) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use.  Free the entry and don't attempt
                 * to add it back to the list because there is no need
                 * for any more preallocated entries.
                 */
                dprintk("RPC: Last session removed req=%p\n", req);
                xprt_free_allocation(req);
        }
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_lookup_bc_request to claim
 * one of these entries; use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context; take the plain
 * spin_lock, since there is no need for the bottom-half variant.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req, *new = NULL;

        do {
                spin_lock(&xprt->bc_pa_lock);
                list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
                        if (req->rq_connect_cookie != xprt->connect_cookie)
                                continue;
                        if (req->rq_xid == xid)
                                goto found;
                }
                req = xprt_get_bc_request(xprt, xid, new);
found:
                spin_unlock(&xprt->bc_pa_lock);
                if (new) {
                        if (req != new)
                                xprt_free_allocation(new);
                        break;
                } else if (req)
                        break;
                new = xprt_alloc_bc_req(xprt);
        } while (new);
        return req;
}

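/*
 * The loop above makes at most two passes: the first allocates nothing
 * and succeeds if an entry already bound to this XID or a free slot
 * exists; otherwise a spare rpc_rqst is allocated outside bc_pa_lock
 * and offered on the second pass, being freed again if it turns out
 * not to be needed.
 */
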
/*
 * Add callback request to callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;

        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
        xprt->bc_alloc_count--;
        spin_unlock(&xprt->bc_pa_lock);

        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

        dprintk("RPC: add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
}
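
/*
 * From here the request is owned by the callback service: a thread
 * sleeping on sv_cb_waitq dequeues it from sv_cb_list and, if memory
 * serves, hands it to bc_svc_process() for the actual RPC dispatch,
 * before the entry is finally returned through xprt_free_bc_request().
 */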