// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "fail.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);
static void	xprt_request_prepare(struct rpc_rqst *req);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
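
/*
 * Illustrative sketch (not from this file): a transport module would
 * typically register a statically-defined xprt_class from its module
 * init hook. The "example" name, ident and setup callback below are
 * hypothetical stand-ins:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,	(a real module uses its own)
 *		.setup	= xs_setup_example,	(hypothetical)
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */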

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
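
/*
 * Illustrative only: resolving the well-known "tcp" netid returns the
 * identifier of the corresponding transport class, loading the module
 * on demand via request_module("rpctcp") if it is not yet registered:
 *
 *	int ident = xprt_find_transport_ident("tcp");
 *	if (ident < 0)
 *		return ident;	(-ENOENT if no such transport)
 */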

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
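
/*
 * Worked example (assuming RPC_CWNDSCALE == 1 << 8 == 256, per
 * include/linux/sunrpc/xprt.h): with cwnd == 512 (two requests' worth
 * of credits) and a successful reply, the additive increase is
 *
 *	(256 * 256 + (512 >> 1)) / 512 = (65536 + 256) / 512 = 128
 *
 * i.e. half a credit, giving roughly 1/cwnd growth per reply. A
 * timeout instead halves cwnd (512 -> 256), never below RPC_CWNDSCALE.
 */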

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}
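
/*
 * Worked example (hypothetical rpc_timeout values): with to_initval ==
 * 10 * HZ, to_retries == 3 and to_exponential set, the major timeout
 * spans (10 * HZ) << 3 == 80s of minor timeouts (10s, 20s, 40s, each
 * capped at to_maxval); with to_exponential clear and to_increment ==
 * 5 * HZ, it spans 10s + 3 * 5s == 25s instead.
 */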

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!fail_sunrpc.ignore_client_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);
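
/*
 * Sketch of the expected calling pattern (illustrative, not a verbatim
 * copy of any one transport): a connect worker hands the transport
 * lock over to itself by swapping in an opaque cookie, and returns it
 * when the connect attempt ends:
 *
 *	if (!xprt_lock_connect(xprt, task, transport))
 *		goto out;
 *	... perform the connection attempt ...
 *	xprt_unlock_connect(xprt, transport);
 */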

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
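
/*
 * Worked example (hypothetical values): with reestablish_timeout == 3s,
 * init_to == 3s and max_reconnect_timeout == 300s, successive calls
 * yield 6s, 12s, 24s, ... capped at 300s; a value that has decayed
 * below init_to is pulled back up to init_to.
 */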

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}
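
/*
 * Typical receive-path usage (a sketch of how the socket transports
 * use the pinning API; not a verbatim copy of any one caller):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req)
 *		xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *	... copy reply data without holding queue_lock ...
 *	spin_lock(&xprt->queue_lock);
 *	if (req)
 *		xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */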

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
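
/*
 * Illustrative arithmetic (assuming rpc_calc_rto() yields an estimated
 * RTO of 2s for this procedure's timer class): a request on its second
 * transmission (rq_retries == 1) with one recorded backoff
 * (rpc_ntimeo() == 1) sleeps for 2s << 2 == 8s, clamped to
 * cl_timeout->to_maxval.
 */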

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
static void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
						struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
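
/*
 * Note on the backlog handoff (descriptive comment, added for clarity):
 * the freed rpc_rqst is passed through __xprt_set_rq() to the first
 * waiter whose task has no slot yet, so the slot never returns to
 * xprt->free; xprt_complete_request_init() then re-runs
 * xprt_request_init() in the woken task's context to give the recycled
 * slot a fresh XID.
 */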

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
	gfp_t gfp_mask = GFP_KERNEL;

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	if (current->flags & PF_WQ_WORKER)
		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
	req = kzalloc(sizeof(*req), gfp_mask);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_simple_remove(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
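
/*
 * Illustrative sketch: a transport's ->setup() routine typically wraps
 * xprt_alloc() to size its private structure and slot table. The names
 * below are hypothetical stand-ins for what a socket transport does:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_sock_xprt),
 *			  slot_table_size, max_slot_table_size);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */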

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);