/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer is removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

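/*
 * A sketch of the call sequence described above, from the RPC client's
 * point of view (illustrative only; the real state machine lives in
 * clnt.c and sched.c):
 *
 *	xprt_reserve(task);		// allocate a slot or sleep on ->backlog
 *	...encode the RPC message into the request...
 *	xprt_prepare_transmit(task);	// serialize write access
 *	xprt_transmit(task);		// send; sleep on ->pending if a
 *					// reply is expected
 *	...data_ready -> xprt_lookup_rqst() -> xprt_complete_rqst()...
 *	xprt_release(task);		// return the slot when done
 */
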
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of
 * outstanding RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt)	((xprt)->cong >= (xprt)->cwnd)

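/*
 * A worked example, assuming a slot table of 16 entries:
 * RPC_CWNDSCALE = 256, so the initial window RPC_INITCWND leaves room
 * for exactly one request in flight, while RPC_MAXCWND = 16 << 8 = 4096
 * leaves room for all 16. Each request holding a congestion slot adds
 * RPC_CWNDSCALE to xprt->cong; the transport counts as congested once
 * cong >= cwnd.
 */
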
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

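/*
 * Typical usage from a transport module's init path. This is a
 * hypothetical example (the in-tree users are xprtsock.c and xprtrdma);
 * "example_transport" and "xs_setup_example" are placeholders, and a
 * real module would define its own unique ident value:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= xs_setup_example,
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */
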
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
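	/*
	 * Queue at a priority that reflects progress: tasks without a
	 * request slot wait at the lowest priority, first-time sends at
	 * normal priority, and retransmissions at the highest, so that
	 * requests already on the wire drain first.
	 */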
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
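	/*
	 * If a close is pending, leave XPRT_LOCKED held: xprt_autoclose()
	 * runs as the lock holder and releases write access once the
	 * transport has actually been closed.
	 */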
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
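		/*
		 * Illustrative arithmetic: with RPC_CWNDSCALE = 256 and
		 * cwnd = 512 (two requests' worth), the increment is
		 * (65536 + 256) / 512 = 128, i.e. half a request per
		 * reply; additive increase of roughly 1/cwnd, as in TCP.
		 */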
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
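	/*
	 * Back off exponentially: double the RTT-based estimate once for
	 * each timeout already recorded against this timer class and once
	 * for each retransmission of this particular request.
	 */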
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

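	/*
	 * A worked example, assuming the UDP defaults from xprtsock.c
	 * (to_initval = 5s, to_increment = 5s, to_retries = 5, linear
	 * backoff): rq_majortimeo lands 5 + 5 * 5 = 30 seconds in the
	 * future, which is also to_maxval there.
	 */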
	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
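		/*
		 * Per Karn's algorithm, only sample the round-trip time
		 * when the request was transmitted exactly once; a reply
		 * to a retransmitted request is ambiguous.
		 */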
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update
	 * req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(xprt, task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		rpc_sleep_on(&xprt->backlog, task, NULL);
		dprintk("RPC:       waiting for request slot\n");
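		/* fall through - tk_status ends up -EAGAIN in both cases */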
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			break;
		list_add(&req->rq_list, &xprt->free);
	}
	if (i < num_prealloc)
		goto out_free;
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

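/*
 * XIDs are opaque cookies: the counter starts at a random value (see
 * xprt_init_xid below) and is simply incremented for each new request,
 * hence the __force cast rather than a byte-order conversion.
 */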
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
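	/*
	 * Dropping the final reference triggers xprt_destroy(), which
	 * tears down the timer, wait queues and the transport itself.
	 */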
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (atomic_inc_not_zero(&xprt->count))
		return xprt;
	return NULL;
}