net/rxrpc/call_object.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

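/*
 * Textual representations of the call states, indexed by state.
 */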
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

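/*
 * Textual representations of the call completion modes, indexed by completion.
 */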
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

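/* Slab cache from which struct rxrpc_call objects are allocated. */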
struct kmem_cache *rxrpc_call_jar;

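/*
 * Limiters on the number of outstanding calls: up to 1000 user calls and 1000
 * kernel calls may be set up at any one time (see rxrpc_get_call_slot()).
 */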
static struct semaphore rxrpc_call_limiter =
        __SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
        __SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);

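/*
 * Handle expiry of the call timer.  If the call hasn't yet completed, queue it
 * for event processing; otherwise just drop the timer's ref on the call.
 */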
static void rxrpc_call_timer_expired(struct timer_list *t)
{
        struct rxrpc_call *call = from_timer(call, t, timer);

        _enter("%d", call->debug_id);

        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer_expired(call, jiffies);
                __rxrpc_queue_call(call);
        } else {
                rxrpc_put_call(call, rxrpc_call_put);
        }
}

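/*
 * Bring the call timer's expiry forward to the given time if that's earlier
 * than the current expiry, taking a ref on the call to give to the timer if
 * it wasn't already pending.
 */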
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
                             unsigned long expire_at,
                             unsigned long now,
                             enum rxrpc_timer_trace why)
{
        if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
                trace_rxrpc_timer(call, why, now);
                if (timer_reduce(&call->timer, expire_at))
                        rxrpc_put_call(call, rxrpc_call_put_notimer);
        }
}

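/*
 * Cancel the call timer, waiting for its handler to finish, and drop the ref
 * held by the timer if it was still pending.
 */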
void rxrpc_delete_call_timer(struct rxrpc_call *call)
{
        if (del_timer_sync(&call->timer))
                rxrpc_put_call(call, rxrpc_call_put_timer);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * Find an extant call by its user ID and get a ref on it.
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, refcount_read(&call->ref));
        return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
                                    unsigned int debug_id)
{
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);

        /* Prevent lockdep reporting a deadlock false positive between the afs
         * filesystem and sys_sendmsg() via the mmap sem.
         */
        if (rx->sk.sk_kern_sock)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);

        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        INIT_LIST_HEAD(&call->tx_buffer);
        skb_queue_head_init(&call->recvmsg_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->tx_lock);
        spin_lock_init(&call->input_lock);
        spin_lock_init(&call->acks_ack_lock);
        rwlock_init(&call->state_lock);
        refcount_set(&call->ref, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
        call->next_rx_timo = 20 * HZ;
        call->next_req_timo = 1 * HZ;
        atomic64_set(&call->ackr_window, 0x100000001ULL);

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;

        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

        call->rxnet = rxnet;
        call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
        atomic_inc(&rxnet->nr_calls);
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp,
                                                  unsigned int debug_id)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;

        call->delay_ack_at = j;
        call->ack_lost_at = j;
        call->resend_at = j;
        call->ping_at = j;
        call->expect_rx_by = j;
        call->expect_req_by = j;
        call->expect_term_by = j;
        call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
        struct semaphore *limiter = &rxrpc_call_limiter;

        if (p->kernel)
                limiter = &rxrpc_kernel_call_limiter;
        if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
                down(limiter);
                return limiter;
        }
        return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
        struct semaphore *limiter = &rxrpc_call_limiter;

        if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
                limiter = &rxrpc_kernel_call_limiter;
        up(limiter);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         struct rxrpc_call_params *p,
                                         gfp_t gfp,
                                         unsigned int debug_id)
        __releases(&rx->sk.sk_lock.slock)
        __acquires(&call->user_mutex)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet;
        struct semaphore *limiter;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, p->user_call_ID);

        limiter = rxrpc_get_call_slot(p, gfp);
        if (!limiter) {
                release_sock(&rx->sk);
                return ERR_PTR(-ERESTARTSYS);
        }

        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                up(limiter);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->interruptibility = p->interruptibility;
        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
                         refcount_read(&call->ref),
                         here, (const void *)p->user_call_ID);
        if (p->kernel)
                __set_bit(RXRPC_CALL_KERNEL, &call->flags);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        spin_lock_bh(&rxnet->call_lock);
        list_add_tail_rcu(&call->link, &rxnet->calls);
        spin_unlock_bh(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error_attached_to_socket;

        trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
                         refcount_read(&call->ref), here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, -EEXIST);
        trace_rxrpc_call(call->debug_id, rxrpc_call_error,
                         refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = -EEXIST");
        return ERR_PTR(-EEXIST);

        /* We got an error, but the call is attached to the socket and is in
         * need of release.  However, we might now race with recvmsg() when
         * completing the call queues it.  Return 0 from sys_sendmsg() and
         * leave the error to recvmsg() to deal with.
         */
error_attached_to_socket:
        trace_rxrpc_call(call->debug_id, rxrpc_call_error,
                         refcount_read(&call->ref), here, ERR_PTR(ret));
        set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        _leave(" = c=%08x [err]", call->debug_id);
        return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id           = sp->hdr.callNumber;
        call->service_id        = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
        call->state             = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n;

        if (!__refcount_inc_not_zero(&call->ref, &n))
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = refcount_read(&call->ref);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = refcount_read(&call->ref);

                trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
                                 here, NULL);
        }
}

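/*
 * Try to get a ref on a call, failing if the call is already being destroyed
 * (ie. its refcount has reached zero).
 */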
bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n;

        if (!__refcount_inc_not_zero(&call->ref, &n))
                return false;
        trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
        return true;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n;

        __refcount_inc(&call->ref, &n);
        trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
        int i;

        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
                call->rxtx_buffer[i] = NULL;
        }
        skb_queue_purge(&call->recvmsg_queue);
        skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;

        _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

        trace_rxrpc_call(call->debug_id, rxrpc_call_release,
                         refcount_read(&call->ref),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();

        rxrpc_put_call_slot(call);
        rxrpc_delete_call_timer(call);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                rxrpc_disconnect_call(call);
        if (call->security)
                call->security->free_call_crypto(call);
        _leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * Drop a ref on a call and clean the call up if that was the last ref.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
        unsigned int debug_id = call->debug_id;
        bool dead;
        int n;

        ASSERT(call != NULL);

        dead = __refcount_dec_and_test(&call->ref, &n);
        trace_rxrpc_call(debug_id, op, n, here, NULL);
        if (dead) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

                if (!list_empty(&call->link)) {
                        spin_lock_bh(&rxnet->call_lock);
                        list_del_init(&call->link);
                        spin_unlock_bh(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
        struct rxrpc_net *rxnet = call->rxnet;

        rxrpc_delete_call_timer(call);

        rxrpc_put_connection(call->conn);
        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

        if (in_softirq()) {
                INIT_WORK(&call->processor, rxrpc_destroy_call);
                if (!rxrpc_queue_work(&call->processor))
                        BUG();
        } else {
                rxrpc_destroy_call(&call->processor);
        }
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        struct rxrpc_txbuf *txb;

        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

        rxrpc_cleanup_ring(call);
        while ((txb = list_first_entry_or_null(&call->tx_buffer,
                                               struct rxrpc_txbuf, call_link))) {
                list_del(&txb->call_link);
                rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
        }
        rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
        rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_cleaned);

        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (!list_empty(&rxnet->calls)) {
                spin_lock_bh(&rxnet->call_lock);

                while (!list_empty(&rxnet->calls)) {
                        call = list_entry(rxnet->calls.next,
                                          struct rxrpc_call, link);
                        _debug("Zapping call %p", call);

                        rxrpc_see_call(call);
                        list_del_init(&call->link);

                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, refcount_read(&call->ref),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);

                        spin_unlock_bh(&rxnet->call_lock);
                        cond_resched();
                        spin_lock_bh(&rxnet->call_lock);
                }

                spin_unlock_bh(&rxnet->call_lock);
        }

        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}