// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED] = "Uninit ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
	[RXRPC_CALL_COMPLETE] = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED] = "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR] = "LocError",
	[RXRPC_CALL_NETWORK_ERROR] = "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
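
/* Note: the two limiters above each cap the number of calls in flight at
 * 1000: rxrpc_call_limiter covers calls started from userspace and
 * rxrpc_kernel_call_limiter covers calls started by in-kernel users (e.g. the
 * afs filesystem).  A slot is taken in rxrpc_get_call_slot() and handed back
 * in rxrpc_put_call_slot().
 */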

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer_expired(call, jiffies);
		__rxrpc_queue_call(call);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
}

void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
		trace_rxrpc_timer(call, why, now);
		if (timer_reduce(&call->timer, expire_at))
			rxrpc_put_call(call, rxrpc_call_put_notimer);
	}
}

void rxrpc_delete_call_timer(struct rxrpc_call *call)
{
	if (del_timer_sync(&call->timer))
		rxrpc_put_call(call, rxrpc_call_put_timer);
}
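
/* Note on timer refcounting: rxrpc_reduce_call_timer() takes a ref on the
 * call before (re)arming the timer and drops it again if timer_reduce()
 * reports that the timer was already pending, so a pending timer holds
 * exactly one ref.  That ref is released either in rxrpc_delete_call_timer()
 * when a pending timer is cancelled, or in rxrpc_call_timer_expired(), which
 * hands it to the work item via __rxrpc_queue_call() or puts it once the
 * call is complete.
 */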

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}
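
/* Note: a call returned by rxrpc_find_call_by_user_ID() carries an extra ref
 * (rxrpc_call_got) that the caller is expected to drop with rxrpc_put_call()
 * when it has finished with it.
 */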

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	spin_lock_init(&call->input_lock);
	rwlock_init(&call->state_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
	atomic64_set(&call->ackr_window, 0x100000001ULL);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;

	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}
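
/* Note: the helper above only primes every event time to the far future
 * (now + MAX_JIFFY_OFFSET); the timer is pulled forward later, e.g. by
 * rxrpc_reduce_call_timer(), as ACK, resend and expiry events are actually
 * scheduled.
 */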

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
			 refcount_read(&call->ref),
			 here, (const void *)p->user_call_ID);
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock_bh(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock_bh(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
			 refcount_read(&call->ref), here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, -EEXIST);
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() when
	 * completing the call queues it.  Return 0 from sys_sendmsg() and
	 * leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 refcount_read(&call->ref), here, ERR_PTR(ret));
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}
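
/* Note the asymmetry of the two error paths above: a duplicate user ID fails
 * the call outright with -EEXIST, whereas a connection set-up failure leaves
 * the call attached to the socket and returns it so that recvmsg() can report
 * the error to the user.
 */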

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (!__refcount_inc_not_zero(&call->ref, &n))
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = refcount_read(&call->ref);

	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
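
/* Note the difference between the two queueing helpers above:
 * rxrpc_queue_call() takes a new ref to hand to the work queue, whereas
 * __rxrpc_queue_call() donates the ref its caller already holds.  In both
 * cases, if the work item turns out to be queued already, the surplus ref is
 * dropped again (rxrpc_call_put_noqueue).
 */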

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
				 here, NULL);
	}
}

bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (!__refcount_inc_not_zero(&call->ref, &n))
		return false;
	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
	return true;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	__refcount_inc(&call->ref, &n);
	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	int i;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
		call->rxtx_buffer[i] = NULL;
	}
	skb_queue_purge(&call->recvmsg_queue);
	skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
			 refcount_read(&call->ref),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);
	rxrpc_delete_call_timer(call);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		rxrpc_disconnect_call(call);
	if (call->security)
		call->security->free_call_crypto(call);
	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * Dispose of a reference on a call.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = call->debug_id;
	bool dead;
	int n;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &n);
	trace_rxrpc_call(debug_id, op, n, here, NULL);
	if (dead) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock_bh(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock_bh(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_delete_call_timer(call);

	rxrpc_put_connection(call->conn);
	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	if (in_softirq()) {
		INIT_WORK(&call->processor, rxrpc_destroy_call);
		if (!rxrpc_queue_work(&call->processor))
			BUG();
	} else {
		rxrpc_destroy_call(&call->processor);
	}
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	struct rxrpc_txbuf *txb;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	rxrpc_cleanup_ring(call);
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
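
/* Note: final destruction is a three-stage pipeline: rxrpc_cleanup_call()
 * defers through call_rcu() to rxrpc_rcu_destroy_call(), which either frees
 * the call directly or punts to the rxrpc_destroy_call() work item so that
 * the final kfree() always happens in process context.
 */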

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock_bh(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			spin_unlock_bh(&rxnet->call_lock);
			cond_resched();
			spin_lock_bh(&rxnet->call_lock);
		}

		spin_unlock_bh(&rxnet->call_lock);
	}

	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}