// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Notification handler substituted onto calls being discarded so that any
 * notifications posted after the discard are swallowed harmlessly.
 */
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max = tmp + 1;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer;

		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 user_call_ID, rxrpc_call_new_prealloc_service);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
		user_attach_call(call, user_call_ID);
	}

	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock_bh(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock_bh(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

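/* Illustrative sketch, not part of the original file: the peer, conn and call
 * backlogs charged above all follow the standard <linux/circ_buf.h>
 * single-producer/single-consumer ring protocol with a power-of-two size, so
 * wrapping an index is a simple mask.  The hypothetical ring below shows the
 * same head/tail discipline in miniature; struct ring, ring_produce() and
 * ring_consume() are invented names.
 */
#if 0	/* example only, not built */
struct ring {
	void		*slot[RXRPC_BACKLOG_MAX];
	unsigned int	head;	/* Advanced by the producer only */
	unsigned int	tail;	/* Advanced by the consumer only */
};

static int ring_produce(struct ring *r, void *item)
{
	unsigned int head = r->head;
	unsigned int tail = READ_ONCE(r->tail);

	if (CIRC_SPACE(head, tail, RXRPC_BACKLOG_MAX) == 0)
		return -ENOBUFS;
	r->slot[head] = item;
	/* Publish the filled slot before moving the head index on. */
	smp_store_release(&r->head, (head + 1) & (RXRPC_BACKLOG_MAX - 1));
	return 0;
}

static void *ring_consume(struct ring *r)
{
	/* Pairs with the producer's release so the slot contents are seen. */
	unsigned int head = smp_load_acquire(&r->head);
	unsigned int tail = r->tail;
	void *item;

	if (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) == 0)
		return NULL;
	item = r->slot[tail];
	r->slot[tail] = NULL;
	smp_store_release(&r->tail, (tail + 1) & (RXRPC_BACKLOG_MAX - 1));
	return item;
}
#endif
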
/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	/* Discard peers that were preallocated but never used. */
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_conn);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	/* Discard unused preallocated connections, unlinking them from the
	 * net's connection lists first.
	 */
	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	/* Discard unused preallocated calls, letting a kernel service detach
	 * its state first.
	 */
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_send_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
			       rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
		conn->peer = peer;
		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
		atomic_inc(&conn->active);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call, rxrpc_call_see_accept);
	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
	call->dest_srx = peer->srx;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	call->tx_last_sent = ktime_get_real();
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We pass this ref to the
 * caller.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held and a ref on it.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

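/* Illustrative sketch, not from the original file: userspace reaches
 * rxrpc_user_charge_accept() via sendmsg() on a bound, listening AF_RXRPC
 * socket, passing an RXRPC_CHARGE_ACCEPT control message whose payload is
 * the user call ID (assuming the uapi in <linux/rxrpc.h>; error handling
 * omitted):
 *
 *	unsigned long user_id = 0x1234;
 *	char control[CMSG_SPACE(sizeof(user_id))];
 *	struct msghdr msg = {
 *		.msg_control	= control,
 *		.msg_controllen	= sizeof(control),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_CHARGE_ACCEPT;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(user_id));
 *	memcpy(CMSG_DATA(cmsg), &user_id, sizeof(user_id));
 *	msg.msg_controllen = cmsg->cmsg_len;
 *	sendmsg(sock_fd, &msg, 0);
 */
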
/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
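
/* Illustrative sketch, not from the original file: a kernel service (kafs is
 * the in-tree user) keeps the backlog charged by calling the helper exported
 * above once per incoming call it is prepared to accept.  All "my_"-prefixed
 * names are hypothetical.
 *
 *	static atomic_t my_debug_id;
 *
 *	static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
 *				 unsigned long user_call_ID);
 *	static void my_attach_call(struct rxrpc_call *call,
 *				   unsigned long user_call_ID);
 *
 *	static int my_charge_one(struct socket *rxrpc_sock, struct my_call *mc)
 *	{
 *		return rxrpc_kernel_charge_accept(rxrpc_sock, my_notify_rx,
 *						  my_attach_call,
 *						  (unsigned long)mc, GFP_KERNEL,
 *						  atomic_inc_return(&my_debug_id));
 *	}
 */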