// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

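/* A do-nothing notify_rx hook.  This is substituted for a kernel service's
 * notification handler when a preallocated call is discarded below, so that
 * any late notifications are quietly dropped.
 */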
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
                               unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                      struct rxrpc_backlog *b,
                                      rxrpc_notify_rx_t notify_rx,
                                      rxrpc_user_attach_call_t user_attach_call,
                                      unsigned long user_call_ID, gfp_t gfp,
                                      unsigned int debug_id)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        struct rb_node *parent, **pp;
        int max, tmp;
        unsigned int size = RXRPC_BACKLOG_MAX;
        unsigned int head, tail, call_head, call_tail;

        max = rx->sk.sk_max_ack_backlog;
        tmp = rx->sk.sk_ack_backlog;
        if (tmp >= max) {
                _leave(" = -ENOBUFS [full %u]", max);
                return -ENOBUFS;
        }
        max -= tmp;

        /* We don't need more conns and peers than we have calls, but on the
         * other hand, we shouldn't ever use more peers than conns or conns
         * than calls.
         */
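        /* Note that CIRC_CNT(head, tail, size) from <linux/circ_buf.h> is
         * ((head - tail) & (size - 1)), i.e. the number of occupied slots in
         * a power-of-two ring.  For example, with size 32, head 3 and
         * tail 30, it yields (3 - 30) & 31 = 5 occupied slots.
         */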
        call_head = b->call_backlog_head;
        call_tail = READ_ONCE(b->call_backlog_tail);
        tmp = CIRC_CNT(call_head, call_tail, size);
        if (tmp >= max) {
                _leave(" = -ENOBUFS [enough %u]", tmp);
                return -ENOBUFS;
        }
        max = tmp + 1;

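        /* After this preallocation succeeds there will be tmp + 1 calls in
         * the backlog, so top the peer and conn pools up to at least that.
         */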
        head = b->peer_backlog_head;
        tail = READ_ONCE(b->peer_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_peer *peer;

                peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
                if (!peer)
                        return -ENOMEM;
                b->peer_backlog[head] = peer;
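                /* Publish the new entry: this release pairs with the
                 * smp_load_acquire() of peer_backlog_head in
                 * rxrpc_alloc_incoming_call().
                 */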
                smp_store_release(&b->peer_backlog_head,
                                  (head + 1) & (size - 1));
        }

        head = b->conn_backlog_head;
        tail = READ_ONCE(b->conn_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_connection *conn;

                conn = rxrpc_prealloc_service_connection(rxnet, gfp);
                if (!conn)
                        return -ENOMEM;
                b->conn_backlog[head] = conn;
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));

                trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 refcount_read(&conn->ref), here);
        }

        /* Now it gets complicated, because calls get registered with the
         * socket here, with a user ID preassigned by the user.
         */
        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;

        trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
                         refcount_read(&call->ref),
                         here, (const void *)user_call_ID);

        write_lock(&rx->call_lock);

        /* Check the user ID isn't already in use */
        pp = &rx->calls.rb_node;
        parent = NULL;
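        /* Standard rbtree descent, keyed on user_call_ID: remember the
         * parent and link pointer so that rb_link_node() can splice the new
         * call in at the point where the search fell off the tree.
         */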
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto id_in_use;
        }

        call->user_call_ID = user_call_ID;
        call->notify_rx = notify_rx;
        if (user_attach_call) {
                rxrpc_get_call(call, rxrpc_call_got_kernel);
                user_attach_call(call, user_call_ID);
        }

        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        spin_lock_bh(&rxnet->call_lock);
        list_add_tail_rcu(&call->link, &rxnet->calls);
        spin_unlock_bh(&rxnet->call_lock);

        b->call_backlog[call_head] = call;
        smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
        _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
        return 0;

id_in_use:
        write_unlock(&rx->call_lock);
        rxrpc_cleanup_call(call);
        _leave(" = -EBADSLT");
        return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (!b) {
                b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
                if (!b)
                        return -ENOMEM;
                rx->backlog = b;
        }

        return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

        if (!b)
                return;
        rx->backlog = NULL;

        /* Make sure that there aren't any incoming calls in progress before we
         * clear the preallocation buffers.
         */
        spin_lock_bh(&rx->incoming_lock);
        spin_unlock_bh(&rx->incoming_lock);
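        /* The empty lock/unlock pair acts as a barrier:
         * rxrpc_new_incoming_call() holds incoming_lock while it consumes
         * from this backlog, so once we've cycled the lock with rx->backlog
         * already cleared, any call setup that was in progress has finished
         * with the buffers.
         */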
        head = b->peer_backlog_head;
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
                rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_conn);
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }

        head = b->conn_backlog_head;
        tail = b->conn_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_connection *conn = b->conn_backlog[tail];
                write_lock(&rxnet->conn_lock);
                list_del(&conn->link);
                list_del(&conn->proc_link);
                write_unlock(&rxnet->conn_lock);
                kfree(conn);
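                /* The netns teardown path waits for nr_conns to drop to
                 * zero, so wake it if we just released the last conn.
                 */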
                if (atomic_dec_and_test(&rxnet->nr_conns))
                        wake_up_var(&rxnet->nr_conns);
                tail = (tail + 1) & (size - 1);
        }

        head = b->call_backlog_head;
        tail = b->call_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_call *call = b->call_backlog[tail];
                rcu_assign_pointer(call->socket, rx);
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
                        if (call->notify_rx)
                                call->notify_rx = rxrpc_dummy_notify;
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
                tail = (tail + 1) & (size - 1);
        }

        kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        ktime_t now = skb->tstamp;

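        /* Only ping if we have fewer than three RTT samples or the last
         * ping-for-params was sent more than a second before this packet
         * arrived.
         */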
        if (call->peer->rtt_count < 3 ||
            ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
                rxrpc_send_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
                               rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    const struct rxrpc_security *sec,
                                                    struct sk_buff *skb)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
        unsigned short call_count, conn_count;

        /* Every incoming call consumes a call slot, but only sometimes a
         * conn or peer, so consumption obeys #calls >= #conns >= #peers and
         * the remaining backlog must obey the reverse ordering; the
         * assertions below check this.  The acquire loads pair with the
         * smp_store_release() calls in rxrpc_service_prealloc_one().
         */
        call_head = smp_load_acquire(&b->call_backlog_head);
        call_tail = b->call_backlog_tail;
        call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
        conn_head = smp_load_acquire(&b->conn_backlog_head);
        conn_tail = b->conn_backlog_tail;
        conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
        ASSERTCMP(conn_count, >=, call_count);
        peer_head = smp_load_acquire(&b->peer_backlog_head);
        peer_tail = b->peer_backlog_tail;
        ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
                  conn_count);

        if (call_count == 0)
                return NULL;

        if (!conn) {
                if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
                        peer = NULL;
                if (!peer) {
                        peer = b->peer_backlog[peer_tail];
                        if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
                                return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));

                        rxrpc_new_incoming_peer(rx, local, peer);
                }

                /* Now allocate and set up the connection */
                conn = b->conn_backlog[conn_tail];
                b->conn_backlog[conn_tail] = NULL;
                smp_store_release(&b->conn_backlog_tail,
                                  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
                conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
                conn->peer = peer;
                rxrpc_see_connection(conn);
                rxrpc_new_incoming_connection(rx, conn, sec, skb);
        } else {
                rxrpc_get_connection(conn);
        }

        /* And now we can allocate and set up a new call */
        call = b->call_backlog[call_tail];
        b->call_backlog[call_tail] = NULL;
        smp_store_release(&b->call_backlog_tail,
                          (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

        rxrpc_see_call(call);
        call->conn = conn;
        call->security = conn->security;
        call->security_ix = conn->security_ix;
        call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
        call->cong_ssthresh = call->peer->cong_ssthresh;
        call->tx_last_sent = ktime_get_real();
        return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct rxrpc_sock *rx,
                                           struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        const struct rxrpc_security *sec = NULL;
        struct rxrpc_connection *conn;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_call *call = NULL;

        _enter("");

        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                goto no_call;
        }

        /* The peer, connection and call may all have sprung into existence due
         * to a duplicate packet being handled on another CPU in parallel, so
         * we have to recheck the routing.  However, we're now holding
         * rx->incoming_lock, so the values should remain stable.
         */
        conn = rxrpc_find_connection_rcu(local, skb, &peer);

        if (!conn) {
                sec = rxrpc_get_incoming_security(rx, skb);
                if (!sec)
                        goto no_call;
        }

        call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, skb);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                goto no_call;
        }

        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);

        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;

        if (rx->notify_new_call)
                rx->notify_new_call(&rx->sk, call, call->user_call_ID);

        spin_lock(&conn->state_lock);
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
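                /* First call on an as-yet unsecured conn: kick the
                 * connection event processor to issue a security challenge
                 * before the call can proceed.
                 */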
                conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
                set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
                rxrpc_queue_conn(call->conn);
                break;

        case RXRPC_CONN_SERVICE:
                write_lock(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE)
                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                write_unlock(&call->state_lock);
                break;

        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
                                          conn->abort_code, conn->error);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
                                 conn->abort_code, conn->error);
                break;
        default:
                BUG();
        }
        spin_unlock(&conn->state_lock);
        spin_unlock(&rx->incoming_lock);

        rxrpc_send_ping(call, skb);

        /* We have to discard the prealloc queue's ref here and rely on a
         * combination of the RCU read lock and refs held either by the socket
         * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
         * service to prevent the call from being deallocated too early.
         */
        rxrpc_put_call(call, rxrpc_call_put);

        _leave(" = %p{%d}", call, call->debug_id);
        return call;

no_call:
        spin_unlock(&rx->incoming_lock);
        _leave(" = NULL [%u]", skb->mark);
        return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (rx->sk.sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
                                          GFP_KERNEL,
                                          atomic_inc_return(&rxrpc_debug_id));
}
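/* Note for userspace callers: this is reached via sendmsg() on an rxrpc
 * socket carrying an RXRPC_CHARGE_ACCEPT control message with the user call
 * ID; the control-message parsing lives in the sendmsg code, outside this
 * file.
 */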

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
                               rxrpc_notify_rx_t notify_rx,
                               rxrpc_user_attach_call_t user_attach_call,
                               unsigned long user_call_ID, gfp_t gfp,
                               unsigned int debug_id)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_backlog *b = rx->backlog;

        if (sock->sk->sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, notify_rx,
                                          user_attach_call, user_call_ID,
                                          gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
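
/* A minimal usage sketch for a kernel service (hypothetical names, modelled
 * on how a service might keep its accept backlog topped up; not taken from
 * this file):
 *
 *	static void my_attach(struct rxrpc_call *rxcall, unsigned long id)
 *	{
 *		struct my_call *c = (struct my_call *)id;
 *
 *		c->rxcall = rxcall;	// stash the rxrpc call in our record
 *	}
 *
 *	for (;;) {
 *		struct my_call *c = my_call_alloc(GFP_KERNEL);
 *
 *		if (!c)
 *			break;
 *		if (rxrpc_kernel_charge_accept(srv->socket, my_notify,
 *					       my_attach, (unsigned long)c,
 *					       GFP_KERNEL,
 *					       atomic_inc_return(&rxrpc_debug_id)) < 0) {
 *			my_call_free(c);	// backlog full (-ENOBUFS)
 *			break;
 *		}
 *	}
 */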