2e3f0a222e1b4ed4c9e803d0d483b5ee9d62ff7b
[platform/kernel/linux-rpi.git] / net / rxrpc / conn_object.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* RxRPC virtual connection handler, common bits.
3  *
4  * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/net.h>
13 #include <linux/skbuff.h>
14 #include "ar-internal.h"
15
/*
 * Time till a connection expires after last use (in seconds).
 * rxrpc_connection_expiry applies to ordinary service connections;
 * rxrpc_closed_conn_expiry is the much shorter grace period used by the
 * reaper when the local endpoint's service has been closed.
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

/* Forward declarations for the destructor work function and the reap-timer
 * helper, both defined later in this file.
 */
static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);
25
26 void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
27 {
28         struct rxrpc_local *local = conn->local;
29         bool busy;
30
31         if (WARN_ON_ONCE(!local))
32                 return;
33
34         spin_lock_bh(&local->lock);
35         busy = !list_empty(&conn->attend_link);
36         if (!busy) {
37                 rxrpc_get_connection(conn, why);
38                 list_add_tail(&conn->attend_link, &local->conn_attend_q);
39         }
40         spin_unlock_bh(&local->lock);
41         rxrpc_wake_up_io_thread(local);
42 }
43
44 static void rxrpc_connection_timer(struct timer_list *timer)
45 {
46         struct rxrpc_connection *conn =
47                 container_of(timer, struct rxrpc_connection, timer);
48
49         rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
50 }
51
52 /*
53  * allocate a new connection
54  */
55 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
56                                                 gfp_t gfp)
57 {
58         struct rxrpc_connection *conn;
59
60         _enter("");
61
62         conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
63         if (conn) {
64                 INIT_LIST_HEAD(&conn->cache_link);
65                 timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
66                 INIT_WORK(&conn->processor, rxrpc_process_connection);
67                 INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
68                 INIT_LIST_HEAD(&conn->proc_link);
69                 INIT_LIST_HEAD(&conn->link);
70                 skb_queue_head_init(&conn->rx_queue);
71                 conn->rxnet = rxnet;
72                 conn->security = &rxrpc_no_security;
73                 spin_lock_init(&conn->state_lock);
74                 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
75                 conn->idle_timestamp = jiffies;
76         }
77
78         _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
79         return conn;
80 }
81
/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match or the record found doesn't
 * actually correspond to the packet's epoch/endpoint/peer.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		/* A zero refcount means the conn is already being torn down;
		 * treat it as not found rather than resurrecting it.
		 */
		_debug("no conn");
		goto not_found;
	}

	/* The ID slot may have been recycled: check the epoch and local
	 * endpoint match the packet before trusting the record.
	 */
	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	/* Finally, the remote transport address must match the peer this
	 * connection is bound to.
	 */
	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port ||
		    peer->srx.transport.sin.sin_addr.s_addr !=
		    srx->transport.sin.sin_addr.s_addr)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port ||
		    memcmp(&peer->srx.transport.sin6.sin6_addr,
			   &srx->transport.sin6.sin6_addr,
			   sizeof(struct in6_addr)) != 0)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}
146
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The channel is selected by the bottom bits of the call's CID. */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			/* Completed normally: replay as a final ACK. */
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			/* We aborted it: replay with the recorded abort code. */
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			/* Anything else is replayed as a dead-call abort. */
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(): the saved reply state
		 * above must be visible before last_call is advanced.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
190
191 /*
192  * Disconnect a call and clear any channel it occupies when that call
193  * terminates.
194  */
195 void rxrpc_disconnect_call(struct rxrpc_call *call)
196 {
197         struct rxrpc_connection *conn = call->conn;
198
199         set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
200         rxrpc_see_call(call, rxrpc_call_see_disconnected);
201
202         call->peer->cong_ssthresh = call->cong_ssthresh;
203
204         if (!hlist_unhashed(&call->error_link)) {
205                 spin_lock(&call->peer->lock);
206                 hlist_del_init(&call->error_link);
207                 spin_unlock(&call->peer->lock);
208         }
209
210         if (rxrpc_is_client_call(call)) {
211                 rxrpc_disconnect_client_call(conn->bundle, call);
212         } else {
213                 spin_lock(&conn->bundle->channel_lock);
214                 __rxrpc_disconnect_call(conn, call);
215                 spin_unlock(&conn->bundle->channel_lock);
216
217                 conn->idle_timestamp = jiffies;
218                 if (atomic_dec_and_test(&conn->active))
219                         rxrpc_set_service_reap_timer(conn->rxnet,
220                                                      jiffies + rxrpc_connection_expiry);
221         }
222
223         rxrpc_put_call(call, rxrpc_call_put_io_thread);
224 }
225
226 /*
227  * Queue a connection's work processor, getting a ref to pass to the work
228  * queue.
229  */
230 void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
231 {
232         if (atomic_read(&conn->active) >= 0 &&
233             rxrpc_queue_work(&conn->processor))
234                 rxrpc_see_connection(conn, why);
235 }
236
237 /*
238  * Note the re-emergence of a connection.
239  */
240 void rxrpc_see_connection(struct rxrpc_connection *conn,
241                           enum rxrpc_conn_trace why)
242 {
243         if (conn) {
244                 int r = refcount_read(&conn->ref);
245
246                 trace_rxrpc_conn(conn->debug_id, r, why);
247         }
248 }
249
250 /*
251  * Get a ref on a connection.
252  */
253 struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
254                                               enum rxrpc_conn_trace why)
255 {
256         int r;
257
258         __refcount_inc(&conn->ref, &r);
259         trace_rxrpc_conn(conn->debug_id, r + 1, why);
260         return conn;
261 }
262
263 /*
264  * Try to get a ref on a connection.
265  */
266 struct rxrpc_connection *
267 rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
268                            enum rxrpc_conn_trace why)
269 {
270         int r;
271
272         if (conn) {
273                 if (__refcount_inc_not_zero(&conn->ref, &r))
274                         trace_rxrpc_conn(conn->debug_id, r + 1, why);
275                 else
276                         conn = NULL;
277         }
278         return conn;
279 }
280
281 /*
282  * Set the service connection reap timer.
283  */
284 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
285                                          unsigned long reap_at)
286 {
287         if (rxnet->live)
288                 timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
289 }
290
/*
 * Destroy a virtual connection.  RCU callback: free the connection's memory
 * once all RCU readers have gone, then account the removal against the
 * namespace's connection count.
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	/* Save rxnet before kfree() so it can still be used afterwards. */
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	/* Wake anyone in rxrpc_destroy_all_connections() waiting for the
	 * conn count to hit zero.
	 */
	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}
309
/*
 * Clean up a dead connection: stop its timer and work item, withdraw it from
 * the proc list, release the resources it pins and hand the memory to RCU
 * for freeing.  Invoked directly from rxrpc_put_connection() or via the
 * system workqueue.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	/* No channel may still hold a call and the conn must be off the
	 * client bundle's cache list by this point.
	 */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	/* Release the security state and the refs this conn holds on other
	 * objects.
	 */
	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 *
	 * NOTE(review): rxrpc_rcu_free_connection() as visible in this file
	 * does not purge the queue again - confirm whether that drain was
	 * removed intentionally or this comment is stale.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
351
/*
 * Drop a ref on a connection.  If that was the last ref, clean the connection
 * up - either synchronously, or via the system workqueue when called from
 * softirq context or whilst the processor work item or timer may still be
 * running.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	/* Save the debug ID first: if our ref wasn't the last one, conn may
	 * be freed by another context as soon as the refcount is dropped and
	 * must not be dereferenced for the trace.
	 */
	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}
382
/*
 * Reap dead service connections: find service conns that are inactive and
 * whose idle period has expired, withdraw them from the service list and
 * drop the list's ref on each.  Also rearms the reap timer for the earliest
 * future expiry found.
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		/* Skip conns that are in use or only preallocated. */
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		/* Whilst the net and local endpoint are live, honour the idle
		 * expiry period; on shutdown, reap unconditionally.
		 */
		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			/* A closed service gets a much shorter grace period. */
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				/* Not expired yet - remember the earliest
				 * expiry so the timer can be rearmed for it.
				 */
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	/* Dispose of the reaped conns outside the lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}
460
/*
 * Preemptively destroy all the service connection records rather than
 * waiting for them to time out, then wait for RCU to finish freeing every
 * connection in the namespace.
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* NOTE(review): this decrement presumably balances an initial bias on
	 * nr_conns set when the namespace was created so the count can reach
	 * zero - confirm against the net-init code.
	 */
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	/* Stop the periodic timer, then run the reaper once more by hand
	 * (rxnet->live is false so it reaps unconditionally) and wait for it.
	 */
	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the service list at this point has leaked a ref. */
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}