net/rxrpc/proc.c (platform/kernel/linux-rpi.git, commit 63947cce4048363329fdb260a5a9b99a4c53d5e6)
// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

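/*
 * The show functions and seq_operations tables below back the rxrpc proc
 * files (calls, conns, peers, locals and stats).  The proc_create_net()
 * registration itself lives elsewhere (net_ns.c in mainline; treat the exact
 * location as an assumption for this tree).
 */
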
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
        [RXRPC_CONN_UNUSED]                     = "Unused  ",
        [RXRPC_CONN_CLIENT]                     = "Client  ",
        [RXRPC_CONN_SERVICE_PREALLOC]           = "SvPrealc",
        [RXRPC_CONN_SERVICE_UNSECURED]          = "SvUnsec ",
        [RXRPC_CONN_SERVICE_CHALLENGING]        = "SvChall ",
        [RXRPC_CONN_SERVICE]                    = "SvSecure",
        [RXRPC_CONN_ABORTED]                    = "Aborted ",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc/calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
        __acquires(rcu)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        rcu_read_lock();
        return seq_list_start_head_rcu(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        return seq_list_next_rcu(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
        struct rxrpc_local *local;
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
        unsigned long timeout = 0;
        rxrpc_seq_t acks_hard_ack;
        char lbuff[50], rbuff[50];
        u64 wtmp;

        if (v == &rxnet->calls) {
                seq_puts(seq,
                         "Proto Local                                          "
                         " Remote                                         "
                         " SvID ConnID   CallID   End Use State    Abort   "
                         " DebugId  TxSeq    TW RxSeq    RW RxSerial CW RxTimo\n");
                return 0;
        }

        call = list_entry(v, struct rxrpc_call, link);

        local = call->local;
        if (local)
                sprintf(lbuff, "%pISpc", &local->srx.transport);
        else
                strcpy(lbuff, "no_local");

        sprintf(rbuff, "%pISpc", &call->dest_srx.transport);

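        /*
         * For anything other than a preallocated service call, show how long
         * (in jiffies) remains until the call next expects to receive a
         * packet; preallocated calls have no such expectation yet and show 0.
         */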
        if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
                timeout = READ_ONCE(call->expect_rx_by);
                timeout -= jiffies;
        }

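        /*
         * ackr_window packs the receive window into a single 64-bit value;
         * going by how it is printed below, the lower 32 bits hold the window
         * base and the upper 32 bits the window top, so upper - lower is the
         * current window size.
         */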
        acks_hard_ack = READ_ONCE(call->acks_hard_ack);
        wtmp   = atomic64_read_acquire(&call->ackr_window);
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
                   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
                   lbuff,
                   rbuff,
                   call->dest_srx.srx_service,
                   call->cid,
                   call->call_id,
                   rxrpc_is_service_call(call) ? "Svc" : "Clt",
                   refcount_read(&call->ref),
                   rxrpc_call_states[call->state],
                   call->abort_code,
                   call->debug_id,
                   acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
                   lower_32_bits(wtmp), upper_32_bits(wtmp) - lower_32_bits(wtmp),
                   call->rx_serial,
                   call->cong_cwnd,
                   timeout);

        return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
        .start  = rxrpc_call_seq_start,
        .next   = rxrpc_call_seq_next,
        .stop   = rxrpc_call_seq_stop,
        .show   = rxrpc_call_seq_show,
};

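/*
 * Example (assuming the proc files are created under /proc/net/rxrpc/ as in
 * mainline):
 *
 *      cat /proc/net/rxrpc/calls
 *
 * prints the header row followed by one line per extant call in the format
 * built by rxrpc_call_seq_show() above.
 */
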
/*
 * generate a list of extant virtual connections in /proc/net/rxrpc/conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
        __acquires(rxnet->conn_lock)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        read_lock(&rxnet->conn_lock);
        return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
                                       loff_t *pos)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
        __releases(rxnet->conn_lock)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
        struct rxrpc_connection *conn;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
        const char *state;
        char lbuff[50], rbuff[50];

        if (v == &rxnet->conn_proc_list) {
                seq_puts(seq,
                         "Proto Local                                          "
                         " Remote                                         "
                         " SvID ConnID   End Ref Act State    Key     "
                         " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
                         );
                return 0;
        }

        conn = list_entry(v, struct rxrpc_connection, proc_link);
        if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
                strcpy(lbuff, "no_local");
                strcpy(rbuff, "no_connection");
                goto print;
        }

        sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
        sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
print:
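        /*
         * An aborted connection reports the reason it completed rather than a
         * normal connection state.
         */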
        state = rxrpc_is_conn_aborted(conn) ?
                rxrpc_call_completions[conn->completion] :
                rxrpc_conn_states[conn->state];
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %4x %08x %s %3u %3d"
                   " %s %08x %08x %08x %08x %08x %08x %08x\n",
                   lbuff,
                   rbuff,
                   conn->service_id,
                   conn->proto.cid,
                   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
                   refcount_read(&conn->ref),
                   atomic_read(&conn->active),
                   state,
                   key_serial(conn->key),
                   atomic_read(&conn->serial),
                   conn->hi_serial,
                   conn->channels[0].call_id,
                   conn->channels[1].call_id,
                   conn->channels[2].call_id,
                   conn->channels[3].call_id);

        return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
        .start  = rxrpc_connection_seq_start,
        .next   = rxrpc_connection_seq_next,
        .stop   = rxrpc_connection_seq_stop,
        .show   = rxrpc_connection_seq_show,
};

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
        struct rxrpc_peer *peer;
        time64_t now;
        char lbuff[50], rbuff[50];

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "Proto Local                                          "
                         " Remote                                         "
                         " Use SST   MTU LastUse      RTT      RTO\n"
                         );
                return 0;
        }

        peer = list_entry(v, struct rxrpc_peer, hash_link);

        sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

        sprintf(rbuff, "%pISpc", &peer->srx.transport);

        now = ktime_get_seconds();
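        /*
         * srtt_us is assumed to be kept scaled by 8, as in the TCP RTT
         * estimator, so the right-shift by 3 below yields the smoothed RTT in
         * microseconds; the retransmission timeout is converted from jiffies
         * to microseconds.
         */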
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %3u"
                   " %3u %5u %6llus %8u %8u\n",
                   lbuff,
                   rbuff,
                   refcount_read(&peer->ref),
                   peer->cong_ssthresh,
                   peer->mtu,
                   now - peer->last_tx_at,
                   peer->srtt_us >> 3,
                   jiffies_to_usecs(peer->rto_j));

        return 0;
}

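/*
 * The iterator position packs the peer hash bucket into the high bits of
 * *_pos and a 1-based index within that bucket into the low 'shift' bits;
 * index 0 in bucket 0 stands for the header row (SEQ_START_TOKEN).
 */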
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
        __acquires(rcu)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
        unsigned int bucket, n;
        unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
        void *p;

        rcu_read_lock();

        if (*_pos >= UINT_MAX)
                return NULL;

        n = *_pos & ((1U << shift) - 1);
        bucket = *_pos >> shift;
        for (;;) {
                if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
                        *_pos = UINT_MAX;
                        return NULL;
                }
                if (n == 0) {
                        if (bucket == 0)
                                return SEQ_START_TOKEN;
                        *_pos += 1;
                        n++;
                }

                p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
                if (p)
                        return p;
                bucket++;
                n = 1;
                *_pos = (bucket << shift) | n;
        }
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
        unsigned int bucket, n;
        unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
        void *p;

        if (*_pos >= UINT_MAX)
                return NULL;

        bucket = *_pos >> shift;

        p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
        if (p)
                return p;

        for (;;) {
                bucket++;
                n = 1;
                *_pos = (bucket << shift) | n;

                if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
                        *_pos = UINT_MAX;
                        return NULL;
                }
                if (n == 0) {
                        *_pos += 1;
                        n++;
                }

                p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
                if (p)
                        return p;
        }
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
        .start  = rxrpc_peer_seq_start,
        .next   = rxrpc_peer_seq_next,
        .stop   = rxrpc_peer_seq_stop,
        .show   = rxrpc_peer_seq_show,
};

/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
{
        struct rxrpc_local *local;
        char lbuff[50];

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "Proto Local                                          "
                         " Use Act RxQ\n");
                return 0;
        }

        local = hlist_entry(v, struct rxrpc_local, link);

        sprintf(lbuff, "%pISpc", &local->srx.transport);

        seq_printf(seq,
                   "UDP   %-47.47s %3u %3u %3u\n",
                   lbuff,
                   refcount_read(&local->ref),
                   atomic_read(&local->active_users),
                   local->rx_queue.qlen);

        return 0;
}

static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
        __acquires(rcu)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
        unsigned int n;

        rcu_read_lock();

        if (*_pos >= UINT_MAX)
                return NULL;

        n = *_pos;
        if (n == 0)
                return SEQ_START_TOKEN;

        return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
}

static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

        if (*_pos >= UINT_MAX)
                return NULL;

        return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
}

static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

const struct seq_operations rxrpc_local_seq_ops = {
        .start  = rxrpc_local_seq_start,
        .next   = rxrpc_local_seq_next,
        .stop   = rxrpc_local_seq_stop,
        .show   = rxrpc_local_seq_show,
};

/*
 * Display stats in /proc/net/rxrpc/stats
 */
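/*
 * Example (path as above assumed): "cat /proc/net/rxrpc/stats" emits the
 * counter groups printed below, one line per group.
 */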
int rxrpc_stats_show(struct seq_file *seq, void *v)
{
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));

        seq_printf(seq,
                   "Data     : send=%u sendf=%u fail=%u\n",
                   atomic_read(&rxnet->stat_tx_data_send),
                   atomic_read(&rxnet->stat_tx_data_send_frag),
                   atomic_read(&rxnet->stat_tx_data_send_fail));
        seq_printf(seq,
                   "Data-Tx  : nr=%u retrans=%u uf=%u cwr=%u\n",
                   atomic_read(&rxnet->stat_tx_data),
                   atomic_read(&rxnet->stat_tx_data_retrans),
                   atomic_read(&rxnet->stat_tx_data_underflow),
                   atomic_read(&rxnet->stat_tx_data_cwnd_reset));
        seq_printf(seq,
                   "Data-Rx  : nr=%u reqack=%u jumbo=%u\n",
                   atomic_read(&rxnet->stat_rx_data),
                   atomic_read(&rxnet->stat_rx_data_reqack),
                   atomic_read(&rxnet->stat_rx_data_jumbo));
        seq_printf(seq,
                   "Ack      : fill=%u send=%u skip=%u\n",
                   atomic_read(&rxnet->stat_tx_ack_fill),
                   atomic_read(&rxnet->stat_tx_ack_send),
                   atomic_read(&rxnet->stat_tx_ack_skip));
        seq_printf(seq,
                   "Ack-Tx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]),
                   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE]));
        seq_printf(seq,
                   "Ack-Rx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]),
                   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]));
        seq_printf(seq,
                   "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]));
        seq_printf(seq,
                   "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]),
                   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin]));
        seq_printf(seq,
                   "Buffers  : txb=%u rxb=%u\n",
                   atomic_read(&rxrpc_nr_txbuf),
                   atomic_read(&rxrpc_n_rx_skbs));
        seq_printf(seq,
                   "IO-thread: loops=%u\n",
                   atomic_read(&rxnet->stat_io_loop));
        return 0;
}

/*
 * Clear stats if /proc/net/rxrpc/stats is written to.
 */
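/*
 * Example (assuming the stats file was created writable):
 *
 *      echo > /proc/net/rxrpc/stats
 *
 * A bare newline (or an empty write) resets the counters; anything else is
 * rejected with -EINVAL.
 */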
int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
{
        struct seq_file *m = file->private_data;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m));

        if (size > 1 || (size == 1 && buf[0] != '\n'))
                return -EINVAL;

        atomic_set(&rxnet->stat_tx_data, 0);
        atomic_set(&rxnet->stat_tx_data_retrans, 0);
        atomic_set(&rxnet->stat_tx_data_underflow, 0);
        atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0);
        atomic_set(&rxnet->stat_tx_data_send, 0);
        atomic_set(&rxnet->stat_tx_data_send_frag, 0);
        atomic_set(&rxnet->stat_tx_data_send_fail, 0);
        atomic_set(&rxnet->stat_rx_data, 0);
        atomic_set(&rxnet->stat_rx_data_reqack, 0);
        atomic_set(&rxnet->stat_rx_data_jumbo, 0);

        atomic_set(&rxnet->stat_tx_ack_fill, 0);
        atomic_set(&rxnet->stat_tx_ack_send, 0);
        atomic_set(&rxnet->stat_tx_ack_skip, 0);
        memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks));
        memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks));

        memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack));

        atomic_set(&rxnet->stat_io_loop, 0);
        return size;
}