rxrpc: Fix call leak
[platform/kernel/linux-rpi.git] / net / rxrpc / call_event.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
3  *
4  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/module.h>
11 #include <linux/circ_buf.h>
12 #include <linux/net.h>
13 #include <linux/skbuff.h>
14 #include <linux/slab.h>
15 #include <linux/udp.h>
16 #include <net/sock.h>
17 #include <net/af_rxrpc.h>
18 #include "ar-internal.h"
19
20 /*
21  * Propose a PING ACK be sent.
22  */
23 void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
24                         enum rxrpc_propose_ack_trace why)
25 {
26         unsigned long now = jiffies;
27         unsigned long ping_at = now + rxrpc_idle_ack_delay;
28
29         if (time_before(ping_at, call->ping_at)) {
30                 WRITE_ONCE(call->ping_at, ping_at);
31                 rxrpc_reduce_call_timer(call, ping_at, now,
32                                         rxrpc_timer_set_for_ping);
33                 trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
34         }
35 }
36
37 /*
38  * Propose a DELAY ACK be sent in the future.
39  */
40 void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
41                              enum rxrpc_propose_ack_trace why)
42 {
43         unsigned long expiry = rxrpc_soft_ack_delay;
44         unsigned long now = jiffies, ack_at;
45
46         call->ackr_serial = serial;
47
48         if (rxrpc_soft_ack_delay < expiry)
49                 expiry = rxrpc_soft_ack_delay;
50         if (call->peer->srtt_us != 0)
51                 ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
52         else
53                 ack_at = expiry;
54
55         ack_at += READ_ONCE(call->tx_backoff);
56         ack_at += now;
57         if (time_before(ack_at, call->delay_ack_at)) {
58                 WRITE_ONCE(call->delay_ack_at, ack_at);
59                 rxrpc_reduce_call_timer(call, ack_at, now,
60                                         rxrpc_timer_set_for_ack);
61         }
62
63         trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
64 }
65
/*
 * Queue an ACK for immediate transmission.
 *
 * Allocates an ACK-type txbuf for @call with the given @ack_reason,
 * referencing @serial, queues it on the local endpoint's ack_tx_queue and
 * then either transmits the queue directly (task context) or kicks the
 * local endpoint's worker to do so (softirq context).
 */
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
		    rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
	struct rxrpc_local *local = call->conn->params.local;
	struct rxrpc_txbuf *txb;

	/* A disconnected call no longer has a path to the peer. */
	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;
	/* Allow only one DELAY ACK in flight at a time; presumably the flag
	 * is cleared when the queued ACK is transmitted — TODO confirm in the
	 * transmit path.
	 */
	if (ack_reason == RXRPC_ACK_DELAY &&
	    test_and_set_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags)) {
		trace_rxrpc_drop_ack(call, why, ack_reason, serial, false);
		return;
	}

	rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);

	/* We may be in softirq context, where we cannot sleep for memory, so
	 * fail the allocation silently rather than blocking.
	 */
	txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
				in_softirq() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
	if (!txb) {
		kleave(" = -ENOMEM");
		return;
	}

	/* Fill in the fixed part of the ACK; fields such as the soft-ACK
	 * table and window parameters are completed at transmission time.
	 */
	txb->ack_why		= why;
	txb->wire.seq		= 0;
	txb->wire.type		= RXRPC_PACKET_TYPE_ACK;
	txb->wire.flags		|= RXRPC_SLOW_START_OK;
	txb->ack.bufferSpace	= 0;
	txb->ack.maxSkew	= 0;
	txb->ack.firstPacket	= 0;
	txb->ack.previousPacket	= 0;
	txb->ack.serial		= htonl(serial);
	txb->ack.reason		= ack_reason;
	txb->ack.nAcks		= 0;

	/* Take a ref on the call for the queued txbuf; if the call is already
	 * being torn down, discard the txbuf instead of queuing it.
	 */
	if (!rxrpc_try_get_call(call, rxrpc_call_got)) {
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_nomem);
		return;
	}

	spin_lock_bh(&local->ack_tx_lock);
	list_add_tail(&txb->tx_link, &local->ack_tx_queue);
	spin_unlock_bh(&local->ack_tx_lock);
	trace_rxrpc_send_ack(call, why, ack_reason, serial);

	/* Transmit directly when we can; otherwise hand off to the local
	 * endpoint's worker (which needs its own ref on the local object).
	 */
	if (in_task()) {
		rxrpc_transmit_ack_packets(call->peer->local);
	} else {
		rxrpc_get_local(local);
		rxrpc_queue_local(local);
	}
}
121
/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	/* Just record the event; the congestion-control state machine reacts
	 * to this flag elsewhere.
	 */
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
129
130 /*
131  * Perform retransmission of NAK'd and unack'd packets.
132  */
133 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
134 {
135         struct rxrpc_ackpacket *ack = NULL;
136         struct rxrpc_txbuf *txb;
137         struct sk_buff *ack_skb = NULL;
138         unsigned long resend_at;
139         rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
140         ktime_t now, max_age, oldest, ack_ts;
141         bool unacked = false;
142         unsigned int i;
143         LIST_HEAD(retrans_queue);
144
145         _enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
146
147         now = ktime_get_real();
148         max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
149         oldest = now;
150
151         /* See if there's an ACK saved with a soft-ACK table in it. */
152         if (call->acks_soft_tbl) {
153                 spin_lock_bh(&call->acks_ack_lock);
154                 ack_skb = call->acks_soft_tbl;
155                 if (ack_skb) {
156                         rxrpc_get_skb(ack_skb, rxrpc_skb_ack);
157                         ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
158                 }
159                 spin_unlock_bh(&call->acks_ack_lock);
160         }
161
162         if (list_empty(&call->tx_buffer))
163                 goto no_resend;
164
165         spin_lock(&call->tx_lock);
166
167         if (list_empty(&call->tx_buffer))
168                 goto no_further_resend;
169
170         trace_rxrpc_resend(call);
171         txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
172
173         /* Scan the soft ACK table without dropping the lock and resend any
174          * explicitly NAK'd packets.
175          */
176         if (ack) {
177                 for (i = 0; i < ack->nAcks; i++) {
178                         rxrpc_seq_t seq;
179
180                         if (ack->acks[i] & 1)
181                                 continue;
182                         seq = ntohl(ack->firstPacket) + i;
183                         if (after(txb->seq, transmitted))
184                                 break;
185                         if (after(txb->seq, seq))
186                                 continue; /* A new hard ACK probably came in */
187                         list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
188                                 if (txb->seq == seq)
189                                         goto found_txb;
190                         }
191                         goto no_further_resend;
192
193                 found_txb:
194                         if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
195                                 continue; /* Ack point not yet reached */
196
197                         rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
198
199                         if (list_empty(&txb->tx_link)) {
200                                 rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
201                                 list_add_tail(&txb->tx_link, &retrans_queue);
202                                 set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
203                         }
204
205                         trace_rxrpc_retransmit(call, txb->seq,
206                                                ktime_to_ns(ktime_sub(txb->last_sent,
207                                                                      max_age)));
208
209                         if (list_is_last(&txb->call_link, &call->tx_buffer))
210                                 goto no_further_resend;
211                         txb = list_next_entry(txb, call_link);
212                 }
213         }
214
215         /* Fast-forward through the Tx queue to the point the peer says it has
216          * seen.  Anything between the soft-ACK table and that point will get
217          * ACK'd or NACK'd in due course, so don't worry about it here; here we
218          * need to consider retransmitting anything beyond that point.
219          *
220          * Note that ACK for a packet can beat the update of tx_transmitted.
221          */
222         if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
223                 goto no_further_resend;
224
225         list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
226                 if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
227                         continue;
228                 if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
229                         break; /* Not transmitted yet */
230
231                 if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
232                     before(ntohl(txb->wire.serial), ntohl(ack->serial)))
233                         goto do_resend; /* Wasn't accounted for by a more recent ping. */
234
235                 if (ktime_after(txb->last_sent, max_age)) {
236                         if (ktime_before(txb->last_sent, oldest))
237                                 oldest = txb->last_sent;
238                         continue;
239                 }
240
241         do_resend:
242                 unacked = true;
243                 if (list_empty(&txb->tx_link)) {
244                         rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
245                         list_add_tail(&txb->tx_link, &retrans_queue);
246                         set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
247                         rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
248                 }
249         }
250
251 no_further_resend:
252         spin_unlock(&call->tx_lock);
253 no_resend:
254         rxrpc_free_skb(ack_skb, rxrpc_skb_freed);
255
256         resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
257         resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
258                                                      !list_empty(&retrans_queue));
259         WRITE_ONCE(call->resend_at, resend_at);
260
261         if (unacked)
262                 rxrpc_congestion_timeout(call);
263
264         /* If there was nothing that needed retransmission then it's likely
265          * that an ACK got lost somewhere.  Send a ping to find out instead of
266          * retransmitting data.
267          */
268         if (list_empty(&retrans_queue)) {
269                 rxrpc_reduce_call_timer(call, resend_at, now_j,
270                                         rxrpc_timer_set_for_resend);
271                 ack_ts = ktime_sub(now, call->acks_latest_ts);
272                 if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
273                         goto out;
274                 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
275                                rxrpc_propose_ack_ping_for_lost_ack);
276                 goto out;
277         }
278
279         while ((txb = list_first_entry_or_null(&retrans_queue,
280                                                struct rxrpc_txbuf, tx_link))) {
281                 list_del_init(&txb->tx_link);
282                 rxrpc_send_data_packet(call, txb);
283                 rxrpc_put_txbuf(txb, rxrpc_txbuf_put_trans);
284
285                 trace_rxrpc_retransmit(call, txb->seq,
286                                        ktime_to_ns(ktime_sub(txb->last_sent,
287                                                              max_age)));
288         }
289
290 out:
291         _leave("");
292 }
293
/*
 * Handle retransmission and deferred ACK/abort generation.
 *
 * Work item for a call: determines which of the call's timers have expired,
 * raises the corresponding event bits, processes those events (abort
 * transmission, delayed/keepalive/ping ACKs, resend) and finally rearms the
 * call timer for the soonest remaining deadline.  May requeue itself if more
 * work remains.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	unsigned long now, next, t;
	unsigned int iterations = 0;
	rxrpc_serial_t ackr_serial;

	rxrpc_see_call(call);

	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	/* Reclaim hard-ACK'd buffers from the Tx queue. */
	if (READ_ONCE(call->acks_hard_ack) != call->tx_bottom)
		rxrpc_shrink_call_tx_buffer(call);

	if (call->state == RXRPC_CALL_COMPLETE) {
		rxrpc_delete_call_timer(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped.  Each expired timer is pushed to
	 * now + MAX_JIFFY_OFFSET via cmpxchg so it only fires once per
	 * arming; cmpxchg leaves it alone if it was rearmed concurrently.
	 */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* Idle timeout only applies while the server is awaiting the request. */
	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->delay_ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
		/* Consume the pending serial atomically so a concurrent
		 * proposer can't have its serial ACK'd twice.
		 * NOTE(review): the trace reason below looks copy-pasted from
		 * the lost-ack path; a delay-ACK-specific reason may have been
		 * intended — confirm against the trace definitions.
		 */
		ackr_serial = xchg(&call->ackr_serial, 0);
		rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
			       rxrpc_propose_ack_ping_for_lost_ack);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_keepalive);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_keepalive);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		/* If the peer has been heard from more recently on the
		 * connection than on this call, the call itself probably died
		 * at the far end - report a reset rather than a timeout.
		 */
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_lost_ack);
	}

	/* No point resending while the client is still receiving the reply -
	 * the Tx phase is over.
	 */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
	    call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

	/* Fold each deadline into 'next', keeping the earliest. */
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->delay_ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	/* If the earliest deadline has already passed, go round again rather
	 * than arming a timer in the past.
	 */
	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}