// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *      inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *      @tw: timewait socket
 *      @hashinfo: hashinfo pointer
 *
 *      Unhash a timewait socket from the bind hash, if hashed.
 *      The bind hash lock must be held by the caller.
 *      Drops the bind-hash reference on @tw via __sock_put().
 */
void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind2_bucket *tb2 = tw->tw_tb2;
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

        __hlist_del(&tw->tw_bind2_node);
        tw->tw_tb2 = NULL;
        inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);

        __sock_put((struct sock *)tw);
}

/* Remove the timewait socket from the ehash and bind hash tables,
 * dropping the references those tables held, then decrement the
 * death-row count and drop the timer's reference.
 * Must be called with locally disabled BHs.
 */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
        struct inet_bind_hashbucket *bhead, *bhead2;

        spin_lock(lock);
        sk_nulls_del_node_init_rcu((struct sock *)tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];
        bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
                                       twsk_net(tw), tw->tw_num);

        spin_lock(&bhead->lock);
        spin_lock(&bhead2->lock);
        inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);

        refcount_dec(&tw->tw_dr->tw_refcount);
        inet_twsk_put(tw);
}

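/* Final free of a timewait socket: run the protocol's twsk destructor,
 * return the object to its slab cache and drop the module reference
 * taken in inet_twsk_alloc().  Reached via inet_twsk_put() once
 * tw_refcnt drops to zero.
 */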
void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

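/* Drop one reference; the socket is freed when the last one goes. */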
void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (refcount_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

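/* Helpers linking a timewait socket into the ehash chain and into the
 * two flavours of bind bucket.
 */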
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                   struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
                                    struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind_node, list);
}

static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
                                     struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind2_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 *
 * The caller is expected to have armed tw_timer beforehand (see
 * inet_twsk_schedule()): once this function returns, all references to
 * tw are owned by the hash chains and the timer, so tw must not be
 * touched anymore.
 */
void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead, *bhead2;

        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);

        spin_lock(&bhead->lock);
        spin_lock(&bhead2->lock);

        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);

        tw->tw_tb2 = icsk->icsk_bind2_hash;
        WARN_ON(!icsk->icsk_bind2_hash);
        inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);

        spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

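        /* Step 2: Hash TW into ehash chain */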
        inet_twsk_add_node_rcu(tw, &ehead->chain);

        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        spin_unlock(lock);

        /* tw_refcnt is set to 3 because we have:
         * - one reference for bhash chain.
         * - one reference for ehash chain.
         * - one reference for timer.
         * We can use atomic_set() because prior spin_lock()/spin_unlock()
         * committed into memory all tw fields.
         * Also note that after this point, we lost our implicit reference
         * so we are not allowed to use tw anymore.
         */
        refcount_set(&tw->tw_refcnt, 3);
}
EXPORT_SYMBOL_GPL(inet_twsk_hashdance);

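/* The TIME_WAIT period expired: unhash the socket and drop the timer's
 * reference.
 */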
static void tw_timer_handler(struct timer_list *t)
{
        struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);

        inet_twsk_kill(tw);
}

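/**
 *      inet_twsk_alloc - allocate a timewait socket for @sk
 *      @sk: the socket entering the TIME_WAIT-like state
 *      @dr: death row accounting the timewait sockets of this protocol
 *      @state: substate recorded in tw_substate (e.g. TCP_TIME_WAIT or
 *              TCP_FIN_WAIT2)
 *
 *      Returns NULL if the death row is full (sysctl_max_tw_buckets) or
 *      the slab allocation fails.  The new socket inherits its identity
 *      from @sk but starts with a zero refcount: a typical caller
 *      (e.g. tcp_time_wait()) arms the timer with inet_twsk_schedule()
 *      and then publishes the socket with inet_twsk_hashdance(), which
 *      sets the final refcount.
 */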
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
                                           const int state)
{
        struct inet_timewait_sock *tw;

        if (refcount_read(&dr->tw_refcount) - 1 >=
            READ_ONCE(dr->sysctl_max_tw_buckets))
                return NULL;

        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                              GFP_ATOMIC);
        if (tw) {
                const struct inet_sock *inet = inet_sk(sk);

                tw->tw_dr           = dr;
                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_reuseport    = sk->sk_reuseport;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, sock_net(sk));
                timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non-zero value before everything is set up for this
                 * timewait socket.
                 */
                refcount_set(&tw->tw_refcnt, 0);

                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets.
 * Warning: consumes a reference.
 * Caller should not access tw anymore.
 */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
        inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);

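/* Arm the TIME_WAIT timer.  With @rearm false this is the initial
 * schedule: the timer must not already be pending (hence the BUG_ON
 * below) and a death-row reference is taken.  With @rearm true an
 * already pending timer is pushed back, while a timer that has fired
 * is left alone.
 */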
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is probability to lose single packet and
         * time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * wait at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement waiting
         *   only for 60sec, we should wait at least for 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if peer understands PAWS, we
         * kill tw bucket after 3.5*RTO (it is important that this number
         * is greater than TS tick!) and detect old duplicates with help
         * of PAWS.
         */

        if (!rearm) {
                bool kill = timeo <= 4*HZ;

                __NET_INC_STATS(twsk_net(tw), kill ? LINUX_MIB_TIMEWAITKILLED :
                                                     LINUX_MIB_TIMEWAITED);
                BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
                refcount_inc(&tw->tw_dr->tw_refcount);
        } else {
                mod_timer_pending(&tw->tw_timer, jiffies + timeo);
        }
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);

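/* Walk the whole ehash table and kill every TIME_WAIT socket of @family
 * whose network namespace is going away (ns.count already zero).
 * Called from the protocol's netns exit path.
 */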
void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                cond_resched();
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
                        if (sk->sk_state != TCP_TIME_WAIT) {
                                /* A kernel listener socket might not hold refcnt for net,
                                 * so reqsk_timer_handler() could be fired after net is
                                 * freed.  Userspace listener and reqsk never exist here.
                                 */
                                if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
                                             hashinfo->pernet)) {
                                        struct request_sock *req = inet_reqsk(sk);

                                        inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
                                }

                                continue;
                        }

                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                            refcount_read(&twsk_net(tw)->ns.count))
                                continue;

                        if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     refcount_read(&twsk_net(tw)->ns.count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule_put(tw);
                        local_bh_enable();
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);