// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

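/*
 * Illustrative userspace sketch (not part of this file): every listener
 * that sets SO_REUSEPORT before bind() on the same address/port joins one
 * sock_reuseport group, and an optional BPF program can be attached to
 * pick the receiving socket:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, backlog);
 *	optionally: SO_ATTACH_REUSEPORT_EBPF / SO_ATTACH_REUSEPORT_CBPF
 */
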
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany);

void reuseport_has_conns_set(struct sock *sk)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (likely(reuse))
		reuse->has_conns = 1;
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_has_conns_set);

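/* reuse->socks[] is split into two sections: listening sockets occupy
 * indices [0, num_socks) and shutdown()ed/close()d TCP sockets awaiting
 * request migration occupy the tail [max_socks - num_closed_socks,
 * max_socks). Return the index of sk in the requested section, or -1 if
 * it is not there.
 */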
static int reuseport_sock_index(struct sock *sk,
				const struct sock_reuseport *reuse,
				bool closed)
{
	int left, right;

	if (!closed) {
		left = 0;
		right = reuse->num_socks;
	} else {
		left = reuse->max_socks - reuse->num_closed_socks;
		right = reuse->max_socks;
	}

	for (; left < right; left++)
		if (reuse->socks[left] == sk)
			return left;
	return -1;
}

static void __reuseport_add_sock(struct sock *sk,
				 struct sock_reuseport *reuse)
{
	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
	smp_wmb();
	reuse->num_socks++;
}

static bool __reuseport_detach_sock(struct sock *sk,
				    struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, false);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
	reuse->num_socks--;

	return true;
}

static void __reuseport_add_closed_sock(struct sock *sk,
					struct sock_reuseport *reuse)
{
	reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
					   struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, true);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);

	return true;
}

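/* Allocate a group with room for max_socks sockets. GFP_ATOMIC is used
 * because callers may run in softirq context or under reuseport_lock.
 */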
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

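/* Create the reuseport group for the first socket, sk. Called from the
 * bind()/hash path and from setsockopt(SO_REUSEPORT), which may race;
 * whoever loses the race simply reuses the existing group.
 */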
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		if (reuse->num_closed_socks) {
			/* sk was shutdown()ed before */
			ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
			goto out;
		}

		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->bind_inany = bind_inany;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

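/* Double the socks[] array (up to U16_MAX entries), copy both the
 * listening and the closed sections, and switch every member's
 * sk_reuseport_cb over to the new group. When already at the limit,
 * try to make room by dropping one closed socket instead.
 */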
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX) {
		if (reuse->num_closed_socks) {
			/* Make room by removing a closed sk.
			 * The child has already been migrated.
			 * Only reqsk left at this point.
			 */
			struct sock *sk;

			sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
			RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
			__reuseport_detach_closed_sock(sk, reuse);

			return reuse;
		}

		return NULL;
	}

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->num_closed_socks = reuse->num_closed_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	memcpy(more_reuse->socks +
	       (more_reuse->max_socks - more_reuse->num_closed_socks),
	       reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
	       reuse->num_closed_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->max_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

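/* RCU callback that releases the attached BPF prog, the group ID, and
 * the group itself once the last socket has left.
 */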
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_closed_socks) {
		/* sk was shutdown()ed before */
		int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

		spin_unlock_bh(&reuseport_lock);
		return err;
	}

	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

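/* Bring a shutdown()ed socket back into a listening section: either pop
 * it from the closed section of its current group, or detach it from the
 * old group and add it to reuse (allocating a fresh group when reuse is
 * NULL, i.e. in the bind()/listen() path).
 */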
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany)
{
	if (old_reuse == reuse) {
		/* If sk was in the same reuseport group, just pop sk out of
		 * the closed section and push sk into the listening section.
		 */
		__reuseport_detach_closed_sock(sk, old_reuse);
		__reuseport_add_sock(sk, old_reuse);
		return 0;
	}

	if (!reuse) {
		/* In bind()/listen() path, we cannot carry over the eBPF prog
		 * for the shutdown()ed socket. In setsockopt() path, we should
		 * not change the eBPF prog of listening sockets by attaching a
		 * prog to the shutdown()ed socket. Thus, we will allocate a new
		 * reuseport group and detach sk from the old group.
		 */
		int id;

		reuse = __reuseport_alloc(INIT_SOCKS);
		if (!reuse)
			return -ENOMEM;

		id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
		if (id < 0) {
			kfree(reuse);
			return id;
		}

		reuse->reuseport_id = id;
		reuse->bind_inany = bind_inany;
	} else {
		/* Move sk from the old group to the new one if
		 * - all the other listeners in the old group were close()d or
		 *   shutdown()ed, and then sk2 has listen()ed on the same port
		 * OR
		 * - sk listen()ed without bind() (or with autobind), was
		 *   shutdown()ed, and then listen()s on another port which
		 *   sk2 listen()s on.
		 */
		if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
			reuse = reuseport_grow(reuse);
			if (!reuse)
				return -ENOMEM;
		}
	}

	__reuseport_detach_closed_sock(sk, old_reuse);
	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}

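/* Remove sk from its group completely (listening or closed section) and
 * free the group via RCU once it becomes empty.
 */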
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	if (!__reuseport_detach_closed_sock(sk, reuse))
		__reuseport_detach_sock(sk, reuse);

	if (reuse->num_socks + reuse->num_closed_socks == 0)
		call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

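/* Called when a TCP listener stops listening. If request migration is
 * possible (sysctl_tcp_migrate_req or a SELECT_OR_MIGRATE BPF prog), keep
 * sk in the closed section so its children can be migrated; otherwise
 * detach it right away.
 */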
void reuseport_stop_listen_sock(struct sock *sk)
{
	if (sk->sk_protocol == IPPROTO_TCP) {
		struct sock_reuseport *reuse;
		struct bpf_prog *prog;

		spin_lock_bh(&reuseport_lock);

		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
			 */
			bpf_sk_reuseport_detach(sk);

			__reuseport_detach_sock(sk, reuse);
			__reuseport_add_closed_sock(sk, reuse);

			spin_unlock_bh(&reuseport_lock);
			return;
		}

		spin_unlock_bh(&reuseport_lock);
	}

	/* Not capable to do migration, detach immediately */
	reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);

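/* Run a classic/eBPF socket filter (not a BPF_PROG_TYPE_SK_REUSEPORT
 * program) over the skb to obtain an index into reuse->socks[]. A shared
 * skb is cloned first, and the data pointer is restored afterwards.
 */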
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

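/* Hash-based fallback: start at reciprocal_scale(hash, num_socks) and
 * skip sockets in TCP_ESTABLISHED state (e.g. connected UDP sockets),
 * wrapping around at most once before giving up.
 */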
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
						  u32 hash, u16 num_socks)
{
	int i, j;

	i = j = reciprocal_scale(hash, num_socks);
	while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
		i++;
		if (i >= num_socks)
			i = 0;
		if (i == j)
			return NULL;
	}

	return reuse->socks[i];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data. If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in __reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 *  reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: close()ed or shutdown()ed socket in the group.
 *  @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *    NEW_SYN_RECV request socket during 3WHS.
 *  @skb: skb to run through BPF filter.
 *  Returns a socket (with sk_refcnt +1) that should accept the child socket
 *  (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb)
{
	struct sock_reuseport *reuse;
	struct sock *nsk = NULL;
	bool allocated = false;
	struct bpf_prog *prog;
	u16 socks;
	u32 hash;

	rcu_read_lock();

	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (!reuse)
		goto out;

	socks = READ_ONCE(reuse->num_socks);
	if (unlikely(!socks))
		goto failure;

	/* paired with smp_wmb() in __reuseport_add_sock() */
	smp_rmb();

	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}

	if (!skb) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			goto failure;
		allocated = true;
	}

	nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

	if (allocated)
		kfree_skb(skb);

select_by_hash:
	if (!nsk)
		nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

	if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
		nsk = NULL;
		goto failure;
	}

out:
	rcu_read_unlock();
	return nsk;

failure:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
	goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);

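/* Attach prog to sk's group, replacing any existing program. This backs
 * the SO_ATTACH_REUSEPORT_CBPF and SO_ATTACH_REUSEPORT_EBPF socket
 * options.
 */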
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk)) {
		int err;

		if (!sk->sk_reuseport)
			return -EINVAL;

		err = reuseport_alloc(sk, false);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

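/* Detach the BPF program from sk's group; backs SO_DETACH_REUSEPORT_BPF.
 * Returns -ENOENT when no program is attached.
 */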
int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse must be checked after acquiring the reuseport_lock
	 * because reuseport_grow() can detach a closed sk.
	 */
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return sk->sk_reuseport ? -ENOENT : -EINVAL;
	}

	if (sk_unhashed(sk) && reuse->num_closed_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOENT;
	}

	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);