// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET6 transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp, generalised here
 *		by Arnaldo Carvalho de Melo <acme@mandriva.com>
 */
#include <linux/module.h>
#include <linux/random.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/secure_seq.h>
#include <net/sock_reuseport.h>
24 u32 inet6_ehashfn(const struct net *net,
25 const struct in6_addr *laddr, const u16 lport,
26 const struct in6_addr *faddr, const __be16 fport)
28 static u32 inet6_ehash_secret __read_mostly;
29 static u32 ipv6_hash_secret __read_mostly;
33 net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
34 net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
36 lhash = (__force u32)laddr->s6_addr32[3];
37 fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
39 return __inet6_ehashfn(lhash, lport, fhash, fport,
40 inet6_ehash_secret + net_hash_mix(net));
44 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
45 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
47 * The sockhash lock must be held as a reader here.
49 struct sock *__inet6_lookup_established(struct net *net,
50 struct inet_hashinfo *hashinfo,
51 const struct in6_addr *saddr,
53 const struct in6_addr *daddr,
55 const int dif, const int sdif)
58 const struct hlist_nulls_node *node;
59 const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
60 /* Optimize here for direct hit, only listening connections can
61 * have wildcards anyways.
63 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
64 unsigned int slot = hash & hashinfo->ehash_mask;
65 struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
69 sk_nulls_for_each_rcu(sk, node, &head->chain) {
70 if (sk->sk_hash != hash)
72 if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
74 if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
77 if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) {
83 if (get_nulls_value(node) != slot)
90 EXPORT_SYMBOL(__inet6_lookup_established);
92 static inline int compute_score(struct sock *sk, struct net *net,
93 const unsigned short hnum,
94 const struct in6_addr *daddr,
95 const int dif, const int sdif)
99 if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
100 sk->sk_family == PF_INET6) {
101 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
104 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
107 score = sk->sk_bound_dev_if ? 2 : 1;
108 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
114 static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
115 struct sk_buff *skb, int doff,
116 const struct in6_addr *saddr,
118 const struct in6_addr *daddr,
121 struct sock *reuse_sk = NULL;
124 if (sk->sk_reuseport) {
125 phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
126 reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
131 /* called with rcu_read_lock() */
132 static struct sock *inet6_lhash2_lookup(struct net *net,
133 struct inet_listen_hashbucket *ilb2,
134 struct sk_buff *skb, int doff,
135 const struct in6_addr *saddr,
136 const __be16 sport, const struct in6_addr *daddr,
137 const unsigned short hnum, const int dif, const int sdif)
139 struct sock *sk, *result = NULL;
140 struct hlist_nulls_node *node;
141 int score, hiscore = 0;
143 sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
144 score = compute_score(sk, net, hnum, daddr, dif, sdif);
145 if (score > hiscore) {
146 result = lookup_reuseport(net, sk, skb, doff,
147 saddr, sport, daddr, hnum);
159 static inline struct sock *inet6_lookup_run_bpf(struct net *net,
160 struct inet_hashinfo *hashinfo,
161 struct sk_buff *skb, int doff,
162 const struct in6_addr *saddr,
164 const struct in6_addr *daddr,
165 const u16 hnum, const int dif)
167 struct sock *sk, *reuse_sk;
170 if (hashinfo != net->ipv4.tcp_death_row.hashinfo)
171 return NULL; /* only TCP is supported */
173 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
174 daddr, hnum, dif, &sk);
175 if (no_reuseport || IS_ERR_OR_NULL(sk))
178 reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
184 struct sock *inet6_lookup_listener(struct net *net,
185 struct inet_hashinfo *hashinfo,
186 struct sk_buff *skb, int doff,
187 const struct in6_addr *saddr,
188 const __be16 sport, const struct in6_addr *daddr,
189 const unsigned short hnum, const int dif, const int sdif)
191 struct inet_listen_hashbucket *ilb2;
192 struct sock *result = NULL;
195 /* Lookup redirect from BPF */
196 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
197 result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
198 saddr, sport, daddr, hnum, dif);
203 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
204 ilb2 = inet_lhash2_bucket(hashinfo, hash2);
206 result = inet6_lhash2_lookup(net, ilb2, skb, doff,
207 saddr, sport, daddr, hnum,
212 /* Lookup lhash2 with in6addr_any */
213 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
214 ilb2 = inet_lhash2_bucket(hashinfo, hash2);
216 result = inet6_lhash2_lookup(net, ilb2, skb, doff,
217 saddr, sport, &in6addr_any, hnum,
224 EXPORT_SYMBOL_GPL(inet6_lookup_listener);
226 struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
227 struct sk_buff *skb, int doff,
228 const struct in6_addr *saddr, const __be16 sport,
229 const struct in6_addr *daddr, const __be16 dport,
235 sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
236 ntohs(dport), dif, 0, &refcounted);
237 if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
241 EXPORT_SYMBOL_GPL(inet6_lookup);
243 static int __inet6_check_established(struct inet_timewait_death_row *death_row,
244 struct sock *sk, const __u16 lport,
245 struct inet_timewait_sock **twp)
247 struct inet_hashinfo *hinfo = death_row->hashinfo;
248 struct inet_sock *inet = inet_sk(sk);
249 const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
250 const struct in6_addr *saddr = &sk->sk_v6_daddr;
251 const int dif = sk->sk_bound_dev_if;
252 struct net *net = sock_net(sk);
253 const int sdif = l3mdev_master_ifindex_by_index(net, dif);
254 const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
255 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
257 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
258 spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
260 const struct hlist_nulls_node *node;
261 struct inet_timewait_sock *tw = NULL;
265 sk_nulls_for_each(sk2, node, &head->chain) {
266 if (sk2->sk_hash != hash)
269 if (likely(inet6_match(net, sk2, saddr, daddr, ports,
271 if (sk2->sk_state == TCP_TIME_WAIT) {
273 if (twsk_unique(sk, sk2, twp))
280 /* Must record num and sport now. Otherwise we will see
281 * in hash table socket with a funny identity.
283 inet->inet_num = lport;
284 inet->inet_sport = htons(lport);
286 WARN_ON(!sk_unhashed(sk));
287 __sk_nulls_add_node_rcu(sk, &head->chain);
289 sk_nulls_del_node_init_rcu((struct sock *)tw);
290 __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
293 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
298 /* Silly. Should hash-dance instead... */
299 inet_twsk_deschedule_put(tw);
305 return -EADDRNOTAVAIL;
308 static u64 inet6_sk_port_offset(const struct sock *sk)
310 const struct inet_sock *inet = inet_sk(sk);
312 return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
313 sk->sk_v6_daddr.s6_addr32,
317 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
322 if (!inet_sk(sk)->inet_num)
323 port_offset = inet6_sk_port_offset(sk);
324 return __inet_hash_connect(death_row, sk, port_offset,
325 __inet6_check_established);
327 EXPORT_SYMBOL_GPL(inet6_hash_connect);
329 int inet6_hash(struct sock *sk)
333 if (sk->sk_state != TCP_CLOSE)
334 err = __inet_hash(sk, NULL);
338 EXPORT_SYMBOL_GPL(inet6_hash);