/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>

#include "nf_internals.h"

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     last_bucket;
        bool                    exiting;
        bool                    early_drop;
        long                    next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV      128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO  50u

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        /* 1) Acquire the lock */
        spin_lock(lock);

        /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
         * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
         */
        if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
                return;

        /* fast path failed, unlock */
        spin_unlock(lock);

        /* Slow path 1) get global lock */
        spin_lock(&nf_conntrack_locks_all_lock);

        /* Slow path 2) get the lock we want */
        spin_lock(lock);

        /* Slow path 3) release the global lock */
        spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
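
/* Usage sketch (illustrative, not part of the upstream file): callers that
 * touch a single hash chain take the matching per-bucket lock through this
 * helper so they order correctly against a concurrent
 * nf_conntrack_all_lock()/nf_conntrack_all_unlock() pair:
 *
 *      unsigned int h = hash % CONNTRACK_LOCKS;
 *
 *      nf_conntrack_lock(&nf_conntrack_locks[h]);
 *      ... modify the chain guarded by bucket h ...
 *      spin_unlock(&nf_conntrack_locks[h]);
 *
 * Only the acquire side needs the slow-path dance above; a plain
 * spin_unlock() is sufficient for release.
 */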

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);

        nf_conntrack_locks_all = true;

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_lock(&nf_conntrack_locks[i]);

                /* This spin_unlock provides the "release" to ensure that
                 * nf_conntrack_locks_all==true is visible to everyone that
                 * acquired spin_lock(&nf_conntrack_locks[]).
                 */
                spin_unlock(&nf_conntrack_locks[i]);
        }
}

static void nf_conntrack_all_unlock(void)
{
        /* All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
         * critical section.
         * It pairs with the smp_load_acquire() in nf_conntrack_lock()
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
}
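
/* Sketch of the intended pairing (illustrative): a writer that must exclude
 * every nf_conntrack_lock() holder, e.g. while replacing the hash table,
 * brackets its critical section with the global lock:
 *
 *      nf_conntrack_all_lock();
 *      ... no per-bucket lock holder can be in its critical section here ...
 *      nf_conntrack_all_unlock();
 */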

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              const struct net *net)
{
        unsigned int n;
        u32 seed;

        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

static u32 scale_hash(u32 hash)
{
        return reciprocal_scale(hash, nf_conntrack_htable_size);
}

static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
                            unsigned int size)
{
        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
                          const struct nf_conntrack_tuple *tuple)
{
        return scale_hash(hash_conntrack_raw(tuple, net));
}
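
/* reciprocal_scale() maps the full 32-bit hash onto [0, size) without a
 * division: bucket = ((u64)hash * size) >> 32. Worked example (illustrative):
 * a raw hash of 0x80000000 with a 16384-bucket table lands in bucket 8192.
 */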

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
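
/* Usage sketch (illustrative): pulling the on-wire tuple out of an IPv4 skb,
 * e.g. from a netfilter hook where the network header offset is known:
 *
 *      struct nf_conntrack_tuple tuple;
 *
 *      if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
 *                             NFPROTO_IPV4, net, &tuple))
 *              return NF_ACCEPT;
 *
 * A false return means the packet is not trackable at that offset.
 */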

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
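
/* Illustrative result: inverting the original tuple
 * 10.0.0.1:1025 -> 8.8.8.8:53/udp yields 8.8.8.8:53 -> 10.0.0.1:1025/udp
 * with dst.dir flipped, i.e. the tuple a reply packet would carry.
 */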

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload first tuple to link into unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl, *p;

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
                tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
                if (!tmpl)
                        return NULL;

                p = tmpl;
                tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                if (tmpl != p) {
                        tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                        tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
                }
        } else {
                tmpl = kzalloc(sizeof(*tmpl), flags);
                if (!tmpl)
                        return NULL;
        }

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
        nf_ct_zone_add(tmpl, zone);
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
                kfree((char *)tmpl - tmpl->proto.tmpl_padto);
        else
                kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        const struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        WARN_ON(atomic_read(&nfct->use) != 0);

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->destroy)
                l4proto->destroy(ct);

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
                return false;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
                /* destroy event was not delivered. nf_ct_put will
                 * be done by event cache worker on redelivery.
                 */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with an equal tuple,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct) &&
               net_eq(net, nf_ct_net(ct));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
        if (!atomic_inc_not_zero(&ct->ct_general.use))
                return;

        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);

        nf_ct_put(ct);
}
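
/* The conditional refcount bump in nf_ct_gc_expired() is the standard
 * SLAB_TYPESAFE_BY_RCU idiom (illustrative sketch):
 *
 *      if (!atomic_inc_not_zero(&ct->ct_general.use))
 *              return;         object was freed or recycled under us
 *      ... ct is pinned; revalidate its identity before acting on it ...
 *      nf_ct_put(ct);
 */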

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int bucket, hsize;

begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        bucket = reciprocal_scale(hash, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
                struct nf_conn *ct;

                ct = nf_ct_tuplehash_to_ctrack(h);
                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_is_dying(ct))
                        continue;

                if (nf_ct_key_equal(h, tuple, zone, net))
                        return h;
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
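
/* Usage sketch (illustrative): a successful lookup returns the tuple hash
 * with the conntrack reference count already bumped, so the caller must
 * drop it when done:
 *
 *      h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *      if (h) {
 *              struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *              ...
 *              nf_ct_put(ct);
 *      }
 */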

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &nf_conntrack_hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
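
/* Note: unlike the packet-path __nf_conntrack_confirm() below, this variant
 * serves entries created outside the data path (e.g. ctnetlink); it simply
 * fails with -EEXIST rather than trying to resolve a clash.
 */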

static inline void nf_ct_acct_update(struct nf_conn *ct,
                                     enum ip_conntrack_info ctinfo,
                                     unsigned int len)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;

                atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
        }
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                             const struct nf_conn *loser_ct)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(loser_ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;
                unsigned int bytes;

                /* u32 should be fine since we must have seen one packet. */
                bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
                nf_ct_acct_update(ct, ctinfo, bytes);
        }
}

/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
                               enum ip_conntrack_info ctinfo,
                               struct nf_conntrack_tuple_hash *h)
{
        /* This is the conntrack entry already in hashes that won race. */
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
        const struct nf_conntrack_l4proto *l4proto;

        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
            ((ct->status & IPS_NAT_DONE_MASK) == 0) &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                enum ip_conntrack_info oldinfo;
                struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

                nf_ct_acct_merge(ct, ctinfo, loser_ct);
                nf_conntrack_put(&loser_ct->ct_general);
                nf_ct_set(skb, ct, oldinfo);
                return NF_ACCEPT;
        }
        NF_CT_STAT_INC(net, drop);
        return NF_DROP;
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;
        int ret = NF_DROP;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction.  Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */

        /* No external references means no one else could have
         * confirmed us.
         */
        WARN_ON(nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct))) {
                nf_ct_add_to_dying_list(ct);
                goto dying;
        }

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout += nfct_time_stamp;
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
                if (skb->tstamp == 0)
                        __net_timestamp(skb);

                tstamp->start = ktime_to_ns(skb->tstamp);
        }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
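
/* Illustrative call context: the nf_conntrack_confirm() wrapper invokes this
 * from the last netfilter hooks (POST_ROUTING/LOCAL_IN), so an entry is only
 * inserted into the hash after the first packet has traversed the ruleset.
 */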

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        unsigned int hash, hsize;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;

        zone = nf_ct_zone(ignored_conntrack);

        rcu_read_lock();
 begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        hash = __hash_conntrack(net, tuple, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);

                if (ct == ignored_conntrack)
                        continue;

                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_key_equal(h, tuple, zone, net)) {
                        NF_CT_STAT_INC_ATOMIC(net, found);
                        rcu_read_unlock();
                        return 1;
                }
        }

        if (get_nulls_value(n) != hash) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        rcu_read_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
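
/* Usage sketch (illustrative): NAT port allocation probes candidate reply
 * tuples with this helper until it finds one no other connection owns:
 *
 *      if (!nf_conntrack_tuple_taken(&candidate, ct))
 *              break;          candidate tuple is unique, use it
 */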

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
                                    struct hlist_nulls_head *head)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int drops = 0;
        struct nf_conn *tmp;

        hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
                tmp = nf_ct_tuplehash_to_ctrack(h);

                if (nf_ct_is_expired(tmp)) {
                        nf_ct_gc_expired(tmp);
                        continue;
                }

                if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
                    !net_eq(nf_ct_net(tmp), net) ||
                    nf_ct_is_dying(tmp))
                        continue;

                if (!atomic_inc_not_zero(&tmp->ct_general.use))
                        continue;

                /* kill only if still in same netns -- might have moved due to
                 * SLAB_TYPESAFE_BY_RCU rules.
                 *
                 * We steal the timer reference.  If that fails timer has
                 * already fired or someone else deleted it. Just drop ref
                 * and move to next entry.
                 */
                if (net_eq(nf_ct_net(tmp), net) &&
                    nf_ct_is_confirmed(tmp) &&
                    nf_ct_delete(tmp, 0, 0))
                        drops++;

                nf_ct_put(tmp);
        }

        return drops;
}

static noinline int early_drop(struct net *net, unsigned int _hash)
{
        unsigned int i;

        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
                unsigned int hash, hsize, drops;

                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
                hash = reciprocal_scale(_hash++, hsize);

                drops = early_drop_list(net, &ct_hash[hash]);
                rcu_read_unlock();

                if (drops) {
                        NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
                        return true;
                }
        }

        return false;
}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
        return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;

        if (!test_bit(IPS_ASSURED_BIT, &ct->status))
                return true;

        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
                return true;

        return false;
}

static void gc_worker(struct work_struct *work)
{
        unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
        unsigned int i, goal, buckets = 0, expired_count = 0;
        unsigned int nf_conntrack_max95 = 0;
        struct conntrack_gc_work *gc_work;
        unsigned int ratio, scanned = 0;
        unsigned long next_run;

        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

        goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
        i = gc_work->last_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

        do {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
                unsigned int hashsz;
                struct nf_conn *tmp;

                i++;
                rcu_read_lock();

                nf_conntrack_get_ht(&ct_hash, &hashsz);
                if (i >= hashsz)
                        i = 0;

                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct net *net;

                        tmp = nf_ct_tuplehash_to_ctrack(h);

                        scanned++;
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
                                expired_count++;
                                continue;
                        }

                        if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
                                continue;

                        net = nf_ct_net(tmp);
                        if (atomic_read(&net->ct.count) < nf_conntrack_max95)
                                continue;

                        /* need to take reference to avoid possible races */
                        if (!atomic_inc_not_zero(&tmp->ct_general.use))
                                continue;

                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
                        }

                        if (gc_worker_can_early_drop(tmp))
                                nf_ct_kill(tmp);

                        nf_ct_put(tmp);
                }

                /* could check get_nulls_value() here and restart if ct
                 * was moved to another chain.  But given gc is best-effort
                 * we will just continue with next hash slot.
                 */
                rcu_read_unlock();
                cond_resched_rcu_qs();
        } while (++buckets < goal);

        if (gc_work->exiting)
                return;

        /*
         * Eviction will normally happen from the packet path, and not
         * from this gc worker.
         *
         * This worker is only here to reap expired entries when system went
         * idle after a busy period.
         *
         * The heuristics below are supposed to balance conflicting goals:
         *
         * 1. Minimize time until we notice a stale entry
         * 2. Maximize scan intervals to not waste cycles
         *
         * Normally, expire ratio will be close to 0.
         *
         * As soon as a sizeable fraction of the entries have expired
         * increase scan frequency.
         */
        ratio = scanned ? expired_count * 100 / scanned : 0;
        if (ratio > GC_EVICT_RATIO) {
                gc_work->next_gc_run = min_interval;
        } else {
                unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

                BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

                gc_work->next_gc_run += min_interval;
                if (gc_work->next_gc_run > max)
                        gc_work->next_gc_run = max;
        }

        next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        gc_work->early_drop = false;
        queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
        INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
        gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
}
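
/* Illustrative: the module init path (not shown in this excerpt) arms the
 * worker once, and gc_worker() then re-queues itself with the adaptive
 * next_gc_run computed above:
 *
 *      conntrack_gc_work_init(&conntrack_gc_work);
 *      queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
 */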

static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        if (!conntrack_gc_work.early_drop)
                                conntrack_gc_work.early_drop = true;
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_TYPESAFE_BY_RCU.
         */
        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;

        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));

        nf_ct_zone_add(ct, zone);

        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0, that's
         * the golden rule for SLAB_TYPESAFE_BY_RCU
         */
        WARN_ON(atomic_read(&ct->ct_general.use) != 0);

        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               const struct nf_conntrack_l3proto *l3proto,
               const struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
        struct nf_conntrack_zone tmp;
        unsigned int *timeouts;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (!nf_ct_add_synproxy(ct, tmpl)) {
                nf_conntrack_free(ct);
                return ERR_PTR(-ENOMEM);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
        if (timeout_ext) {
                timeouts = nf_ct_timeout_data(timeout_ext);
                if (unlikely(!timeouts))
                        timeouts = l4proto->get_timeouts(net);
        } else {
                timeouts = l4proto->get_timeouts(net);
        }

        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
                pr_debug("can't track with proto module\n");
                return NULL;
        }

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
                                      GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                             ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, exp->helper,
                                                            GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp)
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  const struct nf_conntrack_l3proto *l3proto,
                  const struct nf_conntrack_l4proto *l4proto)
{
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, net, &tuple, l3proto,
                             l4proto)) {
                pr_debug("Can't get tuple\n");
                return 0;
        }

        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        hash = hash_conntrack_raw(&tuple, net);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
                                   skb, dataoff, hash);
                if (!h)
                        return 0;
                if (IS_ERR(h))
                        return PTR_ERR(h);
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                ctinfo = IP_CT_ESTABLISHED_REPLY;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("normal packet for %p\n", ct);
                        ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("related packet for %p\n", ct);
                        ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("new packet for %p\n", ct);
                        ctinfo = IP_CT_NEW;
                }
        }
        nf_ct_set(skb, ct, ctinfo);
        return 0;
}

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        const struct nf_conntrack_l3proto *l3proto;
        const struct nf_conntrack_l4proto *l4proto;
        struct nf_conn *ct, *tmpl;
        enum ip_conntrack_info ctinfo;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
        int ret;

        tmpl = nf_ct_get(skb, &ctinfo);
        if (tmpl || ctinfo == IP_CT_UNTRACKED) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                if ((tmpl && !nf_ct_is_template(tmpl)) ||
                     ctinfo == IP_CT_UNTRACKED) {
                        NF_CT_STAT_INC_ATOMIC(net, ignore);
                        return NF_ACCEPT;
                }
                skb->_nfct = 0;
        }

        /* rcu_read_lock()ed by nf_hook_thresh */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = -ret;
                goto out;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet: error, unclean, etc. The inverse of
         * the return code tells the netfilter core what to do with the
         * packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->_nfct)
                        goto out;
        }
repeat:
        ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
                                l3proto, l4proto);
        if (ret < 0) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = NF_DROP;
                goto out;
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, timeouts);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(&ct->ct_general);
                skb->_nfct = 0;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
                /* Special case: TCP tracker reports an attempt to reopen a
                 * closed/aborted connection. We have to go back and create a
                 * fresh conntrack.
                 */
                if (ret == -NF_REPEAT)
                        goto repeat;
                ret = -ret;
                goto out;
        }

        if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
            !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl)
                nf_ct_put(tmpl);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
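
/* Usage sketch (illustrative): the per-family glue calls this from its
 * PRE_ROUTING/LOCAL_OUT hooks, roughly:
 *
 *      static unsigned int ipv4_conntrack_in(void *priv, struct sk_buff *skb,
 *                                            const struct nf_hook_state *state)
 *      {
 *              return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 *      }
 */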
1452
1453 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1454                           const struct nf_conntrack_tuple *orig)
1455 {
1456         bool ret;
1457
1458         rcu_read_lock();
1459         ret = nf_ct_invert_tuple(inverse, orig,
1460                                  __nf_ct_l3proto_find(orig->src.l3num),
1461                                  __nf_ct_l4proto_find(orig->src.l3num,
1462                                                       orig->dst.protonum));
1463         rcu_read_unlock();
1464         return ret;
1465 }
1466 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1467
1468 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1469    implicitly racy: see __nf_conntrack_confirm */
1470 void nf_conntrack_alter_reply(struct nf_conn *ct,
1471                               const struct nf_conntrack_tuple *newreply)
1472 {
1473         struct nf_conn_help *help = nfct_help(ct);
1474
1475         /* Should be unconfirmed, so not in hash table yet */
1476         WARN_ON(nf_ct_is_confirmed(ct));
1477
1478         pr_debug("Altering reply tuple of %p to ", ct);
1479         nf_ct_dump_tuple(newreply);
1480
1481         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1482         if (ct->master || (help && !hlist_empty(&help->expectations)))
1483                 return;
1484
1485         rcu_read_lock();
1486         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1487         rcu_read_unlock();
1488 }
1489 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1490
1491 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1492 void __nf_ct_refresh_acct(struct nf_conn *ct,
1493                           enum ip_conntrack_info ctinfo,
1494                           const struct sk_buff *skb,
1495                           unsigned long extra_jiffies,
1496                           int do_acct)
1497 {
1498         WARN_ON(!skb);
1499
1500         /* Only update if this is not a fixed timeout */
1501         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1502                 goto acct;
1503
1504         /* If not in hash table, timer will not be active yet */
1505         if (nf_ct_is_confirmed(ct))
1506                 extra_jiffies += nfct_time_stamp;
1507
1508         ct->timeout = extra_jiffies;
1509 acct:
1510         if (do_acct)
1511                 nf_ct_acct_update(ct, ctinfo, skb->len);
1512 }
1513 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
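/* Callers normally go through the nf_ct_refresh_acct()/nf_ct_refresh()
 * wrappers in nf_conntrack.h rather than calling this directly. A
 * sketch of a typical protocol-tracker call, assuming a timeout array
 * indexed by protocol state:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 */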
1514
1515 bool nf_ct_kill_acct(struct nf_conn *ct,
1516                      enum ip_conntrack_info ctinfo,
1517                      const struct sk_buff *skb)
1518 {
1519         nf_ct_acct_update(ct, ctinfo, skb->len);
1520
1521         return nf_ct_delete(ct, 0, 0);
1522 }
1523 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1524
1525 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1526
1527 #include <linux/netfilter/nfnetlink.h>
1528 #include <linux/netfilter/nfnetlink_conntrack.h>
1529 #include <linux/mutex.h>
1530
1531 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to
1532  * be in nf_conntrack_core, since we don't want the protocols to
1533  * autoload or depend on ctnetlink. */
1534 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1535                                const struct nf_conntrack_tuple *tuple)
1536 {
1537         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1538             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1539                 goto nla_put_failure;
1540         return 0;
1541
1542 nla_put_failure:
1543         return -1;
1544 }
1545 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1546
1547 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1548         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1549         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1550 };
1551 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1552
1553 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1554                                struct nf_conntrack_tuple *t)
1555 {
1556         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1557                 return -EINVAL;
1558
1559         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1560         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1561
1562         return 0;
1563 }
1564 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1565
1566 int nf_ct_port_nlattr_tuple_size(void)
1567 {
1568         return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1569 }
1570 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
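/* Port-based trackers (tcp, udp, sctp, dccp) typically wire these
 * helpers into their struct nf_conntrack_l4proto, roughly:
 *
 *	.tuple_to_nlattr   = nf_ct_port_tuple_to_nlattr,
 *	.nlattr_to_tuple   = nf_ct_port_nlattr_to_tuple,
 *	.nla_policy        = nf_ct_port_nla_policy,
 *	.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
 */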
1571 #endif
1572
1573 /* Used by ipt_REJECT and ip6t_REJECT. */
1574 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1575 {
1576         struct nf_conn *ct;
1577         enum ip_conntrack_info ctinfo;
1578
1579         /* This ICMP is in the reverse direction to the packet that caused it */
1580         ct = nf_ct_get(skb, &ctinfo);
1581         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1582                 ctinfo = IP_CT_RELATED_REPLY;
1583         else
1584                 ctinfo = IP_CT_RELATED;
1585
1586         /* Attach to new skbuff, and increment count */
1587         nf_ct_set(nskb, ct, ctinfo);
1588         nf_conntrack_get(skb_nfct(nskb));
1589 }
1590
1591 /* Bring out ya dead! */
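/* Walk the table for the next entry accepted by @iter. Each bucket is
 * guarded by nf_conntrack_locks[bucket % CONNTRACK_LOCKS]; the bucket
 * bound is re-checked under the lock since a concurrent resize may
 * have shrunk the table. A match is returned with its refcount held,
 * so the caller can keep using it after the lock is dropped.
 */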
1592 static struct nf_conn *
1593 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1594                 void *data, unsigned int *bucket)
1595 {
1596         struct nf_conntrack_tuple_hash *h;
1597         struct nf_conn *ct;
1598         struct hlist_nulls_node *n;
1599         spinlock_t *lockp;
1600
1601         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1602                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1603                 local_bh_disable();
1604                 nf_conntrack_lock(lockp);
1605                 if (*bucket < nf_conntrack_htable_size) {
1606                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1607                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1608                                         continue;
1609                                 ct = nf_ct_tuplehash_to_ctrack(h);
1610                                 if (iter(ct, data))
1611                                         goto found;
1612                         }
1613                 }
1614                 spin_unlock(lockp);
1615                 local_bh_enable();
1616                 cond_resched();
1617         }
1618
1619         return NULL;
1620 found:
1621         atomic_inc(&ct->ct_general.use);
1622         spin_unlock(lockp);
1623         local_bh_enable();
1624         return ct;
1625 }
1626
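/* Apply @iter to every confirmed conntrack and delete the entries it
 * accepts. The walk runs under the nf_conntrack_generation seqcount;
 * if a hash resize is observed mid-walk, the scan restarts from
 * bucket 0 so that no entry is missed.
 */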
1627 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
1628                                   void *data, u32 portid, int report)
1629 {
1630         unsigned int bucket = 0, sequence;
1631         struct nf_conn *ct;
1632
1633         might_sleep();
1634
1635         for (;;) {
1636                 sequence = read_seqcount_begin(&nf_conntrack_generation);
1637
1638                 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
1639                         /* Time to push up daisies... */
1640
1641                         nf_ct_delete(ct, portid, report);
1642                         nf_ct_put(ct);
1643                         cond_resched();
1644                 }
1645
1646                 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
1647                         break;
1648                 bucket = 0;
1649         }
1650 }
1651
1652 struct iter_data {
1653         int (*iter)(struct nf_conn *i, void *data);
1654         void *data;
1655         struct net *net;
1656 };
1657
1658 static int iter_net_only(struct nf_conn *i, void *data)
1659 {
1660         struct iter_data *d = data;
1661
1662         if (!net_eq(d->net, nf_ct_net(i)))
1663                 return 0;
1664
1665         return d->iter(i, d->data);
1666 }
1667
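/* Mark every conntrack still sitting on a per-cpu unconfirmed list as
 * dying; such entries are then dropped, rather than inserted, when
 * the owning cpu later tries to confirm them.
 */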
1668 static void
1669 __nf_ct_unconfirmed_destroy(struct net *net)
1670 {
1671         int cpu;
1672
1673         for_each_possible_cpu(cpu) {
1674                 struct nf_conntrack_tuple_hash *h;
1675                 struct hlist_nulls_node *n;
1676                 struct ct_pcpu *pcpu;
1677
1678                 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1679
1680                 spin_lock_bh(&pcpu->lock);
1681                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1682                         struct nf_conn *ct;
1683
1684                         ct = nf_ct_tuplehash_to_ctrack(h);
1685
1686                         /* We cannot call iter() on the unconfirmed list;
1687                          * the owning cpu can reallocate ct->ext at any time.
1688                          */
1689                         set_bit(IPS_DYING_BIT, &ct->status);
1690                 }
1691                 spin_unlock_bh(&pcpu->lock);
1692                 cond_resched();
1693         }
1694 }
1695
1696 void nf_ct_unconfirmed_destroy(struct net *net)
1697 {
1698         might_sleep();
1699
1700         if (atomic_read(&net->ct.count) > 0) {
1701                 __nf_ct_unconfirmed_destroy(net);
1702                 nf_queue_nf_hook_drop(net);
1703                 synchronize_net();
1704         }
1705 }
1706 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
1707
1708 void nf_ct_iterate_cleanup_net(struct net *net,
1709                                int (*iter)(struct nf_conn *i, void *data),
1710                                void *data, u32 portid, int report)
1711 {
1712         struct iter_data d;
1713
1714         might_sleep();
1715
1716         if (atomic_read(&net->ct.count) == 0)
1717                 return;
1718
1719         d.iter = iter;
1720         d.data = data;
1721         d.net = net;
1722
1723         nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
1724 }
1725 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
1726
1727 /**
1728  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
1729  * @iter: callback to invoke for each conntrack
1730  * @data: data to pass to @iter
1731  *
1732  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
1733  * unconfirmed list as dying (so they will not be inserted into
1734  * the main table).
1735  *
1736  * Can only be called from the module exit path.
1737  */
1738 void
1739 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
1740 {
1741         struct net *net;
1742
1743         rtnl_lock();
1744         for_each_net(net) {
1745                 if (atomic_read(&net->ct.count) == 0)
1746                         continue;
1747                 __nf_ct_unconfirmed_destroy(net);
1748                 nf_queue_nf_hook_drop(net);
1749         }
1750         rtnl_unlock();
1751
1752         /* Need to wait for the netns cleanup worker to finish, if it's
1753          * running -- it might have deleted a net namespace from
1754          * the global list, so our __nf_ct_unconfirmed_destroy() might
1755          * not have affected all namespaces.
1756          */
1757         net_ns_barrier();
1758
1759         /* A conntrack could have been unlinked from the unconfirmed list
1760          * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
1761          * This makes sure it's inserted into the conntrack table.
1762          */
1763         synchronize_net();
1764
1765         nf_ct_iterate_cleanup(iter, data, 0, 0);
1766 }
1767 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
1768
1769 static int kill_all(struct nf_conn *i, void *data)
1770 {
1771         return net_eq(nf_ct_net(i), data);
1772 }
1773
1774 void nf_ct_free_hashtable(void *hash, unsigned int size)
1775 {
1776         if (is_vmalloc_addr(hash))
1777                 vfree(hash);
1778         else
1779                 free_pages((unsigned long)hash,
1780                            get_order(sizeof(struct hlist_head) * size));
1781 }
1782 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1783
1784 void nf_conntrack_cleanup_start(void)
1785 {
1786         conntrack_gc_work.exiting = true;
1787         RCU_INIT_POINTER(ip_ct_attach, NULL);
1788 }
1789
1790 void nf_conntrack_cleanup_end(void)
1791 {
1792         RCU_INIT_POINTER(nf_ct_destroy, NULL);
1793
1794         cancel_delayed_work_sync(&conntrack_gc_work.dwork);
1795         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1796
1797         nf_conntrack_proto_fini();
1798         nf_conntrack_seqadj_fini();
1799         nf_conntrack_labels_fini();
1800         nf_conntrack_helper_fini();
1801         nf_conntrack_timeout_fini();
1802         nf_conntrack_ecache_fini();
1803         nf_conntrack_tstamp_fini();
1804         nf_conntrack_acct_fini();
1805         nf_conntrack_expect_fini();
1806
1807         kmem_cache_destroy(nf_conntrack_cachep);
1808 }
1809
1810 /*
1811  * Mishearing the voices in his head, our hero wonders how he's
1812  * supposed to kill the mall.
1813  */
1814 void nf_conntrack_cleanup_net(struct net *net)
1815 {
1816         LIST_HEAD(single);
1817
1818         list_add(&net->exit_list, &single);
1819         nf_conntrack_cleanup_net_list(&single);
1820 }
1821
1822 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1823 {
1824         int busy;
1825         struct net *net;
1826
1827         /*
1828          * This makes sure all current packets have passed through
1829          * the netfilter framework.  Roll on, two-stage module
1830          * delete...
1831          */
1832         synchronize_net();
1833 i_see_dead_people:
1834         busy = 0;
1835         list_for_each_entry(net, net_exit_list, exit_list) {
1836                 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
1837                 if (atomic_read(&net->ct.count) != 0)
1838                         busy = 1;
1839         }
1840         if (busy) {
1841                 schedule();
1842                 goto i_see_dead_people;
1843         }
1844
1845         list_for_each_entry(net, net_exit_list, exit_list) {
1846                 nf_conntrack_proto_pernet_fini(net);
1847                 nf_conntrack_helper_pernet_fini(net);
1848                 nf_conntrack_ecache_pernet_fini(net);
1849                 nf_conntrack_tstamp_pernet_fini(net);
1850                 nf_conntrack_acct_pernet_fini(net);
1851                 nf_conntrack_expect_pernet_fini(net);
1852                 free_percpu(net->ct.stat);
1853                 free_percpu(net->ct.pcpu_lists);
1854         }
1855 }
1856
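/* Allocate a hash table of at least *sizep buckets, rounded up to a
 * whole number of pages; tries __get_free_pages() first and falls
 * back to vzalloc() for large tables. With @nulls set, each chain is
 * terminated by a nulls marker carrying its bucket index, which lets
 * RCU lookups detect that an entry was moved to another chain.
 */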
1857 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1858 {
1859         struct hlist_nulls_head *hash;
1860         unsigned int nr_slots, i;
1861         size_t sz;
1862
1863         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1864                 return NULL;
1865
1866         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1867         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1868
1869         if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1870                 return NULL;
1871
1872         sz = nr_slots * sizeof(struct hlist_nulls_head);
1873         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1874                                         get_order(sz));
1875         if (!hash)
1876                 hash = vzalloc(sz);
1877
1878         if (hash && nulls)
1879                 for (i = 0; i < nr_slots; i++)
1880                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1881
1882         return hash;
1883 }
1884 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1885
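/* Resize the conntrack hash: allocate the new table first, take all
 * bucket locks, move every entry to its new chain, then publish the
 * new table inside a nf_conntrack_generation write section so that
 * readers can detect the swap. The old table is freed only after
 * synchronize_net().
 */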
1886 int nf_conntrack_hash_resize(unsigned int hashsize)
1887 {
1888         int i, bucket;
1889         unsigned int old_size;
1890         struct hlist_nulls_head *hash, *old_hash;
1891         struct nf_conntrack_tuple_hash *h;
1892         struct nf_conn *ct;
1893
1894         if (!hashsize)
1895                 return -EINVAL;
1896
1897         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1898         if (!hash)
1899                 return -ENOMEM;
1900
1901         old_size = nf_conntrack_htable_size;
1902         if (old_size == hashsize) {
1903                 nf_ct_free_hashtable(hash, hashsize);
1904                 return 0;
1905         }
1906
1907         local_bh_disable();
1908         nf_conntrack_all_lock();
1909         write_seqcount_begin(&nf_conntrack_generation);
1910
1911         /* Lookups in the old hash might happen in parallel, which means we
1912          * might get false negatives during connection lookup. New connections
1913          * created because of a false negative won't make it into the hash
1914          * though, since that requires taking the locks.
1915          */
1916
1917         for (i = 0; i < nf_conntrack_htable_size; i++) {
1918                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
1919                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
1920                                               struct nf_conntrack_tuple_hash, hnnode);
1921                         ct = nf_ct_tuplehash_to_ctrack(h);
1922                         hlist_nulls_del_rcu(&h->hnnode);
1923                         bucket = __hash_conntrack(nf_ct_net(ct),
1924                                                   &h->tuple, hashsize);
1925                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1926                 }
1927         }
1928         old_size = nf_conntrack_htable_size;
1929         old_hash = nf_conntrack_hash;
1930
1931         nf_conntrack_hash = hash;
1932         nf_conntrack_htable_size = hashsize;
1933
1934         write_seqcount_end(&nf_conntrack_generation);
1935         nf_conntrack_all_unlock();
1936         local_bh_enable();
1937
1938         synchronize_net();
1939         nf_ct_free_hashtable(old_hash, old_size);
1940         return 0;
1941 }
1942
1943 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1944 {
1945         unsigned int hashsize;
1946         int rc;
1947
1948         if (current->nsproxy->net_ns != &init_net)
1949                 return -EOPNOTSUPP;
1950
1951         /* On boot, we can set this without any fancy locking. */
1952         if (!nf_conntrack_htable_size)
1953                 return param_set_uint(val, kp);
1954
1955         rc = kstrtouint(val, 0, &hashsize);
1956         if (rc)
1957                 return rc;
1958
1959         return nf_conntrack_hash_resize(hashsize);
1960 }
1961 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1962
1963 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1964                   &nf_conntrack_htable_size, 0600);
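/* With mode 0600 the hash size can be changed by root at runtime; the
 * usual path (assuming the standard module-param sysfs layout) is:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 */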
1965
1966 static __always_inline unsigned int total_extension_size(void)
1967 {
1968         /* remember to add new extensions below */
1969         BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
1970
1971         return sizeof(struct nf_ct_ext) +
1972                sizeof(struct nf_conn_help)
1973 #if IS_ENABLED(CONFIG_NF_NAT)
1974                 + sizeof(struct nf_conn_nat)
1975 #endif
1976                 + sizeof(struct nf_conn_seqadj)
1977                 + sizeof(struct nf_conn_acct)
1978 #ifdef CONFIG_NF_CONNTRACK_EVENTS
1979                 + sizeof(struct nf_conntrack_ecache)
1980 #endif
1981 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
1982                 + sizeof(struct nf_conn_tstamp)
1983 #endif
1984 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1985                 + sizeof(struct nf_conn_timeout)
1986 #endif
1987 #ifdef CONFIG_NF_CONNTRACK_LABELS
1988                 + sizeof(struct nf_conn_labels)
1989 #endif
1990 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
1991                 + sizeof(struct nf_conn_synproxy)
1992 #endif
1993         ;
1994 }
1995
1996 int nf_conntrack_init_start(void)
1997 {
1998         int max_factor = 8;
1999         int ret = -ENOMEM;
2000         int i;
2001
2002         /* struct nf_ct_ext uses u8 to store offsets/size */
2003         BUILD_BUG_ON(total_extension_size() > 255u);
2004
2005         seqcount_init(&nf_conntrack_generation);
2006
2007         for (i = 0; i < CONNTRACK_LOCKS; i++)
2008                 spin_lock_init(&nf_conntrack_locks[i]);
2009
2010         if (!nf_conntrack_htable_size) {
2011                 /* Idea from tcp.c: use 1/16384 of memory.
2012                  * On i386: 32MB machine has 512 buckets.
2013                  * >= 1GB machines have 16384 buckets.
2014                  * >= 4GB machines have 65536 buckets.
2015                  */
2016                 nf_conntrack_htable_size
2017                         = (((totalram_pages << PAGE_SHIFT) / 16384)
2018                            / sizeof(struct hlist_head));
2019                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2020                         nf_conntrack_htable_size = 65536;
2021                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2022                         nf_conntrack_htable_size = 16384;
2023                 if (nf_conntrack_htable_size < 32)
2024                         nf_conntrack_htable_size = 32;
2025
2026                 /* Use a max. factor of four by default to get the same max as
2027                  * with the old struct list_heads. When a table size is given
2028                  * we use the old value of 8 to avoid reducing the max.
2029                  * entries. */
2030                 max_factor = 4;
2031         }
2032
2033         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2034         if (!nf_conntrack_hash)
2035                 return -ENOMEM;
2036
2037         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2038
2039         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2040                                                 sizeof(struct nf_conn),
2041                                                 NFCT_INFOMASK + 1,
2042                                                 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2043         if (!nf_conntrack_cachep)
2044                 goto err_cachep;
2045
2046         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
2047                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
2048                nf_conntrack_max);
2049
2050         ret = nf_conntrack_expect_init();
2051         if (ret < 0)
2052                 goto err_expect;
2053
2054         ret = nf_conntrack_acct_init();
2055         if (ret < 0)
2056                 goto err_acct;
2057
2058         ret = nf_conntrack_tstamp_init();
2059         if (ret < 0)
2060                 goto err_tstamp;
2061
2062         ret = nf_conntrack_ecache_init();
2063         if (ret < 0)
2064                 goto err_ecache;
2065
2066         ret = nf_conntrack_timeout_init();
2067         if (ret < 0)
2068                 goto err_timeout;
2069
2070         ret = nf_conntrack_helper_init();
2071         if (ret < 0)
2072                 goto err_helper;
2073
2074         ret = nf_conntrack_labels_init();
2075         if (ret < 0)
2076                 goto err_labels;
2077
2078         ret = nf_conntrack_seqadj_init();
2079         if (ret < 0)
2080                 goto err_seqadj;
2081
2082         ret = nf_conntrack_proto_init();
2083         if (ret < 0)
2084                 goto err_proto;
2085
2086         conntrack_gc_work_init(&conntrack_gc_work);
2087         queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
2088
2089         return 0;
2090
2091 err_proto:
2092         nf_conntrack_seqadj_fini();
2093 err_seqadj:
2094         nf_conntrack_labels_fini();
2095 err_labels:
2096         nf_conntrack_helper_fini();
2097 err_helper:
2098         nf_conntrack_timeout_fini();
2099 err_timeout:
2100         nf_conntrack_ecache_fini();
2101 err_ecache:
2102         nf_conntrack_tstamp_fini();
2103 err_tstamp:
2104         nf_conntrack_acct_fini();
2105 err_acct:
2106         nf_conntrack_expect_fini();
2107 err_expect:
2108         kmem_cache_destroy(nf_conntrack_cachep);
2109 err_cachep:
2110         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
2111         return ret;
2112 }
2113
2114 void nf_conntrack_init_end(void)
2115 {
2116         /* For use by REJECT target */
2117         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2118         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
2119 }
2120
2121 /* We need special "null" end-of-list values, distinct from the bucket
2122  * indices used in the hash table, so RCU walkers can tell these lists apart.
2123  */
2124 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
2125 #define DYING_NULLS_VAL         ((1<<30)+1)
2126 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
2127
2128 int nf_conntrack_init_net(struct net *net)
2129 {
2130         int ret = -ENOMEM;
2131         int cpu;
2132
2133         BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2134         atomic_set(&net->ct.count, 0);
2135
2136         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2137         if (!net->ct.pcpu_lists)
2138                 goto err_stat;
2139
2140         for_each_possible_cpu(cpu) {
2141                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2142
2143                 spin_lock_init(&pcpu->lock);
2144                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2145                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2146         }
2147
2148         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2149         if (!net->ct.stat)
2150                 goto err_pcpu_lists;
2151
2152         ret = nf_conntrack_expect_pernet_init(net);
2153         if (ret < 0)
2154                 goto err_expect;
2155         ret = nf_conntrack_acct_pernet_init(net);
2156         if (ret < 0)
2157                 goto err_acct;
2158         ret = nf_conntrack_tstamp_pernet_init(net);
2159         if (ret < 0)
2160                 goto err_tstamp;
2161         ret = nf_conntrack_ecache_pernet_init(net);
2162         if (ret < 0)
2163                 goto err_ecache;
2164         ret = nf_conntrack_helper_pernet_init(net);
2165         if (ret < 0)
2166                 goto err_helper;
2167         ret = nf_conntrack_proto_pernet_init(net);
2168         if (ret < 0)
2169                 goto err_proto;
2170         return 0;
2171
2172 err_proto:
2173         nf_conntrack_helper_pernet_fini(net);
2174 err_helper:
2175         nf_conntrack_ecache_pernet_fini(net);
2176 err_ecache:
2177         nf_conntrack_tstamp_pernet_fini(net);
2178 err_tstamp:
2179         nf_conntrack_acct_pernet_fini(net);
2180 err_acct:
2181         nf_conntrack_expect_pernet_fini(net);
2182 err_expect:
2183         free_percpu(net->ct.stat);
2184 err_pcpu_lists:
2185         free_percpu(net->ct.pcpu_lists);
2186 err_stat:
2187         return ret;
2188 }