/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
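/*
 * Unlink @exp from the global hash and from its master's per-helper list,
 * emit an IPEXP_DESTROY event and drop the hash table's reference.  All
 * callers in this file hold nf_conntrack_lock, and the expectation's timer
 * must already have been stopped (see the NF_CT_ASSERT below).
 */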
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

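/*
 * Timer callback: the expectation expired without being matched.  Take the
 * lock, unlink it and drop the reference that the timer itself held.
 */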
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

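/*
 * Hash the destination part of a tuple (address, layer 3/4 protocol numbers
 * and destination port) with jhash2, then scale the 32-bit result onto the
 * table with a multiply-shift: for a 32-bit hash h,
 * ((u64)h * hsize) >> 32 == floor(h * hsize / 2^32), which spreads entries
 * over [0, hsize) without a modulo.
 */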
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

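/*
 * Lockless lookup: walk the destination hash chain under RCU and return the
 * first expectation whose tuple matches @tuple under its own mask, within
 * the same conntrack zone.  No reference is taken; callers must hold
 * rcu_read_lock() and use nf_ct_expect_find_get() if they need to keep the
 * entry around afterwards.
 */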
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

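/*
 * The atomic_inc_not_zero() below guards against an expectation whose last
 * reference was dropped after we found it under RCU but before we could
 * take our own: such an entry is already on its way to
 * nf_ct_expect_free_rcu() and must be treated as gone.
 */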
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

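/*
 * On success the caller ends up owning a reference: a fresh one for
 * PERMANENT expectations, or the one previously held by the timer when
 * del_timer() wins the race against expiry (the timer will never run, so
 * its reference is inherited instead of being dropped).
 */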
/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (ie. the packet hasn't
           left this machine yet), how can the other end know about the
           expectation?  Hence these are not the droids you are looking for
           (if the master ct never got confirmed, we'd hold a reference to
           it and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

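/*
 * Clash detection: two expectations conflict when their tuples agree on
 * every field covered by the intersection of their masks.  E.g. a source
 * port mask of 0x0000 (any port) intersected with one of 0xFFFF gives
 * 0x0000, so the source port can no longer tell the two apart and only
 * the remaining fields decide.
 */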
/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* The part covered by the intersection of the masks must be
           unequal, otherwise they clash. */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
                nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
                nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
                nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations.  During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

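/*
 * A minimal sketch of how a conntrack helper typically drives this API
 * (modeled on in-tree helpers such as nf_conntrack_ftp; the local variable
 * names are illustrative, not taken from this file):
 *
 *      struct nf_conntrack_expect *exp;
 *
 *      exp = nf_ct_expect_alloc(ct);
 *      if (exp == NULL)
 *              return NF_DROP;
 *      nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *                        &src_addr, &dst_addr, IPPROTO_TCP, NULL, &port);
 *      if (nf_ct_expect_related(exp) != 0)
 *              pr_debug("cannot register expectation\n");
 *      nf_ct_expect_put(exp);  /* drop the allocation reference */
 *
 * nf_ct_expect_related() takes its own references on success (see
 * nf_ct_expect_insert() below), so the creator always drops its initial
 * reference with nf_ct_expect_put() afterwards.
 */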
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

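/*
 * Commit an already-validated expectation: take two extra references (one
 * for the hash table, one for the timer), link it into the master's
 * per-helper list (or the global userspace list) and the destination hash,
 * and arm the timeout from the helper's expect_policy.  Runs under
 * nf_conntrack_lock, which is why rcu_dereference_protected() is used for
 * the helper pointer.
 */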
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references: one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                                master_help->helper,
                                lockdep_is_held(&nf_conntrack_lock)
                                )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

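/*
 * Re-arm the timeout of an existing identical expectation.  Returns 1 only
 * if del_timer() won the race against expiry; 0 means the timer handler is
 * already running and the entry is on its way out, so the caller must
 * treat it as dead rather than refresh it.
 */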
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &rcu_dereference_protected(
                master_help->helper,
                lockdep_is_held(&nf_conntrack_lock)
                )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

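/*
 * Validate a new expectation against the current state, with
 * nf_conntrack_lock held.  Return convention: 1 means "go ahead and
 * insert", 0 means an identical expectation already existed and its timer
 * was refreshed, and a negative errno reports a rejection (no helper, a
 * clash with an existing expectation, or a per-helper/global limit hit).
 */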
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will we be over the limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                       expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                        sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

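/*
 * The expectation hash size can only be chosen at module load time (the
 * parameter mode is 0400, so it is not writable at runtime).  When left at
 * 0 it defaults below to net->ct.htable_size / 256 (minimum 1), and
 * nf_ct_expect_max is derived as four times the hash size.
 */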
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}