 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
-static void nf_ct_release_dying_list(void)
+static void nf_ct_release_dying_list(struct net *net)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
 	spin_lock_bh(&nf_conntrack_lock);
-	hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) {
+	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		/* never fails to remove them, no listeners at this point */
 		nf_ct_kill(ct);
 {
  i_see_dead_people:
 	nf_ct_iterate_cleanup(net, kill_all, NULL);
-	nf_ct_release_dying_list();
+	nf_ct_release_dying_list(net);
 	if (atomic_read(&net->ct.count) != 0) {
 		schedule();
 		goto i_see_dead_people;
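
For reference, a sketch of how nf_ct_release_dying_list() reads with the first hunk applied, assuming the tail of the function (the loop close and the spin_unlock_bh() matching the lock taken above) is left untouched by this patch:

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	/* walk this namespace's dying list rather than init_net's */
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}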