 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
-	int cpu;
 	spinlock_t *lockp;
 
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
[...]
 		cond_resched();
 	}
+	return NULL;
+found:
+	atomic_inc(&ct->ct_general.use);
+	spin_unlock(lockp);
+	local_bh_enable();
+	return ct;
+}
+
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 
 		spin_lock_bh(&pcpu->lock);
 		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (iter(ct, data))
-				set_bit(IPS_DYING_BIT, &ct->status);
+
+			/* we cannot call iter() on unconfirmed list, the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
 		}
 		spin_unlock_bh(&pcpu->lock);
 		cond_resched();
 	}
-	return NULL;
-found:
-	atomic_inc(&ct->ct_general.use);
-	spin_unlock(lockp);
-	local_bh_enable();
-	return ct;
 }
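
The set_bit() above is unconditional: the entry cannot be reported to iter(), but flagging it as dying is enough, because the owning cpu re-checks the flag when it tries to confirm the entry. A paraphrased sketch of that check in __nf_conntrack_confirm() (existing mainline code, not part of this patch):

	if (unlikely(nf_ct_is_dying(ct))) {
		nf_ct_add_to_dying_list(ct);
		goto dying;
	}

So a flagged unconfirmed entry is never inserted into the hash table; it ends up on the dying list and is released when its last reference is dropped.
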
@@ ... @@ void nf_ct_iterate_cleanup_net(struct net *net,
 	if (atomic_read(&net->ct.count) == 0)
 		return;
 
+	__nf_ct_unconfirmed_destroy(net);
+
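+	/* wait for packets currently in flight: each one either confirms
+	 * its conntrack, making it visible to the hash walk below, or
+	 * sees IPS_DYING on the confirm path and drops the entry.
+	 */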
+	synchronize_net();
+
 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
 		/* Time to push up daises... */
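
For context, a minimal sketch of how the cleanup API is driven after this change. The kill_all callback here is illustrative (the kernel's own flush paths use callbacks of the same shape), and portid/report are netlink event parameters, passed as 0 when no event should be emitted:

	/* Returning nonzero asks the walker to remove the entry. */
	static int kill_all(struct nf_conn *ct, void *data)
	{
		return 1;
	}

	nf_ct_iterate_cleanup_net(net, kill_all, NULL, 0, 0);

With the patch applied, such a flush first flags every unconfirmed entry as dying, waits out in-flight packets via synchronize_net(), and only then walks the hash table, so iter() is only ever invoked on confirmed entries, whose ct->ext can no longer be reallocated by another cpu.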