u32 seq);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
struct nf_conntrack_zone;
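To make the renamed API concrete, here is a minimal sketch of a caller; the callback name kill_by_mark and the mark-matching logic are hypothetical, illustrating only the contract stated in the comment (return non-zero and the entry is deleted):

/* Hypothetical callback: flag entries whose conntrack mark equals
 * *data for deletion.  ct->mark requires CONFIG_NF_CONNTRACK_MARK.
 */
static int kill_by_mark(struct nf_conn *ct, void *data)
{
	u32 mark = *(u32 *)data;

	return ct->mark == mark;
}

static void flush_marked(struct net *net, u32 mark)
{
	/* portid/report are 0: no ctnetlink event attribution needed */
	nf_ct_iterate_cleanup_net(net, kill_by_mark, &mark, 0, 0);
}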
*/
NF_CT_ASSERT(dev->ifindex != 0);
- nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
+ nf_ct_iterate_cleanup_net(net, device_cmp,
+ (void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
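For context, device_cmp is the callback shared by these notifier paths. A sketch of its usual shape in the masquerade code, reconstructed from memory and therefore an assumption rather than part of this patch:

static int device_cmp(struct nf_conn *i, void *arg)
{
	const struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	/* Delete entries masqueraded via the interface that went down. */
	return nat->masq_index == (int)(long)arg;
}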
struct net *net = dev_net(dev);
if (event == NETDEV_DOWN)
- nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
+ nf_ct_iterate_cleanup_net(net, device_cmp,
+ (void *)(long)dev->ifindex, 0, 0);
return NOTIFY_DONE;
}
w = container_of(work, struct masq_dev_work, work);
index = w->ifindex;
- nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
+ nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0);
put_net(w->net);
kfree(w);
/* ipv6 inet notifier is an atomic notifier, i.e. we cannot
* schedule.
*
- * Unfortunately, nf_ct_iterate_cleanup can run for a long
+ * Unfortunately, nf_ct_iterate_cleanup_net can run for a long
* time if there are lots of conntracks and the system
* handles high softirq load, so it frequently calls cond_resched
* while iterating the conntrack table.
*
- * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
+ * So we defer nf_ct_iterate_cleanup_net walk to the system workqueue.
*
* As we can have 'a lot' of inet_events (depending on amount
* of ipv6 addresses being deleted), we also need to add an upper
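The deferral pattern the comment describes might look like the sketch below; the fields of struct masq_dev_work are inferred from the work handler shown earlier (w->net, w->ifindex, w->work), and the function names, including iterate_cleanup_work for that handler, are assumptions:

struct masq_dev_work {
	struct work_struct	work;
	struct net		*net;
	int			ifindex;
};

static void defer_device_cleanup(struct net *net, int ifindex)
{
	struct masq_dev_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* atomic notifier: must not sleep */
	if (!w)
		return;

	get_net(net);				/* keep the netns alive until the work runs */
	w->net = net;
	w->ifindex = ifindex;
	INIT_WORK(&w->work, iterate_cleanup_work);
	schedule_work(&w->work);		/* system workqueue, as the comment says */
}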
return ct;
}
-void nf_ct_iterate_cleanup(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report)
+void nf_ct_iterate_cleanup_net(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report)
{
struct nf_conn *ct;
unsigned int bucket = 0;
cond_resched();
}
}
-EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
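The elided body of the renamed function roughly follows this shape; a reconstruction from the visible fragments (the bucket counter and the cond_resched call), not the literal patch context:

	/* Walk the hash one victim at a time; get_next_corpse() returns
	 * the next entry for which iter() asked for deletion.
	 */
	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		nf_ct_delete(ct, portid, report);	/* may emit a ctnetlink DESTROY event */
		nf_ct_put(ct);
		cond_resched();				/* the long-walk mitigation noted earlier */
	}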
static int kill_all(struct nf_conn *i, void *data)
{
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
+ nf_ct_iterate_cleanup_net(net, kill_all, NULL, 0, 0);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
}
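kill_all is as simple as a cleanup callback gets: it flags every entry, turning the walk into a full flush for the exiting netns. A sketch, consistent with the retry loop above:

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;	/* delete unconditionally */
}

The i_see_dead_people loop re-runs the walk because entries still referenced elsewhere (e.g. unconfirmed conntracks or in-flight packets) are not freed immediately, so net->ct.count can remain non-zero after a pass.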
return PTR_ERR(filter);
}
- nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
- portid, report);
+ nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+ portid, report);
kfree(filter);
return 0;
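For reference, a sketch of a mark-based ctnetlink_filter_match; the struct layout is an assumption for illustration, with a NULL filter meaning "flush everything":

struct ctnetlink_filter {
	struct {
		u32 val;
		u32 mask;
	} mark;
};

static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
	struct ctnetlink_filter *filter = data;

	if (!filter)
		return 1;	/* no filter: every entry matches */

	return (ct->mark & filter->mark.mask) == filter->mark.val;
}

Note that this call site is the one passing real portid/report values, so deletions requested over netlink can be reported back as events attributed to the requesting socket.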
proto->net_ns_put(net);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
+ nf_ct_iterate_cleanup_net(net, kill_l3proto, proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
+ nf_ct_iterate_cleanup_net(net, kill_l4proto, l4proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
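Both kill_l3proto and kill_l4proto follow the same compare-and-flag pattern; a sketch under the assumption that they match on the protocol numbers of the unregistering tracker:

static int kill_l3proto(struct nf_conn *i, void *data)
{
	const struct nf_conntrack_l3proto *l3proto = data;

	return nf_ct_l3num(i) == l3proto->l3proto;
}

static int kill_l4proto(struct nf_conn *i, void *data)
{
	const struct nf_conntrack_l4proto *l4proto = data;

	return nf_ct_protonum(i) == l4proto->l4proto &&
	       nf_ct_l3num(i) == l4proto->l3proto;
}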
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
+ nf_ct_iterate_cleanup_net(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
+ nf_ct_iterate_cleanup_net(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
+ nf_ct_iterate_cleanup_net(net, nf_nat_proto_clean, &clean, 0, 0);
}
static struct pernet_operations nf_nat_net_ops = {
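The initializer is cut off by the hunk boundary, so as orientation only: the pernet plumbing that ends up calling the exit handler above looks roughly like the following. The _example names and the nf_nat_net_exit handler name are assumptions inferred from context:

static struct pernet_operations nf_nat_net_ops_example = {
	.exit = nf_nat_net_exit,	/* runs the table clean shown above */
};

static int __init nf_nat_init_example(void)
{
	return register_pernet_subsys(&nf_nat_net_ops_example);
}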