void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+
static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
{
return net->ct.ecache_dwork_pending;
STATE_DONE,
};
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
+{
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+ return &cnet->ecache;
+}
+#if IS_MODULE(CONFIG_NF_CT_NETLINK)
+EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
+#endif
+
/* NOTE(review): this block appears to be corrupted patch residue.  The
 * function opens but its body is truncated, and it closes with "};".
 * The locals below also match the fields of the ctnetlink dump context
 * struct (last / cpu / done / retrans_done), so two hunks were likely
 * spliced together here -- reconstruct against the upstream source.
 */
static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
{
/* time budget for one eviction pass: give up after ECACHE_MAX_JIFFIES */
unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
struct nf_conn *last;
unsigned int cpu;
bool done;
+ bool retrans_done; /* NOTE(review): stray "+" diff line -- presumably belongs to struct ctnetlink_list_dump_ctx; confirm */
};
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
+ struct nf_conn *last = ctx->last;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ const struct net *net = sock_net(skb->sk);
+ struct nf_conntrack_net_ecache *ecache_net;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+#endif
+
+ if (ctx->retrans_done)
+ return ctnetlink_dump_list(skb, cb, true);
+
+ ctx->last = NULL;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ ecache_net = nf_conn_pernet_ecache(net);
+ spin_lock_bh(&ecache_net->dying_lock);
+
+ hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
+ struct nf_conn *ct;
+ int res;
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (last && last != ct)
+ continue;
+
+ res = ctnetlink_dump_one_entry(skb, cb, ct, true);
+ if (res < 0) {
+ spin_unlock_bh(&ecache_net->dying_lock);
+ nf_ct_put(last);
+ return skb->len;
+ }
+
+ nf_ct_put(last);
+ last = NULL;
+ }
+
+ spin_unlock_bh(&ecache_net->dying_lock);
+#endif
+ nf_ct_put(last);
+ ctx->retrans_done = true;
+
return ctnetlink_dump_list(skb, cb, true);
}