netfilter: conntrack: remove unneeded nf_ct_put
author Florian Westphal <fw@strlen.de>
Tue, 25 Aug 2020 22:52:45 +0000 (00:52 +0200)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Fri, 28 Aug 2020 17:51:27 +0000 (19:51 +0200)
We can delay the refcount increment until we reassign the existing
entry to the current skb.

A refcount of zero can't happen while the nf_conn object is still in
the hash table, and parallel mutations are impossible because we hold
the bucket lock.
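
As an aside, here is a minimal userspace model of that lifetime rule
(hypothetical names and structures, not kernel code): the table owns one
reference to every inserted entry, and the bucket lock serializes
insert/remove, so a lookup done under the lock can take a plain
reference; the failure-checking atomic_inc_not_zero() pattern is only
needed for lookups that can observe an entry whose last reference is
being dropped concurrently.

/* Illustrative sketch only; 'entry', 'bucket', lookup_get() and
 * entry_put() are made-up names for this example. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	atomic_int use;			/* refcount; >= 1 while in the table */
	struct entry *next;
};

struct bucket {
	pthread_mutex_t lock;		/* models the conntrack bucket lock */
	struct entry *head;
};

/* Take a reference to the first entry, if any.  Holding bucket->lock
 * excludes a concurrent remove, so e->use is at least 1 (the table's
 * own reference) and a plain atomic increment is safe. */
static struct entry *lookup_get(struct bucket *b)
{
	struct entry *e;

	pthread_mutex_lock(&b->lock);
	e = b->head;
	if (e)
		atomic_fetch_add(&e->use, 1);
	pthread_mutex_unlock(&b->lock);
	return e;
}

/* Drop a reference; the entry is freed only once the table's own
 * reference has also been dropped (i.e. after removal). */
static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->use, 1) == 1)
		free(e);
}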

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
net/netfilter/nf_conntrack_core.c

index 93e77ca0efad1e55de74462ba9e084fa1c934f2d..234b7cab37c301cd66025e43ffa5d9ce2e50ae47 100644
@@ -908,6 +908,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
                tstamp->start = ktime_get_real_ns();
 }
 
+/* caller must hold locks to prevent concurrent changes */
 static int __nf_ct_resolve_clash(struct sk_buff *skb,
                                 struct nf_conntrack_tuple_hash *h)
 {
@@ -921,13 +922,12 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
        if (nf_ct_is_dying(ct))
                return NF_DROP;
 
-       if (!atomic_inc_not_zero(&ct->ct_general.use))
-               return NF_DROP;
-
        if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
            nf_ct_match(ct, loser_ct)) {
                struct net *net = nf_ct_net(ct);
 
+               nf_conntrack_get(&ct->ct_general);
+
                nf_ct_acct_merge(ct, ctinfo, loser_ct);
                nf_ct_add_to_dying_list(loser_ct);
                nf_conntrack_put(&loser_ct->ct_general);
@@ -937,7 +937,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
                return NF_ACCEPT;
        }
 
-       nf_ct_put(ct);
        return NF_DROP;
 }
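
For orientation, here is a sketch of the clash-resolution path after
this patch, reconstructed from the hunks above. The declarations at the
top of the function are unchanged by the diff and are paraphrased from
context, so treat them as assumptions rather than part of this change;
context lines the diff elides are marked as such.

/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
				 struct nf_conntrack_tuple_hash *h)
{
	/* 'ct' is the existing entry that won the insertion race;
	 * 'loser_ct' is the unconfirmed entry attached to the skb.
	 * (Declarations assumed from the unchanged function body.) */
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *loser_ct = nf_ct_get(skb, &ctinfo);

	if (nf_ct_is_dying(ct))
		return NF_DROP;

	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
	    nf_ct_match(ct, loser_ct)) {
		struct net *net = nf_ct_net(ct);

		/* reference taken only on the success path, right
		 * before the skb is reassigned to 'ct' */
		nf_conntrack_get(&ct->ct_general);

		nf_ct_acct_merge(ct, ctinfo, loser_ct);
		nf_ct_add_to_dying_list(loser_ct);
		nf_conntrack_put(&loser_ct->ct_general);
		/* ... unchanged context elided by the diff ... */
		return NF_ACCEPT;
	}

	/* no reference was taken above, so no nf_ct_put() is needed */
	return NF_DROP;
}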