#endif
static bool ct_seq_should_skip(const struct nf_conn *ct,
+ const struct net *net,
const struct nf_conntrack_tuple_hash *hash)
{
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
return true;

if (nf_ct_l3num(ct) != AF_INET)
return true;
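+ /* skip entries that belong to a different network namespace */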
+ if (!net_eq(nf_ct_net(ct), net))
+ return true;
+
return false;
}
int ret = 0;
NF_CT_ASSERT(ct);
- if (ct_seq_should_skip(ct, hash))
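+ /* seq_file_net() gives the netns this /proc file was opened in */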
+ if (ct_seq_should_skip(ct, seq_file_net(s), hash))
return 0;
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
return 0;

/* check if we raced w. object reuse */
if (!nf_ct_is_confirmed(ct) ||
- ct_seq_should_skip(ct, hash))
+ ct_seq_should_skip(ct, seq_file_net(s), hash))
goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone)
+ const struct nf_conntrack_zone *zone,
+ const struct net *net)
{
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
/* A conntrack can be recreated with the equal tuple,
* so we need to check that the conntrack is confirmed
*/
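+ /* identical tuples can exist in different namespaces,
+ * so the owning netns has to match as well
+ */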
return nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
- nf_ct_is_confirmed(ct);
+ nf_ct_is_confirmed(ct) &&
+ net_eq(net, nf_ct_net(ct));
}
/*
* Warning :
* - Caller must take a reference on returned object
*   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
*/
} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
- if (nf_ct_key_equal(h, tuple, zone)) {
+ if (nf_ct_key_equal(h, tuple, zone, net)) {
NF_CT_STAT_INC_ATOMIC(net, found);
return h;
}
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
- if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
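+ /* the entry may have been recycled for another tuple or
+ * netns while we took the reference; re-validate the key
+ */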
+ if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
nf_ct_put(ct);
goto begin;
}
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- zone))
+ zone, net))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- zone))
+ zone, net))
goto out;
add_timer(&ct->timeout);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- zone))
+ zone, net))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- zone))
+ zone, net))
goto out;
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
- nf_ct_key_equal(h, tuple, zone)) {
+ nf_ct_key_equal(h, tuple, zone, net)) {
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = nf_ct_tuplehash_to_ctrack(h);
- if (iter(ct, data))
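+ /* only hand conntracks from this netns to the iterator */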
+ if (net_eq(nf_ct_net(ct), net) &&
+ iter(ct, data))
goto found;
}
}