Introduce an inline helper, net_eq(), to compare two network
namespaces.
Without CONFIG_NET_NS, no namespace other than &init_net exists,
so net_eq() always returns 1 and the compiler can optimize the
comparison (and any code guarded by it) away.
We do not need to convert comparisons against &init_net itself,
i.e. 1) &init_net vs &init_net and 2) some other net vs &init_net;
those stay plain pointer comparisons.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
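
As an illustrative sketch (not part of this patch; example_rcv() and
its arguments are hypothetical), a converted caller looks like the
following. Without CONFIG_NET_NS the guard reduces to if (!1) and the
compiler discards it together with the early return:

	static int example_rcv(struct net_device *dev, struct net *net)
	{
		/* Drop traffic that belongs to a foreign namespace. */
		if (!net_eq(dev_net(dev), net))
			return 0;
		/* ... per-namespace processing ... */
		return 1;
	}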
{
atomic_dec(&net->use_count);
}
+
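+/* With CONFIG_NET_NS: two namespaces are equal iff the pointers match. */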
+static inline
+int net_eq(const struct net *net1, const struct net *net2)
+{
+ return net1 == net2;
+}
#else
static inline struct net *get_net(struct net *net)
{
return net;
}
+
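+/* Without CONFIG_NET_NS only &init_net exists, so this is always true. */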
+static inline
+int net_eq(const struct net *net1, const struct net *net2)
+{
+ return 1;
+}
#endif
#define for_each_net(VAR) \
/* Get out if there is nothing to do */
err = 0;
- if (dev_net(dev) == net)
+ if (net_eq(dev_net(dev), net))
goto out;
/* Pick the destination device name, and ensure
hash_val = tbl->hash(pkey, NULL);
for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
if (!memcmp(n->primary_key, pkey, key_len) &&
- dev_net(n->dev) == net) {
+ net_eq(dev_net(n->dev), net)) {
neigh_hold(n);
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
if (!memcmp(n->key, pkey, key_len) &&
- (pneigh_net(n) == net) &&
+ net_eq(pneigh_net(n), net) &&
(n->dev == dev || !n->dev)) {
read_unlock_bh(&tbl->lock);
goto out;
for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
np = &n->next) {
if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
- (pneigh_net(n) == net)) {
+ net_eq(pneigh_net(n), net)) {
*np = n->next;
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
struct neigh_parms *p;
for (p = &tbl->parms; p; p = p->next) {
- if ((p->dev && p->dev->ifindex == ifindex && neigh_parms_net(p) == net) ||
+ if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
(!p->dev && !ifindex))
return p;
}
break;
for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
- if (net != neigh_parms_net(p))
+ if (!net_eq(neigh_parms_net(p), net))
continue;
if (nidx++ < neigh_skip)
n = tbl->hash_buckets[bucket];
while (n) {
- if (dev_net(n->dev) != net)
+ if (!net_eq(dev_net(n->dev), net))
goto next;
if (state->neigh_sub_iter) {
loff_t fakep = 0;
while (1) {
while (n) {
- if (dev_net(n->dev) != net)
+ if (!net_eq(dev_net(n->dev), net))
goto next;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
state->flags |= NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
pn = tbl->phash_buckets[bucket];
- while (pn && (pneigh_net(pn) != net))
+ while (pn && !net_eq(pneigh_net(pn), net))
pn = pn->next;
if (pn)
break;
if (++state->bucket > PNEIGH_HASHMASK)
break;
pn = tbl->phash_buckets[state->bucket];
- while (pn && (pneigh_net(pn) != net))
+ while (pn && !net_eq(pneigh_net(pn), net))
pn = pn->next;
if (pn)
break;
sk_for_each(sk, node, head) {
const struct inet_sock *inet = inet_sk(sk);
- if (sock_net(sk) == net && inet->num == hnum &&
+ if (net_eq(sock_net(sk), net) && inet->num == hnum &&
!ipv6_only_sock(sk)) {
const __be32 rcv_saddr = inet->rcv_saddr;
int score = sk->sk_family == PF_INET ? 1 : 0;
if (inet->num == hnum && !sk->sk_node.next &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
(sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
- !sk->sk_bound_dev_if && sock_net(sk) == net)
+ !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
goto sherry_cache;
sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
}
sk_for_each_from(sk, node) {
struct inet_sock *inet = inet_sk(sk);
- if (sock_net(sk) == net && inet->num == num &&
+ if (net_eq(sock_net(sk), net) && inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
!(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
rth->fl.oif != ikeys[k] ||
rth->fl.iif != 0 ||
rth->rt_genid != atomic_read(&rt_genid) ||
- dev_net(rth->u.dst.dev) != net) {
+ !net_eq(dev_net(rth->u.dst.dev), net)) {
rthp = &rth->u.dst.rt_next;
continue;
}
rth->rt_src == iph->saddr &&
rth->fl.iif == 0 &&
!(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
- dev_net(rth->u.dst.dev) == net &&
+ net_eq(dev_net(rth->u.dst.dev), net) &&
rth->rt_genid == atomic_read(&rt_genid)) {
unsigned short mtu = new_mtu;
rth->fl.oif == 0 &&
rth->fl.mark == skb->mark &&
rth->fl.fl4_tos == tos &&
- dev_net(rth->u.dst.dev) == net &&
+ net_eq(dev_net(rth->u.dst.dev), net) &&
rth->rt_genid == atomic_read(&rt_genid)) {
dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(in_hit);
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK)) &&
- dev_net(rth->u.dst.dev) == net &&
+ net_eq(dev_net(rth->u.dst.dev), net) &&
rth->rt_genid == atomic_read(&rt_genid)) {
dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_lock_bh();
for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
- if (dev_net(rt->u.dst.dev) != net || idx < s_idx)
+ if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
continue;
if (rt->rt_genid != atomic_read(&rt_genid))
continue;
while (1) {
while (req) {
if (req->rsk_ops->family == st->family &&
- sock_net(req->sk) == net) {
+ net_eq(sock_net(req->sk), net)) {
cur = req;
goto out;
}
}
get_sk:
sk_for_each_from(sk, node) {
- if (sk->sk_family == st->family && sock_net(sk) == net) {
+ if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
cur = sk;
goto out;
}
read_lock_bh(lock);
sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
if (sk->sk_family != st->family ||
- sock_net(sk) != net) {
+ !net_eq(sock_net(sk), net)) {
continue;
}
rc = sk;
inet_twsk_for_each(tw, node,
&tcp_hashinfo.ehash[st->bucket].twchain) {
if (tw->tw_family != st->family ||
- twsk_net(tw) != net) {
+ !net_eq(twsk_net(tw), net)) {
continue;
}
rc = tw;
tw = cur;
tw = tw_next(tw);
get_tw:
- while (tw && (tw->tw_family != st->family || twsk_net(tw) != net)) {
+ while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
tw = tw_next(tw);
}
if (tw) {
sk = sk_next(sk);
sk_for_each_from(sk, node) {
- if (sk->sk_family == st->family && sock_net(sk) == net)
+ if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
goto found;
}
struct hlist_node *node;
sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
- if (sock_net(sk) == net && sk->sk_hash == num)
+ if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
return 1;
return 0;
}
sk_for_each(sk2, node, head)
if (sk2->sk_hash == snum &&
sk2 != sk &&
- sock_net(sk2) == net &&
+ net_eq(sock_net(sk2), net) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
- if (sock_net(sk) == net && sk->sk_hash == hnum &&
+ if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
!ipv6_only_sock(sk)) {
int score = (sk->sk_family == PF_INET ? 1 : 0);
if (inet->rcv_saddr) {
for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
struct hlist_node *node;
sk_for_each(sk, node, state->hashtable + state->bucket) {
- if (sock_net(sk) != net)
+ if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
goto found;
sk = sk_next(sk);
try_again:
;
- } while (sk && (sock_net(sk) != net || sk->sk_family != state->family));
+ } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
sk = sk_head(state->hashtable + state->bucket);
read_lock_bh(&addrconf_hash_lock);
for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
- if (dev_net(ifp->idev->dev) != net)
+ if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
!(ifp->flags&IFA_F_TENTATIVE)) {
u8 hash = ipv6_addr_hash(addr);
for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
- if (dev_net(ifp->idev->dev) != net)
+ if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev)
read_lock_bh(&addrconf_hash_lock);
for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
- if (dev_net(ifp->idev->dev) != net)
+ if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (dev == NULL || ifp->idev->dev == dev ||
for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
ifa = inet6_addr_lst[state->bucket];
- while (ifa && dev_net(ifa->idev->dev) != net)
+ while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
ifa = ifa->lst_next;
if (ifa)
break;
ifa = ifa->lst_next;
try_again:
if (ifa) {
- if (dev_net(ifa->idev->dev) != net) {
+ if (!net_eq(dev_net(ifa->idev->dev), net)) {
ifa = ifa->lst_next;
goto try_again;
}
u8 hash = ipv6_addr_hash(addr);
read_lock_bh(&addrconf_hash_lock);
for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
- if (dev_net(ifp->idev->dev) != net)
+ if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_cmp(&ifp->addr, addr) == 0 &&
(ifp->flags & IFA_F_HOMEADDRESS)) {
read_lock(&hashinfo->lhash_lock);
sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
- if (sock_net(sk) == net && inet_sk(sk)->num == hnum &&
+ if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum &&
sk->sk_family == PF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
if (inet_sk(sk)->num == num) {
struct ipv6_pinfo *np = inet6_sk(sk);
- if (sock_net(sk) != net)
+ if (!net_eq(sock_net(sk), net))
continue;
if (!ipv6_addr_any(&np->daddr) &&
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
- if (sock_net(sk) == net && sk->sk_hash == hnum &&
+ if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
read_lock(&nl_table_lock);
head = nl_pid_hashfn(hash, pid);
sk_for_each(sk, node, head) {
- if (sock_net(sk) == net && (nlk_sk(sk)->pid == pid)) {
+ if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
sock_hold(sk);
goto found;
}
head = nl_pid_hashfn(hash, pid);
len = 0;
sk_for_each(osk, node, head) {
- if (sock_net(osk) == net && (nlk_sk(osk)->pid == pid))
+ if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
break;
len++;
}
netlink_table_grab();
head = nl_pid_hashfn(hash, pid);
sk_for_each(osk, node, head) {
- if (sock_net(osk) != net)
+ if (!net_eq(sock_net(osk), net))
continue;
if (nlk_sk(osk)->pid == pid) {
/* Bind collision, search negative pid values. */
!test_bit(p->group - 1, nlk->groups))
goto out;
- if (sock_net(sk) != p->net)
+ if (!net_eq(sock_net(sk), p->net))
goto out;
if (p->failure) {
sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
- if (sock_net(s) != net)
+ if (!net_eq(sock_net(s), net))
continue;
if (u->addr->len == len &&
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->dentry;
- if (sock_net(s) != net)
+ if (!net_eq(sock_net(s), net))
continue;
if(dentry && dentry->d_inode == i)