static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
+#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
+
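The new helper wraps rcu_dereference_protected() with the lockdep expression for nl_table_lock: update-side code can fetch the __rcu pointer without rcu_read_lock(), and lockdep will complain if the table lock is not actually held. A minimal sketch of the same idiom, with my_lock, my_slot and struct my_obj as hypothetical stand-ins, not names from this patch:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct my_obj { int val; };		/* hypothetical payload */

	static DEFINE_SPINLOCK(my_lock);
	static struct my_obj __rcu *my_slot;

	/* update-side dereference: only valid while my_lock is held */
	#define my_deref_protected(p) \
		rcu_dereference_protected(p, lockdep_is_held(&my_lock))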
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
struct hlist_node *node;
unsigned long mask;
unsigned int i;
+ struct listeners *listeners;
+
+ listeners = nl_deref_protected(tbl->listeners);
+ if (!listeners)
+ return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
sk_for_each_bound(sk, node, &tbl->mc_list)
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
- tbl->listeners->masks[i] = mask;
+ listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
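The early return added above is the actual fix: netlink_release() can now clear tbl->listeners for a kernel socket, so the updater snapshots the pointer once under nl_table_lock and writes every group mask through that snapshot instead of re-reading tbl->listeners in the loop. The "grabbed" state the comment mentions is netlink_table_grab(): the writer takes nl_table_lock and waits for nl_table_users to drain before touching the table. Roughly, as a simplified sketch (the real function parks on a wait queue instead of yielding):

	write_lock_irq(&nl_table_lock);
	while (atomic_read(&nl_table_users)) {
		write_unlock_irq(&nl_table_lock);	/* let readers finish */
		schedule();
		write_lock_irq(&nl_table_lock);
	}
	/* ... table is grabbed: safe to update listeners ... */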
if (netlink_is_kernel(sk)) {
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
- kfree(nl_table[sk->sk_protocol].listeners);
+ struct listeners *old;
+
+ old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
+ RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
+ kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].flags = 0;
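This is the standard RCU retire sequence: dereference under the writer lock, unpublish with RCU_INIT_POINTER() (a plain store is enough when publishing NULL), then hand the old array to kfree_rcu() so it is only freed once every in-flight rcu_read_lock() section has left. A self-contained sketch with a hypothetical slot and struct blob; the struct only needs an embedded rcu_head for kfree_rcu():

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct blob {
		struct rcu_head rcu;		/* required by kfree_rcu() */
		unsigned long masks[8];
	};

	static struct blob __rcu *slot;

	/* caller holds the writer lock protecting 'slot' */
	static void retire_slot(void)
	{
		struct blob *old = rcu_dereference_protected(slot, 1);

		RCU_INIT_POINTER(slot, NULL);
		kfree_rcu(old, rcu);		/* freed after a grace period */
	}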
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
- if (group - 1 < nl_table[sk->sk_protocol].groups)
+ if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
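The reader now has to tolerate NULL: the snapshot from rcu_dereference() is taken, checked and used entirely inside the rcu_read_lock() section, which is exactly what makes the kfree_rcu() above safe. The same pattern against the hypothetical slot from the previous sketch:

	#include <linux/bitops.h>

	static bool slot_bit_set(unsigned int bit)
	{
		struct blob *b;
		bool res = false;

		rcu_read_lock();
		b = rcu_dereference(slot);
		if (b && bit < 8 * BITS_PER_LONG)
			res = test_bit(bit, b->masks);
		rcu_read_unlock();
		return res;
	}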
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
- old = rcu_dereference_protected(tbl->listeners, 1);
+ old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
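Growing the array is the matching publish sequence: build the replacement off to the side, copy the old masks while still holding the table lock, and let rcu_assign_pointer() provide the barrier readers rely on, so they see either the old or the fully initialized new array; the old one is retired with kfree_rcu() a little further down in this function (not shown in the hunk). Continuing the slot/struct blob sketch:

	#include <linux/string.h>

	/* caller holds the writer lock protecting 'slot' */
	static int grow_slot(void)
	{
		struct blob *new, *old;

		new = kzalloc(sizeof(*new), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = rcu_dereference_protected(slot, 1);
		memcpy(new->masks, old->masks, sizeof(old->masks));
		rcu_assign_pointer(slot, new);
		kfree_rcu(old, rcu);		/* old copy freed after a grace period */
		return 0;
	}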