		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
			continue;
+		rcu_read_lock();
		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
			if (atomic_dec_and_test(&node->ae.used)) {
				/* Recently Unused */
				avc_node_delete(node);
				avc_cache_stats_incr(reclaims);
				ecx++;
				if (ecx >= AVC_CACHE_RECLAIM) {
+					rcu_read_unlock();
					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
					goto out;
				}
			}
		}
+		rcu_read_unlock();
		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
	}
out:
	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		/*
+		 * With preemptable RCU, the outer spinlock does not
+		 * prevent RCU grace periods from ending.
+		 */
+		rcu_read_lock();
		list_for_each_entry(node, &avc_cache.slots[i], list)
			avc_node_delete(node);
+		rcu_read_unlock();
		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
	}
{
	struct sel_netif *netif;

+	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
+	rcu_read_unlock();
}
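The netif hunk applies the same rule to a lookup-and-destroy path: the read-side critical section is opened before spin_lock_bh() and held until the found entry has been handed off, presumably because sel_netif_find() walks an RCU-protected list and the destroy path defers freeing with call_rcu(). A rough sketch of that shape, with hypothetical names (my_if_find, my_if_kill, and so on) rather than the functions in the patch:

/*
 * Sketch (hypothetical names): lockless lookup plus locked destroy.
 * The read lock is taken before the update-side lock so the entry
 * returned by the lookup cannot be freed while it is being destroyed.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_if {
	struct list_head list;
	struct rcu_head rcu;
	int ifindex;
};

static LIST_HEAD(my_if_list);
static DEFINE_SPINLOCK(my_if_lock);

/* Lockless lookup: callers must be inside rcu_read_lock(). */
static struct my_if *my_if_find(int ifindex)
{
	struct my_if *entry;

	list_for_each_entry_rcu(entry, &my_if_list, list)
		if (entry->ifindex == ifindex)
			return entry;
	return NULL;
}

static void my_if_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_if, rcu));
}

/* Unlink under the lock; defer the free until readers are done. */
static void my_if_destroy(struct my_if *entry)
{
	list_del_rcu(&entry->list);
	call_rcu(&entry->rcu, my_if_free_rcu);
}

static void my_if_kill(int ifindex)
{
	struct my_if *entry;

	/* Open the read-side section first so the looked-up entry stays valid. */
	rcu_read_lock();
	spin_lock_bh(&my_if_lock);
	entry = my_if_find(ifindex);
	if (entry)
		my_if_destroy(entry);
	spin_unlock_bh(&my_if_lock);
	rcu_read_unlock();
}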
/**