#include "lockdep_internals.h"
/*
- * hash_lock: protects the lockdep hashes and class/list/hash allocators.
+ * lockdep_lock: protects the lockdep graph, the hashes and the
+ * class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
* to use a raw spinlock - we really don't want the spinlock
- * code to recurse back into the lockdep code.
+ * code to recurse back into the lockdep code...
*/
-static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int graph_lock(void)
+{
+ __raw_spin_lock(&lockdep_lock);
+ /*
+ * Make sure that if another CPU detected a bug while
+ * walking the graph we don't change it (while the other
+ * CPU is busy printing out its report with the graph
+ * lock already dropped)
+ */
+ if (!debug_locks) {
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+ }
+ return 1;
+}
+
+static inline int graph_unlock(void)
+{
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+}
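
For readers following the conversion, here is the intended caller pattern in one place: every path that mutates the dependency graph takes the lock through graph_lock() and backs off when it returns 0, because that means another CPU has already detected a bug, disabled lock debugging and may still be printing its report with the lock dropped. A minimal sketch (illustration only, not part of the patch; example_graph_update() is a hypothetical name):

    static int example_graph_update(void)
    {
            if (!graph_lock())
                    return 0;       /* lockdep already disabled by another CPU */

            /* ... modify the dependency graph / hashes here ... */

            graph_unlock();
            return 1;
    }
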
+
+/*
+ * Turn lock debugging off and return 0 if it was already off,
+ * releasing the graph lock in either case:
+ */
+static inline int debug_locks_off_graph_unlock(void)
+{
+ int ret = debug_locks_off();
+
+ __raw_spin_unlock(&lockdep_lock);
+
+ return ret;
+}
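
Note that this helper only works because debug_locks_off() returns nonzero solely for the caller that actually switches debugging off. A simplified sketch of that behaviour, assuming the lib/debug_locks.c implementation of this era (illustration only, not authoritative):

    int debug_locks_off(void)
    {
            if (xchg(&debug_locks, 0)) {            /* this CPU flipped it off */
                    if (!debug_locks_silent) {
                            console_verbose();
                            return 1;               /* caller should print its report */
                    }
            }
            return 0;                               /* already off, or silent mode */
    }
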
static int lockdep_initialized;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
- * Allocate a lockdep entry. (assumes hash_lock held, returns
+ * Allocate a lockdep entry. (assumes the graph lock is held, returns
* with NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
/*
* Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
+ * addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
- __raw_spin_unlock(&hash_lock);
- return 0;
- }
if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_off()) {
- printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
- }
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+
return 0;
}
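
For context on the "tightly packed array" comment above: save_trace() hands each trace the next free slice of the shared stack_trace[] array, which is why the array and nr_stack_trace_entries are both protected by the graph lock. A sketch of that slicing, reconstructed from the surrounding code and therefore an assumption rather than part of the patch:

    trace->entries     = stack_trace + nr_stack_trace_entries;
    trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
    /* ... the arch backtrace code fills trace->entries[] and nr_entries ... */
    trace->max_entries = trace->nr_entries;
    nr_stack_trace_entries += trace->nr_entries;
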
{
struct task_struct *curr = current;
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=======================================================\n");
if (debug_locks_silent)
return 0;
- /* hash_lock unlocked by the header */
- __raw_spin_lock(&hash_lock);
this.class = check_source->class;
if (!save_trace(&this.trace))
return 0;
- __raw_spin_unlock(&hash_lock);
+
print_circular_bug_entry(&this, 0);
printk("\nother info that might help us debug this:\n\n");
static int noinline print_infinite_recursion_bug(void)
{
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
enum lock_usage_bit bit2,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n======================================================\n");
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
- debug_locks_off();
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=============================================\n");
* Debugging printouts:
*/
if (verbose(prev->class) || verbose(next->class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
printk("\n new dependency: ");
print_lock_name(prev->class);
printk(" => ");
print_lock_name(next->class);
printk("\n");
dump_stack();
- __raw_spin_lock(&hash_lock);
+ return graph_lock();
}
return 1;
}
}
return 1;
out_bug:
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
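
The verbose branch above also shows the second recurring pattern of this conversion: printk() is never called with the graph lock held, so the lock is dropped around the printout and graph_lock()'s return value is propagated, letting the caller back off if lock debugging was disabled in the meantime. A minimal sketch (illustration only; example_verbose_report() is a hypothetical name):

    static int example_verbose_report(struct lock_class *class)
    {
            graph_unlock();         /* never printk() under the graph lock */
            print_lock_name(class);
            printk("\n");
            dump_stack();
            return graph_lock();    /* 0 if lockdep got disabled meanwhile */
    }
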
hash_head = classhashentry(key);
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
- __raw_spin_unlock(&hash_lock);
+ if (!debug_locks_off_graph_unlock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
raw_local_irq_restore(flags);
- debug_locks_off();
+
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
list_add_tail_rcu(&class->hash_entry, hash_head);
if (verbose(class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
+
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
+
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
}
out_unlock_set:
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
if (!subclass || force)
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
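
Both register_lock_class() ("We have to do the hash-walk again") and lookup_chain_cache() above rely on the same optimistic scheme: a lockless walk of the RCU-managed hash list on the fast path, then a second walk under the graph lock before inserting, because another CPU may have added the same entry in between. A condensed sketch of the pattern (illustration only; struct example_obj and the helpers are hypothetical):

    static struct example_obj *example_lookup_or_add(struct list_head *hash_head, u64 key)
    {
            struct example_obj *obj;

            /* Fast path: lockless lookup, the list is RCU-managed. */
            list_for_each_entry(obj, hash_head, hash_entry)
                    if (obj->key == key)
                            return obj;

            if (!graph_lock())
                    return NULL;            /* lock debugging already disabled */

            /* Walk again under the lock, to avoid adding a duplicate. */
            list_for_each_entry(obj, hash_head, hash_entry)
                    if (obj->key == key) {
                            graph_unlock();
                            return obj;
                    }

            obj = example_alloc(key);       /* e.g. a slot from a static array */
            if (obj)
                    list_add_tail_rcu(&obj->hash_entry, hash_head);
            graph_unlock();
            return obj;
    }
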
struct held_lock *this, int forwards,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=========================================================\n");
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=================================\n");
if (likely(this->class->usage_mask & new_mask))
return 1;
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* Make sure we didn't race:
*/
if (unlikely(this->class->usage_mask & new_mask)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
return 1;
}
debug_atomic_dec(&nr_unused_locks);
break;
default:
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
WARN_ON(1);
return 0;
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
/*
- * We must printk outside of the hash_lock:
+ * We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
- * hash_lock for us)
+ * graph_lock for us)
*/
if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
/*
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
}
curr->lockdep_depth++;
check_chain_key(curr);
int i;
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ graph_lock();
/*
* Unhash all classes that were created by this module:
zap_class(class);
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
}
* Debug check: in the end all mapped classes should
* be gone.
*/
- __raw_spin_lock(&hash_lock);
+ graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
if (unlikely(class == lock->class_cache)) {
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (debug_locks_off_graph_unlock())
+ WARN_ON(1);
goto out_restore;
}
}
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
out_restore:
raw_local_irq_restore(flags);