locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock()
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitops.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jhash.h>
52 #include <linux/nmi.h>
53
54 #include <asm/sections.h>
55
56 #include "lockdep_internals.h"
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/lock.h>
60
61 #ifdef CONFIG_PROVE_LOCKING
62 int prove_locking = 1;
63 module_param(prove_locking, int, 0644);
64 #else
65 #define prove_locking 0
66 #endif
67
68 #ifdef CONFIG_LOCK_STAT
69 int lock_stat = 1;
70 module_param(lock_stat, int, 0644);
71 #else
72 #define lock_stat 0
73 #endif
74
75 /*
76  * lockdep_lock: protects the lockdep graph, the hashes and the
77  *               class/list/hash allocators.
78  *
79  * This is one of the rare exceptions where it's justified
80  * to use a raw spinlock - we really don't want the spinlock
81  * code to recurse back into the lockdep code...
82  */
83 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
84
85 static int graph_lock(void)
86 {
87         arch_spin_lock(&lockdep_lock);
88         /*
89          * Make sure that if another CPU detected a bug while
90          * walking the graph we don't change it (while the other
91          * CPU is busy printing out stuff with the graph lock
92          * dropped already)
93          */
94         if (!debug_locks) {
95                 arch_spin_unlock(&lockdep_lock);
96                 return 0;
97         }
98         /* prevent any recursion within lockdep from causing deadlocks */
99         current->lockdep_recursion++;
100         return 1;
101 }
102
103 static inline int graph_unlock(void)
104 {
105         if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
106                 /*
107                  * The lockdep graph lock isn't locked while we expect it to
108                  * be, we're confused now, bye!
109                  */
110                 return DEBUG_LOCKS_WARN_ON(1);
111         }
112
113         current->lockdep_recursion--;
114         arch_spin_unlock(&lockdep_lock);
115         return 0;
116 }
117
118 /*
119  * Turn lock debugging off and return with 0 if it was off already,
120  * and also release the graph lock:
121  */
122 static inline int debug_locks_off_graph_unlock(void)
123 {
124         int ret = debug_locks_off();
125
126         arch_spin_unlock(&lockdep_lock);
127
128         return ret;
129 }
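
/*
 * Typical call pattern for the helpers above (a sketch; see e.g.
 * register_lock_class() further down for a real call site):
 *
 *	if (!graph_lock())
 *		return NULL;	- debug_locks was already turned off, bail out
 *	... modify the dependency graph ...
 *	graph_unlock();
 */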
130
131 unsigned long nr_list_entries;
132 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
133
134 /*
135  * All data structures here are protected by the global debug_lock.
136  *
137  * Mutex key structs only get allocated once, during bootup, and never
138  * get freed - this significantly simplifies the debugging code.
139  */
140 unsigned long nr_lock_classes;
141 #ifndef CONFIG_DEBUG_LOCKDEP
142 static
143 #endif
144 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
145
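/*
 * held_lock::class_idx is stored one above the real lock_classes[] index,
 * so that zero can serve as "no class recorded"; hence the "- 1" below.
 */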
146 static inline struct lock_class *hlock_class(struct held_lock *hlock)
147 {
148         if (!hlock->class_idx) {
149                 /*
150                  * Someone passed in garbage, we give up.
151                  */
152                 DEBUG_LOCKS_WARN_ON(1);
153                 return NULL;
154         }
155         return lock_classes + hlock->class_idx - 1;
156 }
157
158 #ifdef CONFIG_LOCK_STAT
159 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
160
161 static inline u64 lockstat_clock(void)
162 {
163         return local_clock();
164 }
165
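/*
 * Record @ip as a contention/contending point: return the slot that already
 * holds @ip or the first free slot; when the table is full and @ip is not
 * present, the returned index equals LOCKSTAT_POINTS.
 */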
166 static int lock_point(unsigned long points[], unsigned long ip)
167 {
168         int i;
169
170         for (i = 0; i < LOCKSTAT_POINTS; i++) {
171                 if (points[i] == 0) {
172                         points[i] = ip;
173                         break;
174                 }
175                 if (points[i] == ip)
176                         break;
177         }
178
179         return i;
180 }
181
182 static void lock_time_inc(struct lock_time *lt, u64 time)
183 {
184         if (time > lt->max)
185                 lt->max = time;
186
187         if (time < lt->min || !lt->nr)
188                 lt->min = time;
189
190         lt->total += time;
191         lt->nr++;
192 }
193
194 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
195 {
196         if (!src->nr)
197                 return;
198
199         if (src->max > dst->max)
200                 dst->max = src->max;
201
202         if (src->min < dst->min || !dst->nr)
203                 dst->min = src->min;
204
205         dst->total += src->total;
206         dst->nr += src->nr;
207 }
208
209 struct lock_class_stats lock_stats(struct lock_class *class)
210 {
211         struct lock_class_stats stats;
212         int cpu, i;
213
214         memset(&stats, 0, sizeof(struct lock_class_stats));
215         for_each_possible_cpu(cpu) {
216                 struct lock_class_stats *pcs =
217                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
218
219                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
220                         stats.contention_point[i] += pcs->contention_point[i];
221
222                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
223                         stats.contending_point[i] += pcs->contending_point[i];
224
225                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
226                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
227
228                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
229                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
230
231                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
232                         stats.bounces[i] += pcs->bounces[i];
233         }
234
235         return stats;
236 }
237
238 void clear_lock_stats(struct lock_class *class)
239 {
240         int cpu;
241
242         for_each_possible_cpu(cpu) {
243                 struct lock_class_stats *cpu_stats =
244                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
245
246                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
247         }
248         memset(class->contention_point, 0, sizeof(class->contention_point));
249         memset(class->contending_point, 0, sizeof(class->contending_point));
250 }
251
252 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
253 {
254         return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
255 }
256
257 static void lock_release_holdtime(struct held_lock *hlock)
258 {
259         struct lock_class_stats *stats;
260         u64 holdtime;
261
262         if (!lock_stat)
263                 return;
264
265         holdtime = lockstat_clock() - hlock->holdtime_stamp;
266
267         stats = get_lock_stats(hlock_class(hlock));
268         if (hlock->read)
269                 lock_time_inc(&stats->read_holdtime, holdtime);
270         else
271                 lock_time_inc(&stats->write_holdtime, holdtime);
272 }
273 #else
274 static inline void lock_release_holdtime(struct held_lock *hlock)
275 {
276 }
277 #endif
278
279 /*
280  * We keep a global list of all lock classes. The list only grows,
281  * never shrinks. The list is only accessed with the lockdep
282  * spinlock lock held.
283  */
284 LIST_HEAD(all_lock_classes);
285
286 /*
287  * The lockdep classes are in a hash-table as well, for fast lookup:
288  */
289 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
290 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
291 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
292 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
293
294 static struct hlist_head classhash_table[CLASSHASH_SIZE];
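
/*
 * A lookup is a plain RCU hash walk (sketch of what look_up_lock_class()
 * below does):
 *
 *	hash_head = classhashentry(key);
 *	hlist_for_each_entry_rcu(class, hash_head, hash_entry)
 *		if (class->key == key)
 *			return class;
 */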
295
296 /*
297  * We put the lock dependency chains into a hash-table as well, to cache
298  * their existence:
299  */
300 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
301 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
302 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
303 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
304
305 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
306
307 /*
308  * The hash key of the lock dependency chains is a hash itself too:
309  * it's a hash of all locks taken up to that lock, including that lock.
310  * It's a 64-bit hash, because it's important for the keys to be
311  * unique.
312  */
313 static inline u64 iterate_chain_key(u64 key, u32 idx)
314 {
315         u32 k0 = key, k1 = key >> 32;
316
317         __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
318
319         return k0 | (u64)k1 << 32;
320 }
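
/*
 * Sketch of how the chain key accumulates (the caller, __lock_acquire(), is
 * not part of this excerpt): starting from the task's initial chain key,
 * every newly held lock folds its class index in:
 *
 *	chain_key = iterate_chain_key(chain_key, hlock->class_idx);
 *
 * so different acquisition orders yield (almost certainly) different keys.
 */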
321
322 void lockdep_off(void)
323 {
324         current->lockdep_recursion++;
325 }
326 EXPORT_SYMBOL(lockdep_off);
327
328 void lockdep_on(void)
329 {
330         current->lockdep_recursion--;
331 }
332 EXPORT_SYMBOL(lockdep_on);
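
/*
 * Usage sketch: lockdep_off()/lockdep_on() bracket code whose locking must
 * not be tracked; the calls nest, since they only bump and drop
 * current->lockdep_recursion:
 *
 *	lockdep_off();
 *	... locking here is invisible to the validator ...
 *	lockdep_on();
 */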
333
334 /*
335  * Debugging switches:
336  */
337
338 #define VERBOSE                 0
339 #define VERY_VERBOSE            0
340
341 #if VERBOSE
342 # define HARDIRQ_VERBOSE        1
343 # define SOFTIRQ_VERBOSE        1
344 #else
345 # define HARDIRQ_VERBOSE        0
346 # define SOFTIRQ_VERBOSE        0
347 #endif
348
349 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
350 /*
351  * Quick filtering for interesting events:
352  */
353 static int class_filter(struct lock_class *class)
354 {
355 #if 0
356         /* Example */
357         if (class->name_version == 1 &&
358                         !strcmp(class->name, "lockname"))
359                 return 1;
360         if (class->name_version == 1 &&
361                         !strcmp(class->name, "&struct->lockfield"))
362                 return 1;
363 #endif
364         /* Filter everything else. Return 1 here to allow everything else. */
365         return 0;
366 }
367 #endif
368
369 static int verbose(struct lock_class *class)
370 {
371 #if VERBOSE
372         return class_filter(class);
373 #endif
374         return 0;
375 }
376
377 /*
378  * Stack-trace: tightly packed array of stack backtrace
379  * addresses. Protected by the graph_lock.
380  */
381 unsigned long nr_stack_trace_entries;
382 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
383
384 static void print_lockdep_off(const char *bug_msg)
385 {
386         printk(KERN_DEBUG "%s\n", bug_msg);
387         printk(KERN_DEBUG "turning off the locking correctness validator.\n");
388 #ifdef CONFIG_LOCK_STAT
389         printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
390 #endif
391 }
392
393 static int save_trace(struct stack_trace *trace)
394 {
395         trace->nr_entries = 0;
396         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
397         trace->entries = stack_trace + nr_stack_trace_entries;
398
399         trace->skip = 3;
400
401         save_stack_trace(trace);
402
403         /*
404          * Some daft arches put -1 at the end to indicate it's a full trace.
405          *
406          * <rant> this is buggy anyway, since it takes a whole extra entry so a
407          * complete trace that maxes out the entries provided will be reported
408          * as incomplete, friggin useless </rant>
409          */
410         if (trace->nr_entries != 0 &&
411             trace->entries[trace->nr_entries-1] == ULONG_MAX)
412                 trace->nr_entries--;
413
414         trace->max_entries = trace->nr_entries;
415
416         nr_stack_trace_entries += trace->nr_entries;
417
418         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
419                 if (!debug_locks_off_graph_unlock())
420                         return 0;
421
422                 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
423                 dump_stack();
424
425                 return 0;
426         }
427
428         return 1;
429 }
430
431 unsigned int nr_hardirq_chains;
432 unsigned int nr_softirq_chains;
433 unsigned int nr_process_chains;
434 unsigned int max_lockdep_depth;
435
436 #ifdef CONFIG_DEBUG_LOCKDEP
437 /*
438  * Various lockdep statistics:
439  */
440 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
441 #endif
442
443 /*
444  * Locking printouts:
445  */
446
447 #define __USAGE(__STATE)                                                \
448         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
449         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
450         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
451         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
452
453 static const char *usage_str[] =
454 {
455 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
456 #include "lockdep_states.h"
457 #undef LOCKDEP_STATE
458         [LOCK_USED] = "INITIAL USE",
459 };
460
461 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
462 {
463         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
464 }
465
466 static inline unsigned long lock_flag(enum lock_usage_bit bit)
467 {
468         return 1UL << bit;
469 }
470
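/*
 * The usage character for each irq state, as later printed inside the {...}
 * part of a lock name (assuming the usual report conventions):
 *
 *	'.'  never used in that irq context, never acquired with that irq enabled
 *	'-'  used in that irq context (irq-safe)
 *	'+'  acquired with that irq kind enabled (irq-unsafe)
 *	'?'  both of the above
 */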
471 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
472 {
473         char c = '.';
474
475         if (class->usage_mask & lock_flag(bit + 2))
476                 c = '+';
477         if (class->usage_mask & lock_flag(bit)) {
478                 c = '-';
479                 if (class->usage_mask & lock_flag(bit + 2))
480                         c = '?';
481         }
482
483         return c;
484 }
485
486 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
487 {
488         int i = 0;
489
490 #define LOCKDEP_STATE(__STATE)                                          \
491         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
492         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
493 #include "lockdep_states.h"
494 #undef LOCKDEP_STATE
495
496         usage[i] = '\0';
497 }
498
499 static void __print_lock_name(struct lock_class *class)
500 {
501         char str[KSYM_NAME_LEN];
502         const char *name;
503
504         name = class->name;
505         if (!name) {
506                 name = __get_key_name(class->key, str);
507                 printk(KERN_CONT "%s", name);
508         } else {
509                 printk(KERN_CONT "%s", name);
510                 if (class->name_version > 1)
511                         printk(KERN_CONT "#%d", class->name_version);
512                 if (class->subclass)
513                         printk(KERN_CONT "/%d", class->subclass);
514         }
515 }
516
517 static void print_lock_name(struct lock_class *class)
518 {
519         char usage[LOCK_USAGE_CHARS];
520
521         get_usage_chars(class, usage);
522
523         printk(KERN_CONT " (");
524         __print_lock_name(class);
525         printk(KERN_CONT "){%s}", usage);
526 }
527
528 static void print_lockdep_cache(struct lockdep_map *lock)
529 {
530         const char *name;
531         char str[KSYM_NAME_LEN];
532
533         name = lock->name;
534         if (!name)
535                 name = __get_key_name(lock->key->subkeys, str);
536
537         printk(KERN_CONT "%s", name);
538 }
539
540 static void print_lock(struct held_lock *hlock)
541 {
542         /*
543          * We can be called locklessly through debug_show_all_locks() so be
544          * extra careful, the hlock might have been released and cleared.
545          */
546         unsigned int class_idx = hlock->class_idx;
547
548         /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
549         barrier();
550
551         if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
552                 printk(KERN_CONT "<RELEASED>\n");
553                 return;
554         }
555
556         printk(KERN_CONT "%p", hlock->instance);
557         print_lock_name(lock_classes + class_idx - 1);
558         printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
559 }
560
561 static void lockdep_print_held_locks(struct task_struct *p)
562 {
563         int i, depth = READ_ONCE(p->lockdep_depth);
564
565         if (!depth)
566                 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
567         else
568                 printk("%d lock%s held by %s/%d:\n", depth,
569                        depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
570         /*
571          * It's not reliable to print a task's held locks if it's not sleeping
572          * and it's not the current task.
573          */
574         if (p->state == TASK_RUNNING && p != current)
575                 return;
576         for (i = 0; i < depth; i++) {
577                 printk(" #%d: ", i);
578                 print_lock(p->held_locks + i);
579         }
580 }
581
582 static void print_kernel_ident(void)
583 {
584         printk("%s %.*s %s\n", init_utsname()->release,
585                 (int)strcspn(init_utsname()->version, " "),
586                 init_utsname()->version,
587                 print_tainted());
588 }
589
590 static int very_verbose(struct lock_class *class)
591 {
592 #if VERY_VERBOSE
593         return class_filter(class);
594 #endif
595         return 0;
596 }
597
598 /*
599  * Is this the address of a static object:
600  */
601 #ifdef __KERNEL__
602 static int static_obj(void *obj)
603 {
604         unsigned long start = (unsigned long) &_stext,
605                       end   = (unsigned long) &_end,
606                       addr  = (unsigned long) obj;
607
608         /*
609          * static variable?
610          */
611         if ((addr >= start) && (addr < end))
612                 return 1;
613
614         if (arch_is_kernel_data(addr))
615                 return 1;
616
617         /*
618          * in-kernel percpu var?
619          */
620         if (is_kernel_percpu_address(addr))
621                 return 1;
622
623         /*
624          * module static or percpu var?
625          */
626         return is_module_address(addr) || is_module_percpu_address(addr);
627 }
628 #endif
629
630 /*
631  * To make lock name printouts unique, we calculate a unique
632  * class->name_version generation counter. The caller must hold the graph
633  * lock.
634  */
635 static int count_matching_names(struct lock_class *new_class)
636 {
637         struct lock_class *class;
638         int count = 0;
639
640         if (!new_class->name)
641                 return 0;
642
643         list_for_each_entry(class, &all_lock_classes, lock_entry) {
644                 if (new_class->key - new_class->subclass == class->key)
645                         return class->name_version;
646                 if (class->name && !strcmp(class->name, new_class->name))
647                         count = max(count, class->name_version);
648         }
649
650         return count + 1;
651 }
652
653 static inline struct lock_class *
654 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
655 {
656         struct lockdep_subclass_key *key;
657         struct hlist_head *hash_head;
658         struct lock_class *class;
659
660         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
661                 debug_locks_off();
662                 printk(KERN_ERR
663                         "BUG: looking up invalid subclass: %u\n", subclass);
664                 printk(KERN_ERR
665                         "turning off the locking correctness validator.\n");
666                 dump_stack();
667                 return NULL;
668         }
669
670         /*
671          * If it is not initialised then it has never been locked,
672          * so it won't be present in the hash table.
673          */
674         if (unlikely(!lock->key))
675                 return NULL;
676
677         /*
678          * NOTE: the class-key must be unique. For dynamic locks, a static
679          * lock_class_key variable is passed in through the mutex_init()
680          * (or spin_lock_init()) call - which acts as the key. For static
681          * locks we use the lock object itself as the key.
682          */
683         BUILD_BUG_ON(sizeof(struct lock_class_key) >
684                         sizeof(struct lockdep_map));
685
686         key = lock->key->subkeys + subclass;
687
688         hash_head = classhashentry(key);
689
690         /*
691          * We do an RCU walk of the hash, see lockdep_free_key_range().
692          */
693         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
694                 return NULL;
695
696         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
697                 if (class->key == key) {
698                         /*
699                          * Huh! same key, different name? Did someone trample
700                          * on some memory? We're most confused.
701                          */
702                         WARN_ON_ONCE(class->name != lock->name);
703                         return class;
704                 }
705         }
706
707         return NULL;
708 }
709
710 /*
711  * Static locks do not have their class-keys yet - for them the key is
712  * the lock object itself. If the lock is in the per cpu area, the
713  * canonical address of the lock (per cpu offset removed) is used.
714  */
715 static bool assign_lock_key(struct lockdep_map *lock)
716 {
717         unsigned long can_addr, addr = (unsigned long)lock;
718
719         if (__is_kernel_percpu_address(addr, &can_addr))
720                 lock->key = (void *)can_addr;
721         else if (__is_module_percpu_address(addr, &can_addr))
722                 lock->key = (void *)can_addr;
723         else if (static_obj(lock))
724                 lock->key = (void *)lock;
725         else {
726                 /* Debug-check: all keys must be persistent! */
727                 debug_locks_off();
728                 pr_err("INFO: trying to register non-static key.\n");
729                 pr_err("the code is fine but needs lockdep annotation.\n");
730                 pr_err("turning off the locking correctness validator.\n");
731                 dump_stack();
732                 return false;
733         }
734
735         return true;
736 }
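
/*
 * Example of the failure case above (a sketch): a lock living in
 * kmalloc()'ed memory that was never initialized through a helper that
 * supplies a static lock_class_key ends up here with a non-static address,
 * and the "non-static key" warning fires; the fix is a proper *_init()
 * call or an explicit lockdep_set_class() annotation.
 */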
737
738 /*
739  * Initialize the lock_classes[] array elements.
740  */
741 static void init_data_structures_once(void)
742 {
743         static bool initialization_happened;
744         int i;
745
746         if (likely(initialization_happened))
747                 return;
748
749         initialization_happened = true;
750
751         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
752                 INIT_LIST_HEAD(&lock_classes[i].locks_after);
753                 INIT_LIST_HEAD(&lock_classes[i].locks_before);
754         }
755 }
756
757 /*
758  * Register a lock's class in the hash-table, if the class is not present
759  * yet. Otherwise we look it up. We cache the result in the lock object
760  * itself, so the actual hash lookup should happen only once per lock object.
761  */
762 static struct lock_class *
763 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
764 {
765         struct lockdep_subclass_key *key;
766         struct hlist_head *hash_head;
767         struct lock_class *class;
768
769         DEBUG_LOCKS_WARN_ON(!irqs_disabled());
770
771         class = look_up_lock_class(lock, subclass);
772         if (likely(class))
773                 goto out_set_class_cache;
774
775         if (!lock->key) {
776                 if (!assign_lock_key(lock))
777                         return NULL;
778         } else if (!static_obj(lock->key)) {
779                 return NULL;
780         }
781
782         key = lock->key->subkeys + subclass;
783         hash_head = classhashentry(key);
784
785         if (!graph_lock()) {
786                 return NULL;
787         }
788         /*
789          * We have to do the hash-walk again, to avoid races
790          * with another CPU:
791          */
792         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
793                 if (class->key == key)
794                         goto out_unlock_set;
795         }
796
797         init_data_structures_once();
798
799         /*
800          * Allocate a new key from the static array, and add it to
801          * the hash:
802          */
803         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
804                 if (!debug_locks_off_graph_unlock()) {
805                         return NULL;
806                 }
807
808                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
809                 dump_stack();
810                 return NULL;
811         }
812         class = lock_classes + nr_lock_classes++;
813         debug_atomic_inc(nr_unused_locks);
814         class->key = key;
815         class->name = lock->name;
816         class->subclass = subclass;
817         WARN_ON_ONCE(!list_empty(&class->locks_before));
818         WARN_ON_ONCE(!list_empty(&class->locks_after));
819         class->name_version = count_matching_names(class);
820         /*
821          * We use RCU's safe list-add method to make
822          * parallel walking of the hash-list safe:
823          */
824         hlist_add_head_rcu(&class->hash_entry, hash_head);
825         /*
826          * Add it to the global list of classes:
827          */
828         list_add_tail(&class->lock_entry, &all_lock_classes);
829
830         if (verbose(class)) {
831                 graph_unlock();
832
833                 printk("\nnew class %px: %s", class->key, class->name);
834                 if (class->name_version > 1)
835                         printk(KERN_CONT "#%d", class->name_version);
836                 printk(KERN_CONT "\n");
837                 dump_stack();
838
839                 if (!graph_lock()) {
840                         return NULL;
841                 }
842         }
843 out_unlock_set:
844         graph_unlock();
845
846 out_set_class_cache:
847         if (!subclass || force)
848                 lock->class_cache[0] = class;
849         else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
850                 lock->class_cache[subclass] = class;
851
852         /*
853          * Hash collision, did we smoke some? We found a class with a matching
854          * hash but the subclass -- which is hashed in -- didn't match.
855          */
856         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
857                 return NULL;
858
859         return class;
860 }
861
862 #ifdef CONFIG_PROVE_LOCKING
863 /*
864  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
865  * NULL on failure.)
866  */
867 static struct lock_list *alloc_list_entry(void)
868 {
869         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
870                 if (!debug_locks_off_graph_unlock())
871                         return NULL;
872
873                 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
874                 dump_stack();
875                 return NULL;
876         }
877         return list_entries + nr_list_entries++;
878 }
879
880 /*
881  * Add a new dependency to the head of the list:
882  */
883 static int add_lock_to_list(struct lock_class *this,
884                             struct lock_class *links_to, struct list_head *head,
885                             unsigned long ip, int distance,
886                             struct stack_trace *trace)
887 {
888         struct lock_list *entry;
889         /*
890          * Lock not present yet - get a new dependency struct and
891          * add it to the list:
892          */
893         entry = alloc_list_entry();
894         if (!entry)
895                 return 0;
896
897         entry->class = this;
898         entry->links_to = links_to;
899         entry->distance = distance;
900         entry->trace = *trace;
901         /*
902          * Both allocation and removal are done under the graph lock; but
903          * iteration is under RCU-sched; see look_up_lock_class() and
904          * lockdep_free_key_range().
905          */
906         list_add_tail_rcu(&entry->entry, head);
907
908         return 1;
909 }
910
911 /*
912  * For good efficiency of the modulo operation, we use a power of 2.
913  */
914 #define MAX_CIRCULAR_QUEUE_SIZE         4096UL
915 #define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)
916
917 /*
918  * The circular_queue and its helpers are used to implement the
919  * breadth-first search (BFS) algorithm, by which we can build
920  * the shortest path from the next lock to be acquired to a
921  * previously held lock if there is a circular dependency between them.
922  */
923 struct circular_queue {
924         unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
925         unsigned int  front, rear;
926 };
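
/*
 * The queue stores struct lock_list pointers cast to unsigned long. Because
 * __cq_full() leaves one slot unused to distinguish full from empty, the
 * effective capacity is MAX_CIRCULAR_QUEUE_SIZE - 1.
 */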
927
928 static struct circular_queue lock_cq;
929
930 unsigned int max_bfs_queue_depth;
931
932 static unsigned int lockdep_dependency_gen_id;
933
934 static inline void __cq_init(struct circular_queue *cq)
935 {
936         cq->front = cq->rear = 0;
937         lockdep_dependency_gen_id++;
938 }
939
940 static inline int __cq_empty(struct circular_queue *cq)
941 {
942         return (cq->front == cq->rear);
943 }
944
945 static inline int __cq_full(struct circular_queue *cq)
946 {
947         return ((cq->rear + 1) & CQ_MASK) == cq->front;
948 }
949
950 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
951 {
952         if (__cq_full(cq))
953                 return -1;
954
955         cq->element[cq->rear] = elem;
956         cq->rear = (cq->rear + 1) & CQ_MASK;
957         return 0;
958 }
959
960 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
961 {
962         if (__cq_empty(cq))
963                 return -1;
964
965         *elem = cq->element[cq->front];
966         cq->front = (cq->front + 1) & CQ_MASK;
967         return 0;
968 }
969
970 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
971 {
972         return (cq->rear - cq->front) & CQ_MASK;
973 }
974
975 static inline void mark_lock_accessed(struct lock_list *lock,
976                                         struct lock_list *parent)
977 {
978         unsigned long nr;
979
980         nr = lock - list_entries;
981         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
982         lock->parent = parent;
983         lock->class->dep_gen_id = lockdep_dependency_gen_id;
984 }
985
986 static inline unsigned long lock_accessed(struct lock_list *lock)
987 {
988         unsigned long nr;
989
990         nr = lock - list_entries;
991         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
992         return lock->class->dep_gen_id == lockdep_dependency_gen_id;
993 }
994
995 static inline struct lock_list *get_lock_parent(struct lock_list *child)
996 {
997         return child->parent;
998 }
999
1000 static inline int get_lock_depth(struct lock_list *child)
1001 {
1002         int depth = 0;
1003         struct lock_list *parent;
1004
1005         while ((parent = get_lock_parent(child))) {
1006                 child = parent;
1007                 depth++;
1008         }
1009         return depth;
1010 }
1011
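/*
 * __bfs() return values (as interpreted by the wrappers and callers below):
 *	 1  - no entry matching @match was found
 *	 0  - a match was found and stored in *@target_entry
 *	-1  - the BFS queue overflowed
 *	-2  - a corrupted graph was hit (an entry with a NULL class)
 */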
1012 static int __bfs(struct lock_list *source_entry,
1013                  void *data,
1014                  int (*match)(struct lock_list *entry, void *data),
1015                  struct lock_list **target_entry,
1016                  int forward)
1017 {
1018         struct lock_list *entry;
1019         struct list_head *head;
1020         struct circular_queue *cq = &lock_cq;
1021         int ret = 1;
1022
1023         if (match(source_entry, data)) {
1024                 *target_entry = source_entry;
1025                 ret = 0;
1026                 goto exit;
1027         }
1028
1029         if (forward)
1030                 head = &source_entry->class->locks_after;
1031         else
1032                 head = &source_entry->class->locks_before;
1033
1034         if (list_empty(head))
1035                 goto exit;
1036
1037         __cq_init(cq);
1038         __cq_enqueue(cq, (unsigned long)source_entry);
1039
1040         while (!__cq_empty(cq)) {
1041                 struct lock_list *lock;
1042
1043                 __cq_dequeue(cq, (unsigned long *)&lock);
1044
1045                 if (!lock->class) {
1046                         ret = -2;
1047                         goto exit;
1048                 }
1049
1050                 if (forward)
1051                         head = &lock->class->locks_after;
1052                 else
1053                         head = &lock->class->locks_before;
1054
1055                 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1056
1057                 list_for_each_entry_rcu(entry, head, entry) {
1058                         if (!lock_accessed(entry)) {
1059                                 unsigned int cq_depth;
1060                                 mark_lock_accessed(entry, lock);
1061                                 if (match(entry, data)) {
1062                                         *target_entry = entry;
1063                                         ret = 0;
1064                                         goto exit;
1065                                 }
1066
1067                                 if (__cq_enqueue(cq, (unsigned long)entry)) {
1068                                         ret = -1;
1069                                         goto exit;
1070                                 }
1071                                 cq_depth = __cq_get_elem_count(cq);
1072                                 if (max_bfs_queue_depth < cq_depth)
1073                                         max_bfs_queue_depth = cq_depth;
1074                         }
1075                 }
1076         }
1077 exit:
1078         return ret;
1079 }
1080
1081 static inline int __bfs_forwards(struct lock_list *src_entry,
1082                         void *data,
1083                         int (*match)(struct lock_list *entry, void *data),
1084                         struct lock_list **target_entry)
1085 {
1086         return __bfs(src_entry, data, match, target_entry, 1);
1087
1088 }
1089
1090 static inline int __bfs_backwards(struct lock_list *src_entry,
1091                         void *data,
1092                         int (*match)(struct lock_list *entry, void *data),
1093                         struct lock_list **target_entry)
1094 {
1095         return __bfs(src_entry, data, match, target_entry, 0);
1096
1097 }
1098
1099 /*
1100  * Forwards-direction lock-dependency checking (built on the BFS above), used for
1101  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1102  * checking.
1103  */
1104
1105 /*
1106  * Print a dependency chain entry (this is only done when a deadlock
1107  * has been detected):
1108  */
1109 static noinline int
1110 print_circular_bug_entry(struct lock_list *target, int depth)
1111 {
1112         if (debug_locks_silent)
1113                 return 0;
1114         printk("\n-> #%u", depth);
1115         print_lock_name(target->class);
1116         printk(KERN_CONT ":\n");
1117         print_stack_trace(&target->trace, 6);
1118
1119         return 0;
1120 }
1121
1122 static void
1123 print_circular_lock_scenario(struct held_lock *src,
1124                              struct held_lock *tgt,
1125                              struct lock_list *prt)
1126 {
1127         struct lock_class *source = hlock_class(src);
1128         struct lock_class *target = hlock_class(tgt);
1129         struct lock_class *parent = prt->class;
1130
1131         /*
1132          * A direct locking problem where unsafe_class lock is taken
1133          * directly by safe_class lock, then all we need to show
1134          * is the deadlock scenario, as it is obvious that the
1135          * unsafe lock is taken under the safe lock.
1136          *
1137          * But if there is a chain instead, where the safe lock takes
1138          * an intermediate lock (middle_class) where this lock is
1139          * not the same as the safe lock, then the lock chain is
1140          * used to describe the problem. Otherwise we would need
1141          * to show a different CPU case for each link in the chain
1142          * from the safe_class lock to the unsafe_class lock.
1143          */
1144         if (parent != source) {
1145                 printk("Chain exists of:\n  ");
1146                 __print_lock_name(source);
1147                 printk(KERN_CONT " --> ");
1148                 __print_lock_name(parent);
1149                 printk(KERN_CONT " --> ");
1150                 __print_lock_name(target);
1151                 printk(KERN_CONT "\n\n");
1152         }
1153
1154         printk(" Possible unsafe locking scenario:\n\n");
1155         printk("       CPU0                    CPU1\n");
1156         printk("       ----                    ----\n");
1157         printk("  lock(");
1158         __print_lock_name(target);
1159         printk(KERN_CONT ");\n");
1160         printk("                               lock(");
1161         __print_lock_name(parent);
1162         printk(KERN_CONT ");\n");
1163         printk("                               lock(");
1164         __print_lock_name(target);
1165         printk(KERN_CONT ");\n");
1166         printk("  lock(");
1167         __print_lock_name(source);
1168         printk(KERN_CONT ");\n");
1169         printk("\n *** DEADLOCK ***\n\n");
1170 }
1171
1172 /*
1173  * When a circular dependency is detected, print the
1174  * header first:
1175  */
1176 static noinline int
1177 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1178                         struct held_lock *check_src,
1179                         struct held_lock *check_tgt)
1180 {
1181         struct task_struct *curr = current;
1182
1183         if (debug_locks_silent)
1184                 return 0;
1185
1186         pr_warn("\n");
1187         pr_warn("======================================================\n");
1188         pr_warn("WARNING: possible circular locking dependency detected\n");
1189         print_kernel_ident();
1190         pr_warn("------------------------------------------------------\n");
1191         pr_warn("%s/%d is trying to acquire lock:\n",
1192                 curr->comm, task_pid_nr(curr));
1193         print_lock(check_src);
1194
1195         pr_warn("\nbut task is already holding lock:\n");
1196
1197         print_lock(check_tgt);
1198         pr_warn("\nwhich lock already depends on the new lock.\n\n");
1199         pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1200
1201         print_circular_bug_entry(entry, depth);
1202
1203         return 0;
1204 }
1205
1206 static inline int class_equal(struct lock_list *entry, void *data)
1207 {
1208         return entry->class == data;
1209 }
1210
1211 static noinline int print_circular_bug(struct lock_list *this,
1212                                 struct lock_list *target,
1213                                 struct held_lock *check_src,
1214                                 struct held_lock *check_tgt,
1215                                 struct stack_trace *trace)
1216 {
1217         struct task_struct *curr = current;
1218         struct lock_list *parent;
1219         struct lock_list *first_parent;
1220         int depth;
1221
1222         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1223                 return 0;
1224
1225         if (!save_trace(&this->trace))
1226                 return 0;
1227
1228         depth = get_lock_depth(target);
1229
1230         print_circular_bug_header(target, depth, check_src, check_tgt);
1231
1232         parent = get_lock_parent(target);
1233         first_parent = parent;
1234
1235         while (parent) {
1236                 print_circular_bug_entry(parent, --depth);
1237                 parent = get_lock_parent(parent);
1238         }
1239
1240         printk("\nother info that might help us debug this:\n\n");
1241         print_circular_lock_scenario(check_src, check_tgt,
1242                                      first_parent);
1243
1244         lockdep_print_held_locks(curr);
1245
1246         printk("\nstack backtrace:\n");
1247         dump_stack();
1248
1249         return 0;
1250 }
1251
1252 static noinline int print_bfs_bug(int ret)
1253 {
1254         if (!debug_locks_off_graph_unlock())
1255                 return 0;
1256
1257         /*
1258          * Breadth-first-search failed, graph got corrupted?
1259          */
1260         WARN(1, "lockdep bfs error:%d\n", ret);
1261
1262         return 0;
1263 }
1264
1265 static int noop_count(struct lock_list *entry, void *data)
1266 {
1267         (*(unsigned long *)data)++;
1268         return 0;
1269 }
1270
1271 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1272 {
1273         unsigned long  count = 0;
1274         struct lock_list *uninitialized_var(target_entry);
1275
1276         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1277
1278         return count;
1279 }
1280 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1281 {
1282         unsigned long ret, flags;
1283         struct lock_list this;
1284
1285         this.parent = NULL;
1286         this.class = class;
1287
1288         raw_local_irq_save(flags);
1289         arch_spin_lock(&lockdep_lock);
1290         ret = __lockdep_count_forward_deps(&this);
1291         arch_spin_unlock(&lockdep_lock);
1292         raw_local_irq_restore(flags);
1293
1294         return ret;
1295 }
1296
1297 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1298 {
1299         unsigned long  count = 0;
1300         struct lock_list *uninitialized_var(target_entry);
1301
1302         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1303
1304         return count;
1305 }
1306
1307 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1308 {
1309         unsigned long ret, flags;
1310         struct lock_list this;
1311
1312         this.parent = NULL;
1313         this.class = class;
1314
1315         raw_local_irq_save(flags);
1316         arch_spin_lock(&lockdep_lock);
1317         ret = __lockdep_count_backward_deps(&this);
1318         arch_spin_unlock(&lockdep_lock);
1319         raw_local_irq_restore(flags);
1320
1321         return ret;
1322 }
1323
1324 /*
1325  * Prove that the dependency graph starting at <root> cannot
1326  * lead to <target>. Print an error and return 0 if it does.
1327  */
1328 static noinline int
1329 check_noncircular(struct lock_list *root, struct lock_class *target,
1330                 struct lock_list **target_entry)
1331 {
1332         int result;
1333
1334         debug_atomic_inc(nr_cyclic_checks);
1335
1336         result = __bfs_forwards(root, target, class_equal, target_entry);
1337
1338         return result;
1339 }
1340
1341 static noinline int
1342 check_redundant(struct lock_list *root, struct lock_class *target,
1343                 struct lock_list **target_entry)
1344 {
1345         int result;
1346
1347         debug_atomic_inc(nr_redundant_checks);
1348
1349         result = __bfs_forwards(root, target, class_equal, target_entry);
1350
1351         return result;
1352 }
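
/*
 * check_redundant() reuses the same forward search: if a path from @root to
 * @target already exists (result 0), the caller can treat the new dependency
 * as redundant and skip recording it (a sketch of the intent; the caller is
 * not part of this excerpt).
 */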
1353
1354 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1355 /*
1356  * Forwards and backwards subgraph searching, for the purposes of
1357  * proving that two subgraphs can be connected by a new dependency
1358  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1359  */
1360
1361 static inline int usage_match(struct lock_list *entry, void *bit)
1362 {
1363         return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1364 }
1365
1366
1367
1368 /*
1369  * Find a node in the forwards-direction dependency sub-graph starting
1370  * at @root->class that matches @bit.
1371  *
1372  * Return 0 if such a node exists in the subgraph, and put that node
1373  * into *@target_entry.
1374  *
1375  * Return 1 otherwise and keep *@target_entry unchanged.
1376  * Return <0 on error.
1377  */
1378 static int
1379 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1380                         struct lock_list **target_entry)
1381 {
1382         int result;
1383
1384         debug_atomic_inc(nr_find_usage_forwards_checks);
1385
1386         result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1387
1388         return result;
1389 }
1390
1391 /*
1392  * Find a node in the backwards-direction dependency sub-graph starting
1393  * at @root->class that matches @bit.
1394  *
1395  * Return 0 if such a node exists in the subgraph, and put that node
1396  * into *@target_entry.
1397  *
1398  * Return 1 otherwise and keep *@target_entry unchanged.
1399  * Return <0 on error.
1400  */
1401 static int
1402 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1403                         struct lock_list **target_entry)
1404 {
1405         int result;
1406
1407         debug_atomic_inc(nr_find_usage_backwards_checks);
1408
1409         result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1410
1411         return result;
1412 }
1413
1414 static void print_lock_class_header(struct lock_class *class, int depth)
1415 {
1416         int bit;
1417
1418         printk("%*s->", depth, "");
1419         print_lock_name(class);
1420 #ifdef CONFIG_DEBUG_LOCKDEP
1421         printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1422 #endif
1423         printk(KERN_CONT " {\n");
1424
1425         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1426                 if (class->usage_mask & (1 << bit)) {
1427                         int len = depth;
1428
1429                         len += printk("%*s   %s", depth, "", usage_str[bit]);
1430                         len += printk(KERN_CONT " at:\n");
1431                         print_stack_trace(class->usage_traces + bit, len);
1432                 }
1433         }
1434         printk("%*s }\n", depth, "");
1435
1436         printk("%*s ... key      at: [<%px>] %pS\n",
1437                 depth, "", class->key, class->key);
1438 }
1439
1440 /*
1441  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1442  */
1443 static void __used
1444 print_shortest_lock_dependencies(struct lock_list *leaf,
1445                                 struct lock_list *root)
1446 {
1447         struct lock_list *entry = leaf;
1448         int depth;
1449
1450         /* compute depth from the tree generated by BFS */
1451         depth = get_lock_depth(leaf);
1452
1453         do {
1454                 print_lock_class_header(entry->class, depth);
1455                 printk("%*s ... acquired at:\n", depth, "");
1456                 print_stack_trace(&entry->trace, 2);
1457                 printk("\n");
1458
1459                 if (depth == 0 && (entry != root)) {
1460                         printk("lockdep:%s bad path found in chain graph\n", __func__);
1461                         break;
1462                 }
1463
1464                 entry = get_lock_parent(entry);
1465                 depth--;
1466         } while (entry && (depth >= 0));
1467
1468         return;
1469 }
1470
1471 static void
1472 print_irq_lock_scenario(struct lock_list *safe_entry,
1473                         struct lock_list *unsafe_entry,
1474                         struct lock_class *prev_class,
1475                         struct lock_class *next_class)
1476 {
1477         struct lock_class *safe_class = safe_entry->class;
1478         struct lock_class *unsafe_class = unsafe_entry->class;
1479         struct lock_class *middle_class = prev_class;
1480
1481         if (middle_class == safe_class)
1482                 middle_class = next_class;
1483
1484         /*
1485          * A direct locking problem where unsafe_class lock is taken
1486          * directly by safe_class lock, then all we need to show
1487          * is the deadlock scenario, as it is obvious that the
1488          * unsafe lock is taken under the safe lock.
1489          *
1490          * But if there is a chain instead, where the safe lock takes
1491          * an intermediate lock (middle_class) where this lock is
1492          * not the same as the safe lock, then the lock chain is
1493          * used to describe the problem. Otherwise we would need
1494          * to show a different CPU case for each link in the chain
1495          * from the safe_class lock to the unsafe_class lock.
1496          */
1497         if (middle_class != unsafe_class) {
1498                 printk("Chain exists of:\n  ");
1499                 __print_lock_name(safe_class);
1500                 printk(KERN_CONT " --> ");
1501                 __print_lock_name(middle_class);
1502                 printk(KERN_CONT " --> ");
1503                 __print_lock_name(unsafe_class);
1504                 printk(KERN_CONT "\n\n");
1505         }
1506
1507         printk(" Possible interrupt unsafe locking scenario:\n\n");
1508         printk("       CPU0                    CPU1\n");
1509         printk("       ----                    ----\n");
1510         printk("  lock(");
1511         __print_lock_name(unsafe_class);
1512         printk(KERN_CONT ");\n");
1513         printk("                               local_irq_disable();\n");
1514         printk("                               lock(");
1515         __print_lock_name(safe_class);
1516         printk(KERN_CONT ");\n");
1517         printk("                               lock(");
1518         __print_lock_name(middle_class);
1519         printk(KERN_CONT ");\n");
1520         printk("  <Interrupt>\n");
1521         printk("    lock(");
1522         __print_lock_name(safe_class);
1523         printk(KERN_CONT ");\n");
1524         printk("\n *** DEADLOCK ***\n\n");
1525 }
1526
1527 static int
1528 print_bad_irq_dependency(struct task_struct *curr,
1529                          struct lock_list *prev_root,
1530                          struct lock_list *next_root,
1531                          struct lock_list *backwards_entry,
1532                          struct lock_list *forwards_entry,
1533                          struct held_lock *prev,
1534                          struct held_lock *next,
1535                          enum lock_usage_bit bit1,
1536                          enum lock_usage_bit bit2,
1537                          const char *irqclass)
1538 {
1539         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1540                 return 0;
1541
1542         pr_warn("\n");
1543         pr_warn("=====================================================\n");
1544         pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1545                 irqclass, irqclass);
1546         print_kernel_ident();
1547         pr_warn("-----------------------------------------------------\n");
1548         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1549                 curr->comm, task_pid_nr(curr),
1550                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1551                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1552                 curr->hardirqs_enabled,
1553                 curr->softirqs_enabled);
1554         print_lock(next);
1555
1556         pr_warn("\nand this task is already holding:\n");
1557         print_lock(prev);
1558         pr_warn("which would create a new lock dependency:\n");
1559         print_lock_name(hlock_class(prev));
1560         pr_cont(" ->");
1561         print_lock_name(hlock_class(next));
1562         pr_cont("\n");
1563
1564         pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1565                 irqclass);
1566         print_lock_name(backwards_entry->class);
1567         pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1568
1569         print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1570
1571         pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1572         print_lock_name(forwards_entry->class);
1573         pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1574         pr_warn("...");
1575
1576         print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1577
1578         pr_warn("\nother info that might help us debug this:\n\n");
1579         print_irq_lock_scenario(backwards_entry, forwards_entry,
1580                                 hlock_class(prev), hlock_class(next));
1581
1582         lockdep_print_held_locks(curr);
1583
1584         pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1585         if (!save_trace(&prev_root->trace))
1586                 return 0;
1587         print_shortest_lock_dependencies(backwards_entry, prev_root);
1588
1589         pr_warn("\nthe dependencies between the lock to be acquired");
1590         pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1591         if (!save_trace(&next_root->trace))
1592                 return 0;
1593         print_shortest_lock_dependencies(forwards_entry, next_root);
1594
1595         pr_warn("\nstack backtrace:\n");
1596         dump_stack();
1597
1598         return 0;
1599 }
1600
1601 static int
1602 check_usage(struct task_struct *curr, struct held_lock *prev,
1603             struct held_lock *next, enum lock_usage_bit bit_backwards,
1604             enum lock_usage_bit bit_forwards, const char *irqclass)
1605 {
1606         int ret;
1607         struct lock_list this, that;
1608         struct lock_list *uninitialized_var(target_entry);
1609         struct lock_list *uninitialized_var(target_entry1);
1610
1611         this.parent = NULL;
1612
1613         this.class = hlock_class(prev);
1614         ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1615         if (ret < 0)
1616                 return print_bfs_bug(ret);
1617         if (ret == 1)
1618                 return ret;
1619
1620         that.parent = NULL;
1621         that.class = hlock_class(next);
1622         ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1623         if (ret < 0)
1624                 return print_bfs_bug(ret);
1625         if (ret == 1)
1626                 return ret;
1627
1628         return print_bad_irq_dependency(curr, &this, &that,
1629                         target_entry, target_entry1,
1630                         prev, next,
1631                         bit_backwards, bit_forwards, irqclass);
1632 }
1633
1634 static const char *state_names[] = {
1635 #define LOCKDEP_STATE(__STATE) \
1636         __stringify(__STATE),
1637 #include "lockdep_states.h"
1638 #undef LOCKDEP_STATE
1639 };
1640
1641 static const char *state_rnames[] = {
1642 #define LOCKDEP_STATE(__STATE) \
1643         __stringify(__STATE)"-READ",
1644 #include "lockdep_states.h"
1645 #undef LOCKDEP_STATE
1646 };
1647
1648 static inline const char *state_name(enum lock_usage_bit bit)
1649 {
1650         return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1651 }
1652
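/*
 * Flip USED_IN <-> ENABLED and drop any _READ qualifier; for example
 * (assuming the usual bit layout) exclusive_bit(LOCK_USED_IN_HARDIRQ_READ)
 * is LOCK_ENABLED_HARDIRQ.
 */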
1653 static int exclusive_bit(int new_bit)
1654 {
1655         int state = new_bit & LOCK_USAGE_STATE_MASK;
1656         int dir = new_bit & LOCK_USAGE_DIR_MASK;
1657
1658         /*
1659          * keep state, bit flip the direction and strip read.
1660          */
1661         return state | (dir ^ LOCK_USAGE_DIR_MASK);
1662 }
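
/*
 * Worked example (assuming HARDIRQ is the first state in lockdep_states.h,
 * i.e. LOCK_USED_IN_HARDIRQ == 0 and LOCK_ENABLED_HARDIRQ == 2):
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ)      == LOCK_USED_IN_HARDIRQ
 */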
1663
1664 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1665                            struct held_lock *next, enum lock_usage_bit bit)
1666 {
1667         /*
1668          * Prove that the new dependency does not connect a hardirq-safe
1669          * lock with a hardirq-unsafe lock - to achieve this we search
1670          * the backwards-subgraph starting at <prev>, and the
1671          * forwards-subgraph starting at <next>:
1672          */
1673         if (!check_usage(curr, prev, next, bit,
1674                            exclusive_bit(bit), state_name(bit)))
1675                 return 0;
1676
1677         bit++; /* _READ */
1678
1679         /*
1680          * Prove that the new dependency does not connect a hardirq-safe-read
1681          * lock with a hardirq-unsafe lock - to achieve this we search
1682          * the backwards-subgraph starting at <prev>, and the
1683          * forwards-subgraph starting at <next>:
1684          */
1685         if (!check_usage(curr, prev, next, bit,
1686                            exclusive_bit(bit), state_name(bit)))
1687                 return 0;
1688
1689         return 1;
1690 }
1691
1692 static int
1693 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1694                 struct held_lock *next)
1695 {
1696 #define LOCKDEP_STATE(__STATE)                                          \
1697         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1698                 return 0;
1699 #include "lockdep_states.h"
1700 #undef LOCKDEP_STATE
1701
1702         return 1;
1703 }
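
/*
 * As an illustration of what check_prev_add_irq() rejects, a sketch with
 * two hypothetical spinlocks A and B (not part of this file):
 *
 *	// with hardirqs enabled:
 *	spin_lock(&B);			// B becomes hardirq-unsafe
 *	spin_unlock(&B);
 *
 *	// in a hardirq handler:
 *	spin_lock(&A);			// A becomes hardirq-safe
 *	spin_unlock(&A);
 *
 *	// later, anywhere:
 *	spin_lock_irqsave(&A, flags);
 *	spin_lock(&B);			// the new A -> B dependency links a
 *					// hardirq-safe lock to a hardirq-unsafe
 *					// one and is reported above
 */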
1704
1705 static void inc_chains(void)
1706 {
1707         if (current->hardirq_context)
1708                 nr_hardirq_chains++;
1709         else {
1710                 if (current->softirq_context)
1711                         nr_softirq_chains++;
1712                 else
1713                         nr_process_chains++;
1714         }
1715 }
1716
1717 #else
1718
1719 static inline int
1720 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1721                 struct held_lock *next)
1722 {
1723         return 1;
1724 }
1725
1726 static inline void inc_chains(void)
1727 {
1728         nr_process_chains++;
1729 }
1730
1731 #endif
1732
1733 static void
1734 print_deadlock_scenario(struct held_lock *nxt,
1735                              struct held_lock *prv)
1736 {
1737         struct lock_class *next = hlock_class(nxt);
1738         struct lock_class *prev = hlock_class(prv);
1739
1740         printk(" Possible unsafe locking scenario:\n\n");
1741         printk("       CPU0\n");
1742         printk("       ----\n");
1743         printk("  lock(");
1744         __print_lock_name(prev);
1745         printk(KERN_CONT ");\n");
1746         printk("  lock(");
1747         __print_lock_name(next);
1748         printk(KERN_CONT ");\n");
1749         printk("\n *** DEADLOCK ***\n\n");
1750         printk(" May be due to missing lock nesting notation\n\n");
1751 }
1752
1753 static int
1754 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1755                    struct held_lock *next)
1756 {
1757         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1758                 return 0;
1759
1760         pr_warn("\n");
1761         pr_warn("============================================\n");
1762         pr_warn("WARNING: possible recursive locking detected\n");
1763         print_kernel_ident();
1764         pr_warn("--------------------------------------------\n");
1765         pr_warn("%s/%d is trying to acquire lock:\n",
1766                 curr->comm, task_pid_nr(curr));
1767         print_lock(next);
1768         pr_warn("\nbut task is already holding lock:\n");
1769         print_lock(prev);
1770
1771         pr_warn("\nother info that might help us debug this:\n");
1772         print_deadlock_scenario(next, prev);
1773         lockdep_print_held_locks(curr);
1774
1775         pr_warn("\nstack backtrace:\n");
1776         dump_stack();
1777
1778         return 0;
1779 }
1780
1781 /*
1782  * Check whether we are holding such a class already.
1783  *
1784  * (Note that this has to be done separately, because the graph cannot
1785  * detect such classes of deadlocks.)
1786  *
1787  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1788  */
1789 static int
1790 check_deadlock(struct task_struct *curr, struct held_lock *next,
1791                struct lockdep_map *next_instance, int read)
1792 {
1793         struct held_lock *prev;
1794         struct held_lock *nest = NULL;
1795         int i;
1796
1797         for (i = 0; i < curr->lockdep_depth; i++) {
1798                 prev = curr->held_locks + i;
1799
1800                 if (prev->instance == next->nest_lock)
1801                         nest = prev;
1802
1803                 if (hlock_class(prev) != hlock_class(next))
1804                         continue;
1805
1806                 /*
1807                  * Allow read-after-read recursion of the same
1808                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1809                  */
1810                 if ((read == 2) && prev->read)
1811                         return 2;
1812
1813                 /*
1814                  * We're holding the nest_lock, which serializes this lock's
1815                  * nesting behaviour.
1816                  */
1817                 if (nest)
1818                         return 2;
1819
1820                 return print_deadlock_bug(curr, prev, next);
1821         }
1822         return 1;
1823 }
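
/*
 * Illustration of the cases check_deadlock() distinguishes, a sketch with
 * hypothetical locks (not part of this file):
 *
 *	read_lock(&rw);
 *	read_lock(&rw);			// read-after-read on the same class:
 *					// returns 2, allowed
 *
 *	mutex_lock(&m);
 *	mutex_lock(&m);			// same class again, no nesting notation:
 *					// "possible recursive locking detected"
 *
 *	mutex_lock(&parent);
 *	mutex_lock_nest_lock(&child, &parent);	// nest_lock is held: returns 2,
 *						// allowed
 */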
1824
1825 /*
1826  * There was a chain-cache miss, and we are about to add a new dependency
1827  * to a previous lock. We recursively validate the following rules:
1828  *
1829  *  - would the adding of the <prev> -> <next> dependency create a
1830  *    circular dependency in the graph? [== circular deadlock]
1831  *
1832  *  - does the new prev->next dependency connect any hardirq-safe lock
1833  *    (in the full backwards-subgraph starting at <prev>) with any
1834  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1835  *    <next>)? [== illegal lock inversion with hardirq contexts]
1836  *
1837  *  - does the new prev->next dependency connect any softirq-safe lock
1838  *    (in the full backwards-subgraph starting at <prev>) with any
1839  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1840  *    <next>)? [== illegal lock inversion with softirq contexts]
1841  *
1842  * any of these scenarios could lead to a deadlock.
1843  *
1844  * Then if all the validations pass, we add the forwards and backwards
1845  * dependency.
1846  */
1847 static int
1848 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1849                struct held_lock *next, int distance, struct stack_trace *trace,
1850                int (*save)(struct stack_trace *trace))
1851 {
1852         struct lock_list *uninitialized_var(target_entry);
1853         struct lock_list *entry;
1854         struct lock_list this;
1855         int ret;
1856
1857         /*
1858          * Prove that the new <prev> -> <next> dependency would not
1859          * create a circular dependency in the graph. (We do this by
1860          * forward-recursing into the graph starting at <next>, and
1861          * checking whether we can reach <prev>.)
1862          *
1863          * We are using global variables to control the recursion, to
1864          * keep the stackframe size of the recursive functions low:
1865          */
1866         this.class = hlock_class(next);
1867         this.parent = NULL;
1868         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1869         if (unlikely(!ret)) {
1870                 if (!trace->entries) {
1871                         /*
1872                          * If @save fails here, the printing might trigger
1873                          * a WARN but because of the !nr_entries it should
1874                          * not do bad things.
1875                          */
1876                         save(trace);
1877                 }
1878                 return print_circular_bug(&this, target_entry, next, prev, trace);
1879         }
1880         else if (unlikely(ret < 0))
1881                 return print_bfs_bug(ret);
1882
1883         if (!check_prev_add_irq(curr, prev, next))
1884                 return 0;
1885
1886         /*
1887          * For recursive read-locks we do all the dependency checks,
1888          * but we dont store read-triggered dependencies (only
1889          * write-triggered dependencies). This ensures that only the
1890          * write-side dependencies matter, and that if for example a
1891          * write-lock never takes any other locks, then the reads are
1892          * equivalent to a NOP.
1893          */
1894         if (next->read == 2 || prev->read == 2)
1895                 return 1;
1896         /*
1897          * Is the <prev> -> <next> dependency already present?
1898          *
1899          * (this may occur even though this is a new chain: consider
1900          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1901          *  chains - the second one will be new, but L1 already has
1902          *  L2 added to its dependency list, due to the first chain.)
1903          */
1904         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1905                 if (entry->class == hlock_class(next)) {
1906                         if (distance == 1)
1907                                 entry->distance = 1;
1908                         return 1;
1909                 }
1910         }
1911
1912         /*
1913          * Is the <prev> -> <next> link redundant?
1914          */
1915         this.class = hlock_class(prev);
1916         this.parent = NULL;
1917         ret = check_redundant(&this, hlock_class(next), &target_entry);
1918         if (!ret) {
1919                 debug_atomic_inc(nr_redundant);
1920                 return 2;
1921         }
1922         if (ret < 0)
1923                 return print_bfs_bug(ret);
1924
1925
1926         if (!trace->entries && !save(trace))
1927                 return 0;
1928
1929         /*
1930          * Ok, all validations passed, add the new lock
1931          * to the previous lock's dependency list:
1932          */
1933         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1934                                &hlock_class(prev)->locks_after,
1935                                next->acquire_ip, distance, trace);
1936
1937         if (!ret)
1938                 return 0;
1939
1940         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1941                                &hlock_class(next)->locks_before,
1942                                next->acquire_ip, distance, trace);
1943         if (!ret)
1944                 return 0;
1945
1946         return 2;
1947 }
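
/*
 * The classic case that the check_noncircular() step above catches, as a
 * sketch (A and B are hypothetical locks):
 *
 *	// task 1:
 *	mutex_lock(&A);
 *	mutex_lock(&B);		// records the A -> B dependency
 *
 *	// task 2, at any later time:
 *	mutex_lock(&B);
 *	mutex_lock(&A);		// B -> A would close a cycle: <prev> (B) is
 *				// already reachable forwards from <next> (A),
 *				// so the circular dependency is reported even
 *				// if the two tasks never actually raced
 */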
1948
1949 /*
1950  * Add the dependency to all directly-previous locks that are 'relevant'.
1951  * The ones that are relevant are (in increasing distance from curr):
1952  * all consecutive trylock entries and the final non-trylock entry - or
1953  * the end of this context's lock-chain - whichever comes first.
1954  */
1955 static int
1956 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1957 {
1958         int depth = curr->lockdep_depth;
1959         struct held_lock *hlock;
1960         struct stack_trace trace = {
1961                 .nr_entries = 0,
1962                 .max_entries = 0,
1963                 .entries = NULL,
1964                 .skip = 0,
1965         };
1966
1967         /*
1968          * Debugging checks.
1969          *
1970          * Depth must not be zero for a non-head lock:
1971          */
1972         if (!depth)
1973                 goto out_bug;
1974         /*
1975          * At least two relevant locks must exist for this
1976          * to be a head:
1977          */
1978         if (curr->held_locks[depth].irq_context !=
1979                         curr->held_locks[depth-1].irq_context)
1980                 goto out_bug;
1981
1982         for (;;) {
1983                 int distance = curr->lockdep_depth - depth + 1;
1984                 hlock = curr->held_locks + depth - 1;
1985
1986                 /*
1987                  * Only non-recursive-read entries get new dependencies
1988                  * added:
1989                  */
1990                 if (hlock->read != 2 && hlock->check) {
1991                         int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1992                         if (!ret)
1993                                 return 0;
1994
1995                         /*
1996                          * Stop after the first non-trylock entry,
1997                          * as non-trylock entries have added their
1998                          * own direct dependencies already, so this
1999                          * lock is connected to them indirectly:
2000                          */
2001                         if (!hlock->trylock)
2002                                 break;
2003                 }
2004
2005                 depth--;
2006                 /*
2007                  * End of lock-stack?
2008                  */
2009                 if (!depth)
2010                         break;
2011                 /*
2012                  * Stop the search if we cross into another context:
2013                  */
2014                 if (curr->held_locks[depth].irq_context !=
2015                                 curr->held_locks[depth-1].irq_context)
2016                         break;
2017         }
2018         return 1;
2019 out_bug:
2020         if (!debug_locks_off_graph_unlock())
2021                 return 0;
2022
2023         /*
2024          * Clearly we shouldn't be here, but since we made it we
2025          * can reliably say we messed up our state. See the above two
2026          * gotos for reasons why we could possibly end up here.
2027          */
2028         WARN_ON(1);
2029
2030         return 0;
2031 }
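
/*
 * Example of which previous locks are "relevant" (a sketch, hypothetical
 * locks):
 *
 *	spin_lock(&A);
 *	spin_trylock(&B);
 *	spin_trylock(&C);
 *	spin_lock(&D);		// check_prevs_add() for D adds the C -> D,
 *				// B -> D and A -> D dependencies, stopping
 *				// at A, the first non-trylock entry
 */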
2032
2033 unsigned long nr_lock_chains;
2034 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2035 int nr_chain_hlocks;
2036 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2037
2038 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2039 {
2040         return lock_classes + chain_hlocks[chain->base + i];
2041 }
2042
2043 /*
2044  * Returns the index of the first held_lock of the current chain
2045  */
2046 static inline int get_first_held_lock(struct task_struct *curr,
2047                                         struct held_lock *hlock)
2048 {
2049         int i;
2050         struct held_lock *hlock_curr;
2051
2052         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2053                 hlock_curr = curr->held_locks + i;
2054                 if (hlock_curr->irq_context != hlock->irq_context)
2055                         break;
2056
2057         }
2058
2059         return ++i;
2060 }
2061
2062 #ifdef CONFIG_DEBUG_LOCKDEP
2063 /*
2064  * Returns the next chain_key iteration
2065  */
2066 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2067 {
2068         u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2069
2070         printk(" class_idx:%d -> chain_key:%016Lx",
2071                 class_idx,
2072                 (unsigned long long)new_chain_key);
2073         return new_chain_key;
2074 }
2075
2076 static void
2077 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2078 {
2079         struct held_lock *hlock;
2080         u64 chain_key = 0;
2081         int depth = curr->lockdep_depth;
2082         int i;
2083
2084         printk("depth: %u\n", depth + 1);
2085         for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2086                 hlock = curr->held_locks + i;
2087                 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2088
2089                 print_lock(hlock);
2090         }
2091
2092         print_chain_key_iteration(hlock_next->class_idx, chain_key);
2093         print_lock(hlock_next);
2094 }
2095
2096 static void print_chain_keys_chain(struct lock_chain *chain)
2097 {
2098         int i;
2099         u64 chain_key = 0;
2100         int class_id;
2101
2102         printk("depth: %u\n", chain->depth);
2103         for (i = 0; i < chain->depth; i++) {
2104                 class_id = chain_hlocks[chain->base + i];
2105                 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2106
2107                 print_lock_name(lock_classes + class_id);
2108                 printk("\n");
2109         }
2110 }
2111
2112 static void print_collision(struct task_struct *curr,
2113                         struct held_lock *hlock_next,
2114                         struct lock_chain *chain)
2115 {
2116         pr_warn("\n");
2117         pr_warn("============================\n");
2118         pr_warn("WARNING: chain_key collision\n");
2119         print_kernel_ident();
2120         pr_warn("----------------------------\n");
2121         pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2122         pr_warn("Hash chain already cached but the contents don't match!\n");
2123
2124         pr_warn("Held locks:");
2125         print_chain_keys_held_locks(curr, hlock_next);
2126
2127         pr_warn("Locks in cached chain:");
2128         print_chain_keys_chain(chain);
2129
2130         pr_warn("\nstack backtrace:\n");
2131         dump_stack();
2132 }
2133 #endif
2134
2135 /*
2136  * Checks whether the chain and the current held locks are consistent
2137  * in depth and also in content. If they are not, it most likely means
2138  * that there was a collision during the calculation of the chain_key.
2139  * Returns: 0 not passed, 1 passed
2140  */
2141 static int check_no_collision(struct task_struct *curr,
2142                         struct held_lock *hlock,
2143                         struct lock_chain *chain)
2144 {
2145 #ifdef CONFIG_DEBUG_LOCKDEP
2146         int i, j, id;
2147
2148         i = get_first_held_lock(curr, hlock);
2149
2150         if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2151                 print_collision(curr, hlock, chain);
2152                 return 0;
2153         }
2154
2155         for (j = 0; j < chain->depth - 1; j++, i++) {
2156                 id = curr->held_locks[i].class_idx - 1;
2157
2158                 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2159                         print_collision(curr, hlock, chain);
2160                         return 0;
2161                 }
2162         }
2163 #endif
2164         return 1;
2165 }
2166
2167 /*
2168  * Adds a dependency chain into the chain hashtable. Must be called with
2169  * graph_lock held.
2170  *
2171  * Returns 0 on failure, and graph_lock is released.
2172  * Returns 1 on success, with graph_lock still held.
2173  */
2174 static inline int add_chain_cache(struct task_struct *curr,
2175                                   struct held_lock *hlock,
2176                                   u64 chain_key)
2177 {
2178         struct lock_class *class = hlock_class(hlock);
2179         struct hlist_head *hash_head = chainhashentry(chain_key);
2180         struct lock_chain *chain;
2181         int i, j;
2182
2183         /*
2184          * Allocate a new chain entry from the static array, and add
2185          * it to the hash:
2186          */
2187
2188         /*
2189          * We might need to take the graph lock, so ensure we've got IRQs
2190          * disabled to make this an IRQ-safe lock. For recursion reasons,
2191          * lockdep won't complain about its own locking errors.
2192          */
2193         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2194                 return 0;
2195
2196         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2197                 if (!debug_locks_off_graph_unlock())
2198                         return 0;
2199
2200                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2201                 dump_stack();
2202                 return 0;
2203         }
2204         chain = lock_chains + nr_lock_chains++;
2205         chain->chain_key = chain_key;
2206         chain->irq_context = hlock->irq_context;
2207         i = get_first_held_lock(curr, hlock);
2208         chain->depth = curr->lockdep_depth + 1 - i;
2209
2210         BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2211         BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2212         BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2213
2214         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2215                 chain->base = nr_chain_hlocks;
2216                 for (j = 0; j < chain->depth - 1; j++, i++) {
2217                         int lock_id = curr->held_locks[i].class_idx - 1;
2218                         chain_hlocks[chain->base + j] = lock_id;
2219                 }
2220                 chain_hlocks[chain->base + j] = class - lock_classes;
2221                 nr_chain_hlocks += chain->depth;
2222         } else {
2223                 if (!debug_locks_off_graph_unlock())
2224                         return 0;
2225
2226                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2227                 dump_stack();
2228                 return 0;
2229         }
2230
2231         hlist_add_head_rcu(&chain->entry, hash_head);
2232         debug_atomic_inc(chain_lookup_misses);
2233         inc_chains();
2234
2235         return 1;
2236 }
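
/*
 * Caller pattern implied by the locking contract above (illustrative
 * sketch only; see lookup_chain_cache_add() below for the real caller):
 *
 *	if (!graph_lock())
 *		return 0;
 *	if (!add_chain_cache(curr, hlock, chain_key))
 *		return 0;	// failure: graph_lock was already released
 *	...
 *	graph_unlock();		// success: we still hold the graph lock
 */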
2237
2238 /*
2239  * Look up a dependency chain.
2240  */
2241 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2242 {
2243         struct hlist_head *hash_head = chainhashentry(chain_key);
2244         struct lock_chain *chain;
2245
2246         /*
2247          * We can walk it lock-free, because entries only get added
2248          * to the hash:
2249          */
2250         hlist_for_each_entry_rcu(chain, hash_head, entry) {
2251                 if (chain->chain_key == chain_key) {
2252                         debug_atomic_inc(chain_lookup_hits);
2253                         return chain;
2254                 }
2255         }
2256         return NULL;
2257 }
2258
2259 /*
2260  * If the key is not present yet in the dependency chain cache, then
2261  * add it and return 1 - in this case the new dependency chain is
2262  * validated. If the key is already hashed, return 0.
2263  * (On return with 1 graph_lock is held.)
2264  */
2265 static inline int lookup_chain_cache_add(struct task_struct *curr,
2266                                          struct held_lock *hlock,
2267                                          u64 chain_key)
2268 {
2269         struct lock_class *class = hlock_class(hlock);
2270         struct lock_chain *chain = lookup_chain_cache(chain_key);
2271
2272         if (chain) {
2273 cache_hit:
2274                 if (!check_no_collision(curr, hlock, chain))
2275                         return 0;
2276
2277                 if (very_verbose(class)) {
2278                         printk("\nhash chain already cached, key: "
2279                                         "%016Lx tail class: [%px] %s\n",
2280                                         (unsigned long long)chain_key,
2281                                         class->key, class->name);
2282                 }
2283
2284                 return 0;
2285         }
2286
2287         if (very_verbose(class)) {
2288                 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2289                         (unsigned long long)chain_key, class->key, class->name);
2290         }
2291
2292         if (!graph_lock())
2293                 return 0;
2294
2295         /*
2296          * We have to walk the chain again locked - to avoid duplicates:
2297          */
2298         chain = lookup_chain_cache(chain_key);
2299         if (chain) {
2300                 graph_unlock();
2301                 goto cache_hit;
2302         }
2303
2304         if (!add_chain_cache(curr, hlock, chain_key))
2305                 return 0;
2306
2307         return 1;
2308 }
2309
2310 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2311                 struct held_lock *hlock, int chain_head, u64 chain_key)
2312 {
2313         /*
2314          * Trylock needs to maintain the stack of held locks, but it
2315          * does not add new dependencies, because trylock can be done
2316          * in any order.
2317          *
2318          * We look up the chain_key and do the O(N^2) check and update of
2319          * the dependencies only if this is a new dependency chain.
2320          * (If lookup_chain_cache_add() returns 1, it acquires
2321          * graph_lock for us)
2322          */
2323         if (!hlock->trylock && hlock->check &&
2324             lookup_chain_cache_add(curr, hlock, chain_key)) {
2325                 /*
2326                  * Check whether last held lock:
2327                  *
2328                  * - is irq-safe, if this lock is irq-unsafe
2329                  * - is softirq-safe, if this lock is softirq-unsafe
2330                  *
2331                  * And check whether the new lock's dependency graph
2332                  * could lead back to the previous lock.
2333                  *
2334                  * any of these scenarios could lead to a deadlock. If all
2335                  * validations pass, the new dependencies are added below.
2336                  */
2337                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2338
2339                 if (!ret)
2340                         return 0;
2341                 /*
2342                  * Mark recursive read, as we jump over it when
2343                  * building dependencies (just like we jump over
2344                  * trylock entries):
2345                  */
2346                 if (ret == 2)
2347                         hlock->read = 2;
2348                 /*
2349                  * Add dependency only if this lock is not the head
2350                  * of the chain, and if it's not a secondary read-lock:
2351                  */
2352                 if (!chain_head && ret != 2) {
2353                         if (!check_prevs_add(curr, hlock))
2354                                 return 0;
2355                 }
2356
2357                 graph_unlock();
2358         } else {
2359                 /* after lookup_chain_cache_add(): */
2360                 if (unlikely(!debug_locks))
2361                         return 0;
2362         }
2363
2364         return 1;
2365 }
2366 #else
2367 static inline int validate_chain(struct task_struct *curr,
2368                 struct lockdep_map *lock, struct held_lock *hlock,
2369                 int chain_head, u64 chain_key)
2370 {
2371         return 1;
2372 }
2373 #endif
2374
2375 /*
2376  * We are building curr_chain_key incrementally, so double-check
2377  * it from scratch, to make sure that it's done correctly:
2378  */
2379 static void check_chain_key(struct task_struct *curr)
2380 {
2381 #ifdef CONFIG_DEBUG_LOCKDEP
2382         struct held_lock *hlock, *prev_hlock = NULL;
2383         unsigned int i;
2384         u64 chain_key = 0;
2385
2386         for (i = 0; i < curr->lockdep_depth; i++) {
2387                 hlock = curr->held_locks + i;
2388                 if (chain_key != hlock->prev_chain_key) {
2389                         debug_locks_off();
2390                         /*
2391                          * We got mighty confused, our chain keys don't match
2392                          * with what we expect, did someone trample on our task state?
2393                          */
2394                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2395                                 curr->lockdep_depth, i,
2396                                 (unsigned long long)chain_key,
2397                                 (unsigned long long)hlock->prev_chain_key);
2398                         return;
2399                 }
2400                 /*
2401                  * Whoops ran out of static storage again?
2402                  * Whoops, ran out of static storage again?
2403                 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2404                         return;
2405
2406                 if (prev_hlock && (prev_hlock->irq_context !=
2407                                                         hlock->irq_context))
2408                         chain_key = 0;
2409                 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2410                 prev_hlock = hlock;
2411         }
2412         if (chain_key != curr->curr_chain_key) {
2413                 debug_locks_off();
2414                 /*
2415                  * More smoking hash instead of calculating it, damn see these
2416                  * numbers float.. I bet that a pink elephant stepped on my memory.
2417                  */
2418                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2419                         curr->lockdep_depth, i,
2420                         (unsigned long long)chain_key,
2421                         (unsigned long long)curr->curr_chain_key);
2422         }
2423 #endif
2424 }
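
/*
 * Sketch of the incremental computation being re-checked above, assuming
 * three held locks with the third one taken from a different irq context:
 *
 *	chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, hlock[0].class_idx);
 *	chain_key = iterate_chain_key(chain_key, hlock[1].class_idx);
 *	chain_key = 0;		// irq_context changed: the chain restarts
 *	chain_key = iterate_chain_key(chain_key, hlock[2].class_idx);
 */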
2425
2426 static void
2427 print_usage_bug_scenario(struct held_lock *lock)
2428 {
2429         struct lock_class *class = hlock_class(lock);
2430
2431         printk(" Possible unsafe locking scenario:\n\n");
2432         printk("       CPU0\n");
2433         printk("       ----\n");
2434         printk("  lock(");
2435         __print_lock_name(class);
2436         printk(KERN_CONT ");\n");
2437         printk("  <Interrupt>\n");
2438         printk("    lock(");
2439         __print_lock_name(class);
2440         printk(KERN_CONT ");\n");
2441         printk("\n *** DEADLOCK ***\n\n");
2442 }
2443
2444 static int
2445 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2446                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2447 {
2448         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2449                 return 0;
2450
2451         pr_warn("\n");
2452         pr_warn("================================\n");
2453         pr_warn("WARNING: inconsistent lock state\n");
2454         print_kernel_ident();
2455         pr_warn("--------------------------------\n");
2456
2457         pr_warn("inconsistent {%s} -> {%s} usage.\n",
2458                 usage_str[prev_bit], usage_str[new_bit]);
2459
2460         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2461                 curr->comm, task_pid_nr(curr),
2462                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2463                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2464                 trace_hardirqs_enabled(curr),
2465                 trace_softirqs_enabled(curr));
2466         print_lock(this);
2467
2468         pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2469         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2470
2471         print_irqtrace_events(curr);
2472         pr_warn("\nother info that might help us debug this:\n");
2473         print_usage_bug_scenario(this);
2474
2475         lockdep_print_held_locks(curr);
2476
2477         pr_warn("\nstack backtrace:\n");
2478         dump_stack();
2479
2480         return 0;
2481 }
2482
2483 /*
2484  * Print out an error if an invalid bit is set:
2485  */
2486 static inline int
2487 valid_state(struct task_struct *curr, struct held_lock *this,
2488             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2489 {
2490         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2491                 return print_usage_bug(curr, this, bad_bit, new_bit);
2492         return 1;
2493 }
2494
2495 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2496                      enum lock_usage_bit new_bit);
2497
2498 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2499
2500 /*
2501  * print irq inversion bug:
2502  */
2503 static int
2504 print_irq_inversion_bug(struct task_struct *curr,
2505                         struct lock_list *root, struct lock_list *other,
2506                         struct held_lock *this, int forwards,
2507                         const char *irqclass)
2508 {
2509         struct lock_list *entry = other;
2510         struct lock_list *middle = NULL;
2511         int depth;
2512
2513         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2514                 return 0;
2515
2516         pr_warn("\n");
2517         pr_warn("========================================================\n");
2518         pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2519         print_kernel_ident();
2520         pr_warn("--------------------------------------------------------\n");
2521         pr_warn("%s/%d just changed the state of lock:\n",
2522                 curr->comm, task_pid_nr(curr));
2523         print_lock(this);
2524         if (forwards)
2525                 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2526         else
2527                 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2528         print_lock_name(other->class);
2529         pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2530
2531         pr_warn("\nother info that might help us debug this:\n");
2532
2533         /* Find a middle lock (if one exists) */
2534         depth = get_lock_depth(other);
2535         do {
2536                 if (depth == 0 && (entry != root)) {
2537                         pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2538                         break;
2539                 }
2540                 middle = entry;
2541                 entry = get_lock_parent(entry);
2542                 depth--;
2543         } while (entry && entry != root && (depth >= 0));
2544         if (forwards)
2545                 print_irq_lock_scenario(root, other,
2546                         middle ? middle->class : root->class, other->class);
2547         else
2548                 print_irq_lock_scenario(other, root,
2549                         middle ? middle->class : other->class, root->class);
2550
2551         lockdep_print_held_locks(curr);
2552
2553         pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2554         if (!save_trace(&root->trace))
2555                 return 0;
2556         print_shortest_lock_dependencies(other, root);
2557
2558         pr_warn("\nstack backtrace:\n");
2559         dump_stack();
2560
2561         return 0;
2562 }
2563
2564 /*
2565  * Prove that in the forwards-direction subgraph starting at <this>
2566  * there is no lock matching <mask>:
2567  */
2568 static int
2569 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2570                      enum lock_usage_bit bit, const char *irqclass)
2571 {
2572         int ret;
2573         struct lock_list root;
2574         struct lock_list *uninitialized_var(target_entry);
2575
2576         root.parent = NULL;
2577         root.class = hlock_class(this);
2578         ret = find_usage_forwards(&root, bit, &target_entry);
2579         if (ret < 0)
2580                 return print_bfs_bug(ret);
2581         if (ret == 1)
2582                 return ret;
2583
2584         return print_irq_inversion_bug(curr, &root, target_entry,
2585                                         this, 1, irqclass);
2586 }
2587
2588 /*
2589  * Prove that in the backwards-direction subgraph starting at <this>
2590  * there is no lock matching <mask>:
2591  */
2592 static int
2593 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2594                       enum lock_usage_bit bit, const char *irqclass)
2595 {
2596         int ret;
2597         struct lock_list root;
2598         struct lock_list *uninitialized_var(target_entry);
2599
2600         root.parent = NULL;
2601         root.class = hlock_class(this);
2602         ret = find_usage_backwards(&root, bit, &target_entry);
2603         if (ret < 0)
2604                 return print_bfs_bug(ret);
2605         if (ret == 1)
2606                 return ret;
2607
2608         return print_irq_inversion_bug(curr, &root, target_entry,
2609                                         this, 0, irqclass);
2610 }
2611
2612 void print_irqtrace_events(struct task_struct *curr)
2613 {
2614         printk("irq event stamp: %u\n", curr->irq_events);
2615         printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
2616                 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2617                 (void *)curr->hardirq_enable_ip);
2618         printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2619                 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2620                 (void *)curr->hardirq_disable_ip);
2621         printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
2622                 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2623                 (void *)curr->softirq_enable_ip);
2624         printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2625                 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2626                 (void *)curr->softirq_disable_ip);
2627 }
2628
2629 static int HARDIRQ_verbose(struct lock_class *class)
2630 {
2631 #if HARDIRQ_VERBOSE
2632         return class_filter(class);
2633 #endif
2634         return 0;
2635 }
2636
2637 static int SOFTIRQ_verbose(struct lock_class *class)
2638 {
2639 #if SOFTIRQ_VERBOSE
2640         return class_filter(class);
2641 #endif
2642         return 0;
2643 }
2644
2645 #define STRICT_READ_CHECKS      1
2646
2647 static int (*state_verbose_f[])(struct lock_class *class) = {
2648 #define LOCKDEP_STATE(__STATE) \
2649         __STATE##_verbose,
2650 #include "lockdep_states.h"
2651 #undef LOCKDEP_STATE
2652 };
2653
2654 static inline int state_verbose(enum lock_usage_bit bit,
2655                                 struct lock_class *class)
2656 {
2657         return state_verbose_f[bit >> 2](class);
2658 }
2659
2660 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2661                              enum lock_usage_bit bit, const char *name);
2662
2663 static int
2664 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2665                 enum lock_usage_bit new_bit)
2666 {
2667         int excl_bit = exclusive_bit(new_bit);
2668         int read = new_bit & LOCK_USAGE_READ_MASK;
2669         int dir = new_bit & LOCK_USAGE_DIR_MASK;
2670
2671         /*
2672          * mark USED_IN has to look forwards -- to ensure no dependency
2673          * has ENABLED state, which would allow recursion deadlocks.
2674          *
2675          * mark ENABLED has to look backwards -- to ensure no dependee
2676          * has USED_IN state, which, again, would allow recursion deadlocks.
2677          */
2678         check_usage_f usage = dir ?
2679                 check_usage_backwards : check_usage_forwards;
2680
2681         /*
2682          * Validate that this particular lock does not have conflicting
2683          * usage states.
2684          */
2685         if (!valid_state(curr, this, new_bit, excl_bit))
2686                 return 0;
2687
2688         /*
2689          * Validate that the lock dependencies don't have conflicting usage
2690          * states.
2691          */
2692         if ((!read || !dir || STRICT_READ_CHECKS) &&
2693                         !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
2694                 return 0;
2695
2696         /*
2697          * Check for read in write conflicts
2698          */
2699         if (!read) {
2700                 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
2701                         return 0;
2702
2703                 if (STRICT_READ_CHECKS &&
2704                         !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
2705                                 state_name(new_bit + LOCK_USAGE_READ_MASK)))
2706                         return 0;
2707         }
2708
2709         if (state_verbose(new_bit, hlock_class(this)))
2710                 return 2;
2711
2712         return 1;
2713 }
2714
2715 /*
2716  * Mark all held locks with a usage bit:
2717  */
2718 static int
2719 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
2720 {
2721         struct held_lock *hlock;
2722         int i;
2723
2724         for (i = 0; i < curr->lockdep_depth; i++) {
2725                 enum lock_usage_bit hlock_bit = base_bit;
2726                 hlock = curr->held_locks + i;
2727
2728                 if (hlock->read)
2729                         hlock_bit += LOCK_USAGE_READ_MASK;
2730
2731                 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
2732
2733                 if (!hlock->check)
2734                         continue;
2735
2736                 if (!mark_lock(curr, hlock, hlock_bit))
2737                         return 0;
2738         }
2739
2740         return 1;
2741 }
2742
2743 /*
2744  * Hardirqs will be enabled:
2745  */
2746 static void __trace_hardirqs_on_caller(unsigned long ip)
2747 {
2748         struct task_struct *curr = current;
2749
2750         /* we'll do an OFF -> ON transition: */
2751         curr->hardirqs_enabled = 1;
2752
2753         /*
2754          * We are going to turn hardirqs on, so set the
2755          * usage bit for all held locks:
2756          */
2757         if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
2758                 return;
2759         /*
2760          * If we have softirqs enabled, then set the usage
2761          * bit for all held locks. (disabled hardirqs prevented
2762          * this bit from being set before)
2763          */
2764         if (curr->softirqs_enabled)
2765                 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
2766                         return;
2767
2768         curr->hardirq_enable_ip = ip;
2769         curr->hardirq_enable_event = ++curr->irq_events;
2770         debug_atomic_inc(hardirqs_on_events);
2771 }
2772
2773 void lockdep_hardirqs_on(unsigned long ip)
2774 {
2775         if (unlikely(!debug_locks || current->lockdep_recursion))
2776                 return;
2777
2778         if (unlikely(current->hardirqs_enabled)) {
2779                 /*
2780                  * Neither irq nor preemption are disabled here
2781                  * so this is racy by nature but losing one hit
2782                  * in a stat is not a big deal.
2783                  */
2784                 __debug_atomic_inc(redundant_hardirqs_on);
2785                 return;
2786         }
2787
2788         /*
2789          * We're enabling irqs and according to our state above irqs weren't
2790          * already enabled, yet we find the hardware thinks they are in fact
2791          * enabled.. someone messed up their IRQ state tracing.
2792          */
2793         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2794                 return;
2795
2796         /*
2797          * See the fine text that goes along with this variable definition.
2798          */
2799         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2800                 return;
2801
2802         /*
2803          * Can't allow enabling interrupts while in an interrupt handler,
2804          * that's general bad form and such. Recursion, limited stack etc..
2805          */
2806         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2807                 return;
2808
2809         current->lockdep_recursion = 1;
2810         __trace_hardirqs_on_caller(ip);
2811         current->lockdep_recursion = 0;
2812 }
2813
2814 /*
2815  * Hardirqs were disabled:
2816  */
2817 void lockdep_hardirqs_off(unsigned long ip)
2818 {
2819         struct task_struct *curr = current;
2820
2821         if (unlikely(!debug_locks || current->lockdep_recursion))
2822                 return;
2823
2824         /*
2825          * So we're supposed to get called after you mask local IRQs, but for
2826          * some reason the hardware doesn't quite think you did a proper job.
2827          */
2828         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2829                 return;
2830
2831         if (curr->hardirqs_enabled) {
2832                 /*
2833                  * We have done an ON -> OFF transition:
2834                  */
2835                 curr->hardirqs_enabled = 0;
2836                 curr->hardirq_disable_ip = ip;
2837                 curr->hardirq_disable_event = ++curr->irq_events;
2838                 debug_atomic_inc(hardirqs_off_events);
2839         } else
2840                 debug_atomic_inc(redundant_hardirqs_off);
2841 }
2842
2843 /*
2844  * Softirqs will be enabled:
2845  */
2846 void trace_softirqs_on(unsigned long ip)
2847 {
2848         struct task_struct *curr = current;
2849
2850         if (unlikely(!debug_locks || current->lockdep_recursion))
2851                 return;
2852
2853         /*
2854          * We fancy IRQs being disabled here, see softirq.c, avoids
2855          * funny state and nesting things.
2856          */
2857         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2858                 return;
2859
2860         if (curr->softirqs_enabled) {
2861                 debug_atomic_inc(redundant_softirqs_on);
2862                 return;
2863         }
2864
2865         current->lockdep_recursion = 1;
2866         /*
2867          * We'll do an OFF -> ON transition:
2868          */
2869         curr->softirqs_enabled = 1;
2870         curr->softirq_enable_ip = ip;
2871         curr->softirq_enable_event = ++curr->irq_events;
2872         debug_atomic_inc(softirqs_on_events);
2873         /*
2874          * We are going to turn softirqs on, so set the
2875          * usage bit for all held locks, if hardirqs are
2876          * enabled too:
2877          */
2878         if (curr->hardirqs_enabled)
2879                 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
2880         current->lockdep_recursion = 0;
2881 }
2882
2883 /*
2884  * Softirqs were disabled:
2885  */
2886 void trace_softirqs_off(unsigned long ip)
2887 {
2888         struct task_struct *curr = current;
2889
2890         if (unlikely(!debug_locks || current->lockdep_recursion))
2891                 return;
2892
2893         /*
2894          * We fancy IRQs being disabled here, see softirq.c
2895          */
2896         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2897                 return;
2898
2899         if (curr->softirqs_enabled) {
2900                 /*
2901                  * We have done an ON -> OFF transition:
2902                  */
2903                 curr->softirqs_enabled = 0;
2904                 curr->softirq_disable_ip = ip;
2905                 curr->softirq_disable_event = ++curr->irq_events;
2906                 debug_atomic_inc(softirqs_off_events);
2907                 /*
2908                  * Whoops, we wanted softirqs off, so why aren't they?
2909                  */
2910                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2911         } else
2912                 debug_atomic_inc(redundant_softirqs_off);
2913 }
2914
2915 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2916 {
2917         /*
2918          * If this is a non-trylock acquisition in a hardirq or softirq
2919          * context, then mark the lock as used in the corresponding context:
2920          */
2921         if (!hlock->trylock) {
2922                 if (hlock->read) {
2923                         if (curr->hardirq_context)
2924                                 if (!mark_lock(curr, hlock,
2925                                                 LOCK_USED_IN_HARDIRQ_READ))
2926                                         return 0;
2927                         if (curr->softirq_context)
2928                                 if (!mark_lock(curr, hlock,
2929                                                 LOCK_USED_IN_SOFTIRQ_READ))
2930                                         return 0;
2931                 } else {
2932                         if (curr->hardirq_context)
2933                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2934                                         return 0;
2935                         if (curr->softirq_context)
2936                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2937                                         return 0;
2938                 }
2939         }
2940         if (!hlock->hardirqs_off) {
2941                 if (hlock->read) {
2942                         if (!mark_lock(curr, hlock,
2943                                         LOCK_ENABLED_HARDIRQ_READ))
2944                                 return 0;
2945                         if (curr->softirqs_enabled)
2946                                 if (!mark_lock(curr, hlock,
2947                                                 LOCK_ENABLED_SOFTIRQ_READ))
2948                                         return 0;
2949                 } else {
2950                         if (!mark_lock(curr, hlock,
2951                                         LOCK_ENABLED_HARDIRQ))
2952                                 return 0;
2953                         if (curr->softirqs_enabled)
2954                                 if (!mark_lock(curr, hlock,
2955                                                 LOCK_ENABLED_SOFTIRQ))
2956                                         return 0;
2957                 }
2958         }
2959
2960         return 1;
2961 }
2962
2963 static inline unsigned int task_irq_context(struct task_struct *task)
2964 {
2965         return 2 * !!task->hardirq_context + !!task->softirq_context;
2966 }
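
/*
 * Worked values (sketch): task_irq_context() yields 0 in plain process
 * context, 1 in softirq context, 2 in hardirq context and 3 for a hardirq
 * that interrupted a softirq.
 */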
2967
2968 static int separate_irq_context(struct task_struct *curr,
2969                 struct held_lock *hlock)
2970 {
2971         unsigned int depth = curr->lockdep_depth;
2972
2973         /*
2974          * Keep track of points where we cross into an interrupt context:
2975          */
2976         if (depth) {
2977                 struct held_lock *prev_hlock;
2978
2979                 prev_hlock = curr->held_locks + depth-1;
2980                 /*
2981                  * If we cross into another context, reset the
2982                  * hash key (this also prevents the checking and the
2983                  * adding of the dependency to 'prev'):
2984                  */
2985                 if (prev_hlock->irq_context != hlock->irq_context)
2986                         return 1;
2987         }
2988         return 0;
2989 }
2990
2991 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2992
2993 static inline
2994 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2995                 enum lock_usage_bit new_bit)
2996 {
2997         WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS here */
2998         return 1;
2999 }
3000
3001 static inline int mark_irqflags(struct task_struct *curr,
3002                 struct held_lock *hlock)
3003 {
3004         return 1;
3005 }
3006
3007 static inline unsigned int task_irq_context(struct task_struct *task)
3008 {
3009         return 0;
3010 }
3011
3012 static inline int separate_irq_context(struct task_struct *curr,
3013                 struct held_lock *hlock)
3014 {
3015         return 0;
3016 }
3017
3018 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3019
3020 /*
3021  * Mark a lock with a usage bit, and validate the state transition:
3022  */
3023 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3024                              enum lock_usage_bit new_bit)
3025 {
3026         unsigned int new_mask = 1 << new_bit, ret = 1;
3027
3028         /*
3029          * If already set then do not dirty the cacheline,
3030          * nor do any checks:
3031          */
3032         if (likely(hlock_class(this)->usage_mask & new_mask))
3033                 return 1;
3034
3035         if (!graph_lock())
3036                 return 0;
3037         /*
3038          * Make sure we didn't race:
3039          */
3040         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3041                 graph_unlock();
3042                 return 1;
3043         }
3044
3045         hlock_class(this)->usage_mask |= new_mask;
3046
3047         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3048                 return 0;
3049
3050         switch (new_bit) {
3051 #define LOCKDEP_STATE(__STATE)                  \
3052         case LOCK_USED_IN_##__STATE:            \
3053         case LOCK_USED_IN_##__STATE##_READ:     \
3054         case LOCK_ENABLED_##__STATE:            \
3055         case LOCK_ENABLED_##__STATE##_READ:
3056 #include "lockdep_states.h"
3057 #undef LOCKDEP_STATE
3058                 ret = mark_lock_irq(curr, this, new_bit);
3059                 if (!ret)
3060                         return 0;
3061                 break;
3062         case LOCK_USED:
3063                 debug_atomic_dec(nr_unused_locks);
3064                 break;
3065         default:
3066                 if (!debug_locks_off_graph_unlock())
3067                         return 0;
3068                 WARN_ON(1);
3069                 return 0;
3070         }
3071
3072         graph_unlock();
3073
3074         /*
3075          * We must printk outside of the graph_lock:
3076          */
3077         if (ret == 2) {
3078                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3079                 print_lock(this);
3080                 print_irqtrace_events(curr);
3081                 dump_stack();
3082         }
3083
3084         return ret;
3085 }
3086
3087 /*
3088  * Initialize a lock instance's lock-class mapping info:
3089  */
3090 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3091                       struct lock_class_key *key, int subclass)
3092 {
3093         int i;
3094
3095         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3096                 lock->class_cache[i] = NULL;
3097
3098 #ifdef CONFIG_LOCK_STAT
3099         lock->cpu = raw_smp_processor_id();
3100 #endif
3101
3102         /*
3103          * Can't be having no nameless bastards around this place!
3104          */
3105         if (DEBUG_LOCKS_WARN_ON(!name)) {
3106                 lock->name = "NULL";
3107                 return;
3108         }
3109
3110         lock->name = name;
3111
3112         /*
3113          * No key, no joy, we need to hash something.
3114          */
3115         if (DEBUG_LOCKS_WARN_ON(!key))
3116                 return;
3117         /*
3118          * Sanity check, the lock-class key must be persistent:
3119          */
3120         if (!static_obj(key)) {
3121                 printk("BUG: key %px not in .data!\n", key);
3122                 /*
3123                  * What it says above ^^^^^, I suggest you read it.
3124                  */
3125                 DEBUG_LOCKS_WARN_ON(1);
3126                 return;
3127         }
3128         lock->key = key;
3129
3130         if (unlikely(!debug_locks))
3131                 return;
3132
3133         if (subclass) {
3134                 unsigned long flags;
3135
3136                 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3137                         return;
3138
3139                 raw_local_irq_save(flags);
3140                 current->lockdep_recursion = 1;
3141                 register_lock_class(lock, subclass, 1);
3142                 current->lockdep_recursion = 0;
3143                 raw_local_irq_restore(flags);
3144         }
3145 }
3146 EXPORT_SYMBOL_GPL(lockdep_init_map);
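
/*
 * Typical usage when rolling a custom locking construct (a sketch; the
 * "my_obj" structure and names are hypothetical):
 *
 *	static struct lock_class_key my_obj_key;
 *
 *	void my_obj_init(struct my_obj *obj)
 *	{
 *		...
 *		lockdep_init_map(&obj->dep_map, "my_obj_lock", &my_obj_key, 0);
 *	}
 *
 * All instances initialized with the same (static) key share one lock class.
 */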
3147
3148 struct lock_class_key __lockdep_no_validate__;
3149 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3150
3151 static int
3152 print_lock_nested_lock_not_held(struct task_struct *curr,
3153                                 struct held_lock *hlock,
3154                                 unsigned long ip)
3155 {
3156         if (!debug_locks_off())
3157                 return 0;
3158         if (debug_locks_silent)
3159                 return 0;
3160
3161         pr_warn("\n");
3162         pr_warn("==================================\n");
3163         pr_warn("WARNING: Nested lock was not taken\n");
3164         print_kernel_ident();
3165         pr_warn("----------------------------------\n");
3166
3167         pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3168         print_lock(hlock);
3169
3170         pr_warn("\nbut this task is not holding:\n");
3171         pr_warn("%s\n", hlock->nest_lock->name);
3172
3175
3176         pr_warn("\nother info that might help us debug this:\n");
3177         lockdep_print_held_locks(curr);
3178
3179         pr_warn("\nstack backtrace:\n");
3180         dump_stack();
3181
3182         return 0;
3183 }
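
/*
 * A sketch of the nest_lock annotation whose misuse the report above is
 * about: mutex_lock_nest_lock(child, outer) tells lockdep that any number
 * of 'child' instances may be held at once because 'outer' serializes all
 * takers. If 'outer' is not actually held, __lock_acquire() ends up in
 * print_lock_nested_lock_not_held(). struct bucket and buckets_mutex are
 * hypothetical.
 */
struct bucket {
	struct mutex lock;
};
static DEFINE_MUTEX(buckets_mutex);

static void lock_all_buckets(struct bucket *b, int nr)
{
	int i;

	mutex_lock(&buckets_mutex);	/* required by the annotation below */
	for (i = 0; i < nr; i++)
		mutex_lock_nest_lock(&b[i].lock, &buckets_mutex);
}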
3184
3185 static int __lock_is_held(const struct lockdep_map *lock, int read);
3186
3187 /*
3188  * This gets called for every mutex_lock*()/spin_lock*() operation.
3189  * We maintain the dependency maps and validate the locking attempt:
3190  *
3191  * The callers must make sure that IRQs are disabled before calling it,
3192  * otherwise we could get an interrupt which would want to take locks,
3193  * which would end up in lockdep again.
3194  */
3195 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3196                           int trylock, int read, int check, int hardirqs_off,
3197                           struct lockdep_map *nest_lock, unsigned long ip,
3198                           int references, int pin_count)
3199 {
3200         struct task_struct *curr = current;
3201         struct lock_class *class = NULL;
3202         struct held_lock *hlock;
3203         unsigned int depth;
3204         int chain_head = 0;
3205         int class_idx;
3206         u64 chain_key;
3207
3208         if (unlikely(!debug_locks))
3209                 return 0;
3210
3211         if (!prove_locking || lock->key == &__lockdep_no_validate__)
3212                 check = 0;
3213
3214         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3215                 class = lock->class_cache[subclass];
3216         /*
3217          * Not cached?
3218          */
3219         if (unlikely(!class)) {
3220                 class = register_lock_class(lock, subclass, 0);
3221                 if (!class)
3222                         return 0;
3223         }
3224
3225         debug_class_ops_inc(class);
3226
3227         if (very_verbose(class)) {
3228                 printk("\nacquire class [%px] %s", class->key, class->name);
3229                 if (class->name_version > 1)
3230                         printk(KERN_CONT "#%d", class->name_version);
3231                 printk(KERN_CONT "\n");
3232                 dump_stack();
3233         }
3234
3235         /*
3236          * Add the lock to the list of currently held locks.
3237          * (we dont increase the depth just yet, up until the
3238          * dependency checks are done)
3239          */
3240         depth = curr->lockdep_depth;
3241         /*
3242          * Ran out of static storage for our per-task lock stack again have we?
3243          */
3244         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3245                 return 0;
3246
3247         class_idx = class - lock_classes + 1;
3248
3249         if (depth) {
3250                 hlock = curr->held_locks + depth - 1;
3251                 if (hlock->class_idx == class_idx && nest_lock) {
3252                         if (hlock->references) {
3253                                 /*
3254                                  * Check: unsigned int references:12, overflow.
3255                                  */
3256                                 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3257                                         return 0;
3258
3259                                 hlock->references++;
3260                         } else {
3261                                 hlock->references = 2;
3262                         }
3263
3264                         return 1;
3265                 }
3266         }
3267
3268         hlock = curr->held_locks + depth;
3269         /*
3270          * Plain impossible, we just registered it and checked it weren't no
3271          * NULL like.. I bet this mushroom I ate was good!
3272          */
3273         if (DEBUG_LOCKS_WARN_ON(!class))
3274                 return 0;
3275         hlock->class_idx = class_idx;
3276         hlock->acquire_ip = ip;
3277         hlock->instance = lock;
3278         hlock->nest_lock = nest_lock;
3279         hlock->irq_context = task_irq_context(curr);
3280         hlock->trylock = trylock;
3281         hlock->read = read;
3282         hlock->check = check;
3283         hlock->hardirqs_off = !!hardirqs_off;
3284         hlock->references = references;
3285 #ifdef CONFIG_LOCK_STAT
3286         hlock->waittime_stamp = 0;
3287         hlock->holdtime_stamp = lockstat_clock();
3288 #endif
3289         hlock->pin_count = pin_count;
3290
3291         if (check && !mark_irqflags(curr, hlock))
3292                 return 0;
3293
3294         /* mark it as used: */
3295         if (!mark_lock(curr, hlock, LOCK_USED))
3296                 return 0;
3297
3298         /*
3299          * Calculate the chain hash: it's the combined hash of all the
3300          * lock keys along the dependency chain. We save the hash value
3301          * at every step so that we can get the current hash easily
3302          * after unlock. The chain hash is then used to cache dependency
3303          * results.
3304          *
3305          * The 'key ID' (the class index) is the most compact value we
3306          * can use to drive the hash, rather than class->key itself.
3307          */
3308         /*
3309          * Whoops, we did it again.. ran straight out of our static allocation.
3310          */
3311         if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3312                 return 0;
3313
3314         chain_key = curr->curr_chain_key;
3315         if (!depth) {
3316                 /*
3317                  * How can we have a chain hash when we ain't got no keys?!
3318                  */
3319                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3320                         return 0;
3321                 chain_head = 1;
3322         }
3323
3324         hlock->prev_chain_key = chain_key;
3325         if (separate_irq_context(curr, hlock)) {
3326                 chain_key = 0;
3327                 chain_head = 1;
3328         }
3329         chain_key = iterate_chain_key(chain_key, class_idx);
3330
3331         if (nest_lock && !__lock_is_held(nest_lock, -1))
3332                 return print_lock_nested_lock_not_held(curr, hlock, ip);
3333
3334         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3335                 return 0;
3336
3337         curr->curr_chain_key = chain_key;
3338         curr->lockdep_depth++;
3339         check_chain_key(curr);
3340 #ifdef CONFIG_DEBUG_LOCKDEP
3341         if (unlikely(!debug_locks))
3342                 return 0;
3343 #endif
3344         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3345                 debug_locks_off();
3346                 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3347                 printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3348                        curr->lockdep_depth, MAX_LOCK_DEPTH);
3349
3350                 lockdep_print_held_locks(current);
3351                 debug_show_all_locks();
3352                 dump_stack();
3353
3354                 return 0;
3355         }
3356
3357         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3358                 max_lockdep_depth = curr->lockdep_depth;
3359
3360         return 1;
3361 }
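
/*
 * A sketch of where the 'subclass' argument above typically comes from:
 * taking two locks of the same class needs an explicit nesting annotation,
 * otherwise the second acquisition looks like a self-deadlock. struct
 * account is hypothetical; SINGLE_DEPTH_NESTING is simply subclass 1.
 */
struct account {
	spinlock_t lock;
	long balance;
};

static void transfer(struct account *from, struct account *to, long amount)
{
	/* Lock in a global (address) order so this is also deadlock free: */
	struct account *first = from < to ? from : to;
	struct account *second = from < to ? to : from;

	spin_lock(&first->lock);				/* subclass 0 */
	spin_lock_nested(&second->lock, SINGLE_DEPTH_NESTING);	/* subclass 1 */
	from->balance -= amount;
	to->balance += amount;
	spin_unlock(&second->lock);
	spin_unlock(&first->lock);
}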
3362
3363 static int
3364 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3365                            unsigned long ip)
3366 {
3367         if (!debug_locks_off())
3368                 return 0;
3369         if (debug_locks_silent)
3370                 return 0;
3371
3372         pr_warn("\n");
3373         pr_warn("=====================================\n");
3374         pr_warn("WARNING: bad unlock balance detected!\n");
3375         print_kernel_ident();
3376         pr_warn("-------------------------------------\n");
3377         pr_warn("%s/%d is trying to release lock (",
3378                 curr->comm, task_pid_nr(curr));
3379         print_lockdep_cache(lock);
3380         pr_cont(") at:\n");
3381         print_ip_sym(ip);
3382         pr_warn("but there are no more locks to release!\n");
3383         pr_warn("\nother info that might help us debug this:\n");
3384         lockdep_print_held_locks(curr);
3385
3386         pr_warn("\nstack backtrace:\n");
3387         dump_stack();
3388
3389         return 0;
3390 }
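
/*
 * A sketch of the bug reported above: the second spin_unlock() finds no
 * matching entry in held_locks[] and __lock_release() calls
 * print_unlock_imbalance_bug(). demo_lock is hypothetical.
 */
static DEFINE_SPINLOCK(demo_lock);

static void double_unlock_bug(void)
{
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);
	spin_unlock(&demo_lock);	/* -> "bad unlock balance detected!" */
}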
3391
3392 static int match_held_lock(const struct held_lock *hlock,
3393                                         const struct lockdep_map *lock)
3394 {
3395         if (hlock->instance == lock)
3396                 return 1;
3397
3398         if (hlock->references) {
3399                 const struct lock_class *class = lock->class_cache[0];
3400
3401                 if (!class)
3402                         class = look_up_lock_class(lock, 0);
3403
3404                 /*
3405                  * If look_up_lock_class() failed to find a class, we're trying
3406                  * to test if we hold a lock that has never yet been acquired.
3407                  * Clearly if the lock hasn't been acquired _ever_, we're not
3408                  * holding it either, so report failure.
3409                  */
3410                 if (!class)
3411                         return 0;
3412
3413                 /*
3414                  * References, but not a lock we're actually ref-counting?
3415                  * State got messed up, follow the sites that change ->references
3416                  * and try to make sense of it.
3417                  */
3418                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3419                         return 0;
3420
3421                 if (hlock->class_idx == class - lock_classes + 1)
3422                         return 1;
3423         }
3424
3425         return 0;
3426 }
3427
3428 /* @depth must not be zero */
3429 static struct held_lock *find_held_lock(struct task_struct *curr,
3430                                         struct lockdep_map *lock,
3431                                         unsigned int depth, int *idx)
3432 {
3433         struct held_lock *ret, *hlock, *prev_hlock;
3434         int i;
3435
3436         i = depth - 1;
3437         hlock = curr->held_locks + i;
3438         ret = hlock;
3439         if (match_held_lock(hlock, lock))
3440                 goto out;
3441
3442         ret = NULL;
3443         for (i--, prev_hlock = hlock--;
3444              i >= 0;
3445              i--, prev_hlock = hlock--) {
3446                 /*
3447                  * We must not cross into another context:
3448                  */
3449                 if (prev_hlock->irq_context != hlock->irq_context) {
3450                         ret = NULL;
3451                         break;
3452                 }
3453                 if (match_held_lock(hlock, lock)) {
3454                         ret = hlock;
3455                         break;
3456                 }
3457         }
3458
3459 out:
3460         *idx = i;
3461         return ret;
3462 }
3463
3464 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3465                               int idx)
3466 {
3467         struct held_lock *hlock;
3468
3469         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3470                 return 0;
3471
3472         for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3473                 if (!__lock_acquire(hlock->instance,
3474                                     hlock_class(hlock)->subclass,
3475                                     hlock->trylock,
3476                                     hlock->read, hlock->check,
3477                                     hlock->hardirqs_off,
3478                                     hlock->nest_lock, hlock->acquire_ip,
3479                                     hlock->references, hlock->pin_count))
3480                         return 1;
3481         }
3482         return 0;
3483 }
3484
3485 static int
3486 __lock_set_class(struct lockdep_map *lock, const char *name,
3487                  struct lock_class_key *key, unsigned int subclass,
3488                  unsigned long ip)
3489 {
3490         struct task_struct *curr = current;
3491         struct held_lock *hlock;
3492         struct lock_class *class;
3493         unsigned int depth;
3494         int i;
3495
3496         if (unlikely(!debug_locks))
3497                 return 0;
3498
3499         depth = curr->lockdep_depth;
3500         /*
3501          * This function is about (re)setting the class of a held lock,
3502          * yet we're not actually holding any locks. Naughty user!
3503          */
3504         if (DEBUG_LOCKS_WARN_ON(!depth))
3505                 return 0;
3506
3507         hlock = find_held_lock(curr, lock, depth, &i);
3508         if (!hlock)
3509                 return print_unlock_imbalance_bug(curr, lock, ip);
3510
3511         lockdep_init_map(lock, name, key, 0);
3512         class = register_lock_class(lock, subclass, 0);
3513         hlock->class_idx = class - lock_classes + 1;
3514
3515         curr->lockdep_depth = i;
3516         curr->curr_chain_key = hlock->prev_chain_key;
3517
3518         if (reacquire_held_locks(curr, depth, i))
3519                 return 0;
3520
3521         /*
3522          * I took it apart and put it back together again, except now I have
3523          * these 'spare' parts.. where shall I put them.
3524          */
3525         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3526                 return 0;
3527         return 1;
3528 }
3529
3530 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3531 {
3532         struct task_struct *curr = current;
3533         struct held_lock *hlock;
3534         unsigned int depth;
3535         int i;
3536
3537         if (unlikely(!debug_locks))
3538                 return 0;
3539
3540         depth = curr->lockdep_depth;
3541         /*
3542          * This function is about (re)setting the class of a held lock,
3543          * yet we're not actually holding any locks. Naughty user!
3544          */
3545         if (DEBUG_LOCKS_WARN_ON(!depth))
3546                 return 0;
3547
3548         hlock = find_held_lock(curr, lock, depth, &i);
3549         if (!hlock)
3550                 return print_unlock_imbalance_bug(curr, lock, ip);
3551
3552         curr->lockdep_depth = i;
3553         curr->curr_chain_key = hlock->prev_chain_key;
3554
3555         WARN(hlock->read, "downgrading a read lock");
3556         hlock->read = 1;
3557         hlock->acquire_ip = ip;
3558
3559         if (reacquire_held_locks(curr, depth, i))
3560                 return 0;
3561
3562         /*
3563          * I took it apart and put it back together again, except now I have
3564          * these 'spare' parts.. where shall I put them.
3565          */
3566         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3567                 return 0;
3568         return 1;
3569 }
3570
3571 /*
3572  * Remove the lock to the list of currently held locks - this gets
3573  * called on mutex_unlock()/spin_unlock*() (or on a failed
3574  * mutex_lock_interruptible()).
3575  *
3576  * @nested is an hysterical artifact, needs a tree wide cleanup.
3577  */
3578 static int
3579 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3580 {
3581         struct task_struct *curr = current;
3582         struct held_lock *hlock;
3583         unsigned int depth;
3584         int i;
3585
3586         if (unlikely(!debug_locks))
3587                 return 0;
3588
3589         depth = curr->lockdep_depth;
3590         /*
3591          * So we're all set to release this lock.. wait what lock? We don't
3592          * own any locks, you've been drinking again?
3593          */
3594         if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3595                  return print_unlock_imbalance_bug(curr, lock, ip);
3596
3597         /*
3598          * Check whether the lock exists in the current stack
3599          * of held locks:
3600          */
3601         hlock = find_held_lock(curr, lock, depth, &i);
3602         if (!hlock)
3603                 return print_unlock_imbalance_bug(curr, lock, ip);
3604
3605         if (hlock->instance == lock)
3606                 lock_release_holdtime(hlock);
3607
3608         WARN(hlock->pin_count, "releasing a pinned lock\n");
3609
3610         if (hlock->references) {
3611                 hlock->references--;
3612                 if (hlock->references) {
3613                         /*
3614                          * We had, and after removing one, still have
3615                          * references, the current lock stack is still
3616                          * valid. We're done!
3617                          */
3618                         return 1;
3619                 }
3620         }
3621
3622         /*
3623          * We have the right lock to unlock, 'hlock' points to it.
3624          * Now we remove it from the stack, and add back the other
3625          * entries (if any), recalculating the hash along the way:
3626          */
3627
3628         curr->lockdep_depth = i;
3629         curr->curr_chain_key = hlock->prev_chain_key;
3630
3631         /*
3632          * The most likely case is when the unlock is on the innermost
3633          * lock. In this case, we are done!
3634          */
3635         if (i == depth-1)
3636                 return 1;
3637
3638         if (reacquire_held_locks(curr, depth, i + 1))
3639                 return 0;
3640
3641         /*
3642          * We had N bottles of beer on the wall, we drank one, but now
3643          * there's not N-1 bottles of beer left on the wall...
3644          */
3645         DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
3646
3647         /*
3648          * Since reacquire_held_locks() would have called check_chain_key()
3649          * indirectly via __lock_acquire(), we don't need to do it again
3650          * on return.
3651          */
3652         return 0;
3653 }
3654
3655 static int __lock_is_held(const struct lockdep_map *lock, int read)
3656 {
3657         struct task_struct *curr = current;
3658         int i;
3659
3660         for (i = 0; i < curr->lockdep_depth; i++) {
3661                 struct held_lock *hlock = curr->held_locks + i;
3662
3663                 if (match_held_lock(hlock, lock)) {
3664                         if (read == -1 || hlock->read == read)
3665                                 return 1;
3666
3667                         return 0;
3668                 }
3669         }
3670
3671         return 0;
3672 }
3673
3674 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3675 {
3676         struct pin_cookie cookie = NIL_COOKIE;
3677         struct task_struct *curr = current;
3678         int i;
3679
3680         if (unlikely(!debug_locks))
3681                 return cookie;
3682
3683         for (i = 0; i < curr->lockdep_depth; i++) {
3684                 struct held_lock *hlock = curr->held_locks + i;
3685
3686                 if (match_held_lock(hlock, lock)) {
3687                         /*
3688                          * Grab 16bits of randomness; this is sufficient to not
3689                          * be guessable and still allows some pin nesting in
3690                          * our u32 pin_count.
3691                          */
3692                         cookie.val = 1 + (prandom_u32() >> 16);
3693                         hlock->pin_count += cookie.val;
3694                         return cookie;
3695                 }
3696         }
3697
3698         WARN(1, "pinning an unheld lock\n");
3699         return cookie;
3700 }
3701
3702 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3703 {
3704         struct task_struct *curr = current;
3705         int i;
3706
3707         if (unlikely(!debug_locks))
3708                 return;
3709
3710         for (i = 0; i < curr->lockdep_depth; i++) {
3711                 struct held_lock *hlock = curr->held_locks + i;
3712
3713                 if (match_held_lock(hlock, lock)) {
3714                         hlock->pin_count += cookie.val;
3715                         return;
3716                 }
3717         }
3718
3719         WARN(1, "pinning an unheld lock\n");
3720 }
3721
3722 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3723 {
3724         struct task_struct *curr = current;
3725         int i;
3726
3727         if (unlikely(!debug_locks))
3728                 return;
3729
3730         for (i = 0; i < curr->lockdep_depth; i++) {
3731                 struct held_lock *hlock = curr->held_locks + i;
3732
3733                 if (match_held_lock(hlock, lock)) {
3734                         if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3735                                 return;
3736
3737                         hlock->pin_count -= cookie.val;
3738
3739                         if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3740                                 hlock->pin_count = 0;
3741
3742                         return;
3743                 }
3744         }
3745
3746         WARN(1, "unpinning an unheld lock\n");
3747 }
3748
3749 /*
3750  * Check whether we follow the irq-flags state precisely:
3751  */
3752 static void check_flags(unsigned long flags)
3753 {
3754 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3755     defined(CONFIG_TRACE_IRQFLAGS)
3756         if (!debug_locks)
3757                 return;
3758
3759         if (irqs_disabled_flags(flags)) {
3760                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3761                         printk("possible reason: unannotated irqs-off.\n");
3762                 }
3763         } else {
3764                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3765                         printk("possible reason: unannotated irqs-on.\n");
3766                 }
3767         }
3768
3769         /*
3770          * We dont accurately track softirq state in e.g.
3771          * hardirq contexts (such as on 4KSTACKS), so only
3772          * check if not in hardirq contexts:
3773          */
3774         if (!hardirq_count()) {
3775                 if (softirq_count()) {
3776                         /* like the above, but with softirqs */
3777                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3778                 } else {
3779                         /* lick the above, does it taste good? */
3780                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3781                 }
3782         }
3783
3784         if (!debug_locks)
3785                 print_irqtrace_events(current);
3786 #endif
3787 }
3788
3789 void lock_set_class(struct lockdep_map *lock, const char *name,
3790                     struct lock_class_key *key, unsigned int subclass,
3791                     unsigned long ip)
3792 {
3793         unsigned long flags;
3794
3795         if (unlikely(current->lockdep_recursion))
3796                 return;
3797
3798         raw_local_irq_save(flags);
3799         current->lockdep_recursion = 1;
3800         check_flags(flags);
3801         if (__lock_set_class(lock, name, key, subclass, ip))
3802                 check_chain_key(current);
3803         current->lockdep_recursion = 0;
3804         raw_local_irq_restore(flags);
3805 }
3806 EXPORT_SYMBOL_GPL(lock_set_class);
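
/*
 * A sketch of a typical user of this interface: lock_set_subclass() in
 * <linux/lockdep.h> is a thin wrapper that calls lock_set_class() with the
 * lock's existing name and key. The scheduler uses it in
 * double_unlock_balance() to fold this_rq->lock back into subclass 0 once
 * the second runqueue lock has been dropped. The helper below is a
 * simplified, hypothetical rendition of that idea.
 */
static void unlock_second_of_pair(raw_spinlock_t *kept, raw_spinlock_t *dropped)
{
	raw_spin_unlock(dropped);
	/*
	 * 'kept' may have been acquired with SINGLE_DEPTH_NESTING while both
	 * locks were held; now that it is the only one left, record future
	 * dependencies against the base (subclass 0) class again:
	 */
	lock_set_subclass(&kept->dep_map, 0, _RET_IP_);
}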
3807
3808 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3809 {
3810         unsigned long flags;
3811
3812         if (unlikely(current->lockdep_recursion))
3813                 return;
3814
3815         raw_local_irq_save(flags);
3816         current->lockdep_recursion = 1;
3817         check_flags(flags);
3818         if (__lock_downgrade(lock, ip))
3819                 check_chain_key(current);
3820         current->lockdep_recursion = 0;
3821         raw_local_irq_restore(flags);
3822 }
3823 EXPORT_SYMBOL_GPL(lock_downgrade);
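
/*
 * The main caller of lock_downgrade() is downgrade_write() on rw_semaphores.
 * A sketch (assumes <linux/rwsem.h>; my_sem and the data it protects are
 * hypothetical):
 */
static DECLARE_RWSEM(my_sem);

static void publish_then_read(void)
{
	down_write(&my_sem);
	/* ...modify the protected data... */
	downgrade_write(&my_sem);	/* write -> read, via lock_downgrade() */
	/* ...keep reading; other readers may now come in... */
	up_read(&my_sem);
}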
3824
3825 /*
3826  * We are not always called with irqs disabled - do that here,
3827  * and also avoid lockdep recursion:
3828  */
3829 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3830                           int trylock, int read, int check,
3831                           struct lockdep_map *nest_lock, unsigned long ip)
3832 {
3833         unsigned long flags;
3834
3835         if (unlikely(current->lockdep_recursion))
3836                 return;
3837
3838         raw_local_irq_save(flags);
3839         check_flags(flags);
3840
3841         current->lockdep_recursion = 1;
3842         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3843         __lock_acquire(lock, subclass, trylock, read, check,
3844                        irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3845         current->lockdep_recursion = 0;
3846         raw_local_irq_restore(flags);
3847 }
3848 EXPORT_SYMBOL_GPL(lock_acquire);
3849
3850 void lock_release(struct lockdep_map *lock, int nested,
3851                           unsigned long ip)
3852 {
3853         unsigned long flags;
3854
3855         if (unlikely(current->lockdep_recursion))
3856                 return;
3857
3858         raw_local_irq_save(flags);
3859         check_flags(flags);
3860         current->lockdep_recursion = 1;
3861         trace_lock_release(lock, ip);
3862         if (__lock_release(lock, nested, ip))
3863                 check_chain_key(current);
3864         current->lockdep_recursion = 0;
3865         raw_local_irq_restore(flags);
3866 }
3867 EXPORT_SYMBOL_GPL(lock_release);
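
/*
 * A sketch of how a home-grown locking primitive hooks into
 * lock_acquire()/lock_release(), which is all the spin_acquire()/
 * spin_release() style wrappers in the locking headers do. struct
 * my_cas_lock and its helpers are hypothetical; the dep_map must have been
 * set up with lockdep_init_map() and a static key before first use.
 */
struct my_cas_lock {
	atomic_t		owner;
	struct lockdep_map	dep_map;
};

static void my_cas_lock_acquire(struct my_cas_lock *l)
{
	/* subclass 0, not a trylock, exclusive (read=0), check=1, no nest_lock: */
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	while (atomic_cmpxchg(&l->owner, 0, 1) != 0)
		cpu_relax();
}

static void my_cas_lock_release(struct my_cas_lock *l)
{
	lock_release(&l->dep_map, 0, _RET_IP_);
	atomic_set(&l->owner, 0);
}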
3868
3869 int lock_is_held_type(const struct lockdep_map *lock, int read)
3870 {
3871         unsigned long flags;
3872         int ret = 0;
3873
3874         if (unlikely(current->lockdep_recursion))
3875                 return 1; /* avoid false negative lockdep_assert_held() */
3876
3877         raw_local_irq_save(flags);
3878         check_flags(flags);
3879
3880         current->lockdep_recursion = 1;
3881         ret = __lock_is_held(lock, read);
3882         current->lockdep_recursion = 0;
3883         raw_local_irq_restore(flags);
3884
3885         return ret;
3886 }
3887 EXPORT_SYMBOL_GPL(lock_is_held_type);
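
/*
 * The usual consumers of lock_is_held_type() are the assertion helpers in
 * <linux/lockdep.h>: lockdep_is_held() passes read == -1, i.e. "held in any
 * mode". A sketch of the common pattern (struct foo and foo_update() are
 * hypothetical):
 */
struct foo {
	spinlock_t lock;
	int state;
};

static void foo_update(struct foo *f, int state)
{
	/* Document and enforce this helper's locking contract: */
	lockdep_assert_held(&f->lock);
	f->state = state;
}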
3888
3889 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3890 {
3891         struct pin_cookie cookie = NIL_COOKIE;
3892         unsigned long flags;
3893
3894         if (unlikely(current->lockdep_recursion))
3895                 return cookie;
3896
3897         raw_local_irq_save(flags);
3898         check_flags(flags);
3899
3900         current->lockdep_recursion = 1;
3901         cookie = __lock_pin_lock(lock);
3902         current->lockdep_recursion = 0;
3903         raw_local_irq_restore(flags);
3904
3905         return cookie;
3906 }
3907 EXPORT_SYMBOL_GPL(lock_pin_lock);
3908
3909 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3910 {
3911         unsigned long flags;
3912
3913         if (unlikely(current->lockdep_recursion))
3914                 return;
3915
3916         raw_local_irq_save(flags);
3917         check_flags(flags);
3918
3919         current->lockdep_recursion = 1;
3920         __lock_repin_lock(lock, cookie);
3921         current->lockdep_recursion = 0;
3922         raw_local_irq_restore(flags);
3923 }
3924 EXPORT_SYMBOL_GPL(lock_repin_lock);
3925
3926 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3927 {
3928         unsigned long flags;
3929
3930         if (unlikely(current->lockdep_recursion))
3931                 return;
3932
3933         raw_local_irq_save(flags);
3934         check_flags(flags);
3935
3936         current->lockdep_recursion = 1;
3937         __lock_unpin_lock(lock, cookie);
3938         current->lockdep_recursion = 0;
3939         raw_local_irq_restore(flags);
3940 }
3941 EXPORT_SYMBOL_GPL(lock_unpin_lock);
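
/*
 * A sketch of the pinning API as used through the lockdep_pin_lock()/
 * lockdep_unpin_lock() wrappers in <linux/lockdep.h> (the scheduler pins
 * rq->lock this way around code that must not drop it behind its back).
 * struct my_q and the callback are hypothetical.
 */
struct my_q {
	raw_spinlock_t lock;
};

static void run_callback_locked(struct my_q *q, void (*callback)(struct my_q *))
{
	struct pin_cookie cookie;

	raw_spin_lock(&q->lock);
	cookie = lockdep_pin_lock(&q->lock);	/* unlocking now triggers a WARN */
	callback(q);				/* must keep q->lock held throughout */
	lockdep_unpin_lock(&q->lock, cookie);
	raw_spin_unlock(&q->lock);
}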
3942
3943 #ifdef CONFIG_LOCK_STAT
3944 static int
3945 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3946                            unsigned long ip)
3947 {
3948         if (!debug_locks_off())
3949                 return 0;
3950         if (debug_locks_silent)
3951                 return 0;
3952
3953         pr_warn("\n");
3954         pr_warn("=================================\n");
3955         pr_warn("WARNING: bad contention detected!\n");
3956         print_kernel_ident();
3957         pr_warn("---------------------------------\n");
3958         pr_warn("%s/%d is trying to contend lock (",
3959                 curr->comm, task_pid_nr(curr));
3960         print_lockdep_cache(lock);
3961         pr_cont(") at:\n");
3962         print_ip_sym(ip);
3963         pr_warn("but there are no locks held!\n");
3964         pr_warn("\nother info that might help us debug this:\n");
3965         lockdep_print_held_locks(curr);
3966
3967         pr_warn("\nstack backtrace:\n");
3968         dump_stack();
3969
3970         return 0;
3971 }
3972
3973 static void
3974 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3975 {
3976         struct task_struct *curr = current;
3977         struct held_lock *hlock;
3978         struct lock_class_stats *stats;
3979         unsigned int depth;
3980         int i, contention_point, contending_point;
3981
3982         depth = curr->lockdep_depth;
3983         /*
3984          * Whee, we contended on this lock, except it seems we're not
3985          * actually trying to acquire anything much at all..
3986          */
3987         if (DEBUG_LOCKS_WARN_ON(!depth))
3988                 return;
3989
3990         hlock = find_held_lock(curr, lock, depth, &i);
3991         if (!hlock) {
3992                 print_lock_contention_bug(curr, lock, ip);
3993                 return;
3994         }
3995
3996         if (hlock->instance != lock)
3997                 return;
3998
3999         hlock->waittime_stamp = lockstat_clock();
4000
4001         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4002         contending_point = lock_point(hlock_class(hlock)->contending_point,
4003                                       lock->ip);
4004
4005         stats = get_lock_stats(hlock_class(hlock));
4006         if (contention_point < LOCKSTAT_POINTS)
4007                 stats->contention_point[contention_point]++;
4008         if (contending_point < LOCKSTAT_POINTS)
4009                 stats->contending_point[contending_point]++;
4010         if (lock->cpu != smp_processor_id())
4011                 stats->bounces[bounce_contended + !!hlock->read]++;
4012 }
4013
4014 static void
4015 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4016 {
4017         struct task_struct *curr = current;
4018         struct held_lock *hlock;
4019         struct lock_class_stats *stats;
4020         unsigned int depth;
4021         u64 now, waittime = 0;
4022         int i, cpu;
4023
4024         depth = curr->lockdep_depth;
4025         /*
4026          * Yay, we acquired ownership of this lock we didn't try to
4027          * acquire, how the heck did that happen?
4028          */
4029         if (DEBUG_LOCKS_WARN_ON(!depth))
4030                 return;
4031
4032         hlock = find_held_lock(curr, lock, depth, &i);
4033         if (!hlock) {
4034                 print_lock_contention_bug(curr, lock, _RET_IP_);
4035                 return;
4036         }
4037
4038         if (hlock->instance != lock)
4039                 return;
4040
4041         cpu = smp_processor_id();
4042         if (hlock->waittime_stamp) {
4043                 now = lockstat_clock();
4044                 waittime = now - hlock->waittime_stamp;
4045                 hlock->holdtime_stamp = now;
4046         }
4047
4048         trace_lock_acquired(lock, ip);
4049
4050         stats = get_lock_stats(hlock_class(hlock));
4051         if (waittime) {
4052                 if (hlock->read)
4053                         lock_time_inc(&stats->read_waittime, waittime);
4054                 else
4055                         lock_time_inc(&stats->write_waittime, waittime);
4056         }
4057         if (lock->cpu != cpu)
4058                 stats->bounces[bounce_acquired + !!hlock->read]++;
4059
4060         lock->cpu = cpu;
4061         lock->ip = ip;
4062 }
4063
4064 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4065 {
4066         unsigned long flags;
4067
4068         if (unlikely(!lock_stat || !debug_locks))
4069                 return;
4070
4071         if (unlikely(current->lockdep_recursion))
4072                 return;
4073
4074         raw_local_irq_save(flags);
4075         check_flags(flags);
4076         current->lockdep_recursion = 1;
4077         trace_lock_contended(lock, ip);
4078         __lock_contended(lock, ip);
4079         current->lockdep_recursion = 0;
4080         raw_local_irq_restore(flags);
4081 }
4082 EXPORT_SYMBOL_GPL(lock_contended);
4083
4084 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4085 {
4086         unsigned long flags;
4087
4088         if (unlikely(!lock_stat || !debug_locks))
4089                 return;
4090
4091         if (unlikely(current->lockdep_recursion))
4092                 return;
4093
4094         raw_local_irq_save(flags);
4095         check_flags(flags);
4096         current->lockdep_recursion = 1;
4097         __lock_acquired(lock, ip);
4098         current->lockdep_recursion = 0;
4099         raw_local_irq_restore(flags);
4100 }
4101 EXPORT_SYMBOL_GPL(lock_acquired);
4102 #endif
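
/*
 * The lock_contended()/lock_acquired() pair above is driven by the
 * LOCK_CONTENDED() wrapper in <linux/lockdep.h>; the slowpath of a sleeping
 * lock conceptually looks like the simplified sketch below.
 */
#define MY_LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)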
4103
4104 /*
4105  * Used by the testsuite, sanitize the validator state
4106  * after a simulated failure:
4107  */
4108
4109 void lockdep_reset(void)
4110 {
4111         unsigned long flags;
4112         int i;
4113
4114         raw_local_irq_save(flags);
4115         current->curr_chain_key = 0;
4116         current->lockdep_depth = 0;
4117         current->lockdep_recursion = 0;
4118         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4119         nr_hardirq_chains = 0;
4120         nr_softirq_chains = 0;
4121         nr_process_chains = 0;
4122         debug_locks = 1;
4123         for (i = 0; i < CHAINHASH_SIZE; i++)
4124                 INIT_HLIST_HEAD(chainhash_table + i);
4125         raw_local_irq_restore(flags);
4126 }
4127
4128 /*
4129  * Remove all references to a lock class. The caller must hold the graph lock.
4130  */
4131 static void zap_class(struct lock_class *class)
4132 {
4133         struct lock_list *entry;
4134         int i;
4135
4136         /*
4137          * Remove all dependencies this lock is
4138          * involved in:
4139          */
4140         for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
4141                 if (entry->class != class && entry->links_to != class)
4142                         continue;
4143                 list_del_rcu(&entry->entry);
4144                 /* Clear .class and .links_to to avoid double removal. */
4145                 WRITE_ONCE(entry->class, NULL);
4146                 WRITE_ONCE(entry->links_to, NULL);
4147         }
4148         /*
4149          * Unhash the class and remove it from the all_lock_classes list:
4150          */
4151         hlist_del_rcu(&class->hash_entry);
4152         list_del(&class->lock_entry);
4153
4154         RCU_INIT_POINTER(class->key, NULL);
4155         RCU_INIT_POINTER(class->name, NULL);
4156 }
4157
4158 static inline int within(const void *addr, void *start, unsigned long size)
4159 {
4160         return addr >= start && addr < start + size;
4161 }
4162
4163 static void __lockdep_free_key_range(void *start, unsigned long size)
4164 {
4165         struct lock_class *class;
4166         struct hlist_head *head;
4167         int i;
4168
4169         /* Unhash all classes that were created by a module. */
4170         for (i = 0; i < CLASSHASH_SIZE; i++) {
4171                 head = classhash_table + i;
4172                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4173                         if (!within(class->key, start, size) &&
4174                             !within(class->name, start, size))
4175                                 continue;
4176                         zap_class(class);
4177                 }
4178         }
4179 }
4180
4181 /*
4182  * Used in module.c to remove lock classes from memory that is going to be
4183  * freed; and possibly re-used by other modules.
4184  *
4185  * We will have had one sync_sched() before getting here, so we're guaranteed
4186  * nobody will look up these exact classes -- they're properly dead but still
4187  * allocated.
4188  */
4189 void lockdep_free_key_range(void *start, unsigned long size)
4190 {
4191         unsigned long flags;
4192         int locked;
4193
4194         init_data_structures_once();
4195
4196         raw_local_irq_save(flags);
4197         locked = graph_lock();
4198         __lockdep_free_key_range(start, size);
4199         if (locked)
4200                 graph_unlock();
4201         raw_local_irq_restore(flags);
4202
4203         /*
4204          * Wait for any possible iterators from look_up_lock_class() to pass
4205          * before continuing to free the memory they refer to.
4206          *
4207          * synchronize_rcu() is sufficient because the read side runs with IRQs disabled.
4208          */
4209         synchronize_rcu();
4210
4211         /*
4212          * XXX at this point we could return the resources to the pool;
4213          * instead we leak them. We would need to change to bitmap allocators
4214          * instead of the linear allocators we have now.
4215          */
4216 }
4217
4218 /*
4219  * Check whether any element of the @lock->class_cache[] array refers to a
4220  * registered lock class. The caller must hold either the graph lock or the
4221  * RCU read lock.
4222  */
4223 static bool lock_class_cache_is_registered(struct lockdep_map *lock)
4224 {
4225         struct lock_class *class;
4226         struct hlist_head *head;
4227         int i, j;
4228
4229         for (i = 0; i < CLASSHASH_SIZE; i++) {
4230                 head = classhash_table + i;
4231                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4232                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4233                                 if (lock->class_cache[j] == class)
4234                                         return true;
4235                 }
4236         }
4237         return false;
4238 }
4239
4240 /* The caller must hold the graph lock. Does not sleep. */
4241 static void __lockdep_reset_lock(struct lockdep_map *lock)
4242 {
4243         struct lock_class *class;
4244         int j;
4245
4246         /*
4247          * Remove all classes this lock might have:
4248          */
4249         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4250                 /*
4251                  * If the class exists we look it up and zap it:
4252                  */
4253                 class = look_up_lock_class(lock, j);
4254                 if (class)
4255                         zap_class(class);
4256         }
4257         /*
4258          * Debug check: in the end all mapped classes should
4259          * be gone.
4260          */
4261         if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
4262                 debug_locks_off();
4263 }
4264
4265 void lockdep_reset_lock(struct lockdep_map *lock)
4266 {
4267         unsigned long flags;
4268         int locked;
4269
4270         init_data_structures_once();
4271
4272         raw_local_irq_save(flags);
4273         locked = graph_lock();
4274         __lockdep_reset_lock(lock);
4275         if (locked)
4276                 graph_unlock();
4277         raw_local_irq_restore(flags);
4278 }
4279
4280 void __init lockdep_init(void)
4281 {
4282         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4283
4284         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4285         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4286         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4287         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4288         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4289         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4290         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4291
4292         printk(" memory used by lock dependency info: %zu kB\n",
4293                (sizeof(lock_classes) +
4294                 sizeof(classhash_table) +
4295                 sizeof(list_entries) +
4296                 sizeof(chainhash_table)
4297 #ifdef CONFIG_PROVE_LOCKING
4298                 + sizeof(lock_cq)
4299                 + sizeof(lock_chains)
4300                 + sizeof(chain_hlocks)
4301 #endif
4302                 ) / 1024
4303                 );
4304
4305         printk(" per task-struct memory footprint: %zu bytes\n",
4306                sizeof(((struct task_struct *)NULL)->held_locks));
4307 }
4308
4309 static void
4310 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4311                      const void *mem_to, struct held_lock *hlock)
4312 {
4313         if (!debug_locks_off())
4314                 return;
4315         if (debug_locks_silent)
4316                 return;
4317
4318         pr_warn("\n");
4319         pr_warn("=========================\n");
4320         pr_warn("WARNING: held lock freed!\n");
4321         print_kernel_ident();
4322         pr_warn("-------------------------\n");
4323         pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
4324                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4325         print_lock(hlock);
4326         lockdep_print_held_locks(curr);
4327
4328         pr_warn("\nstack backtrace:\n");
4329         dump_stack();
4330 }
4331
4332 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4333                                 const void* lock_from, unsigned long lock_len)
4334 {
4335         return lock_from + lock_len <= mem_from ||
4336                 mem_from + mem_len <= lock_from;
4337 }
4338
4339 /*
4340  * Called when kernel memory is freed (or unmapped), or if a lock
4341  * is destroyed or reinitialized - this code checks whether there is
4342  * any held lock in the memory range of <from> to <to>:
4343  */
4344 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4345 {
4346         struct task_struct *curr = current;
4347         struct held_lock *hlock;
4348         unsigned long flags;
4349         int i;
4350
4351         if (unlikely(!debug_locks))
4352                 return;
4353
4354         raw_local_irq_save(flags);
4355         for (i = 0; i < curr->lockdep_depth; i++) {
4356                 hlock = curr->held_locks + i;
4357
4358                 if (not_in_range(mem_from, mem_len, hlock->instance,
4359                                         sizeof(*hlock->instance)))
4360                         continue;
4361
4362                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4363                 break;
4364         }
4365         raw_local_irq_restore(flags);
4366 }
4367 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
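
/*
 * A sketch of the bug this check catches; the slab and vmalloc free paths
 * call debug_check_no_locks_freed() when lockdep is enabled. Assumes
 * <linux/slab.h>; struct conn is hypothetical.
 */
struct conn {
	spinlock_t lock;
	int refcount;
};

static void conn_put_buggy(struct conn *c)
{
	spin_lock(&c->lock);
	if (--c->refcount == 0)
		kfree(c);		/* frees memory containing a held lock! */
	else
		spin_unlock(&c->lock);
}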
4368
4369 static void print_held_locks_bug(void)
4370 {
4371         if (!debug_locks_off())
4372                 return;
4373         if (debug_locks_silent)
4374                 return;
4375
4376         pr_warn("\n");
4377         pr_warn("====================================\n");
4378         pr_warn("WARNING: %s/%d still has locks held!\n",
4379                current->comm, task_pid_nr(current));
4380         print_kernel_ident();
4381         pr_warn("------------------------------------\n");
4382         lockdep_print_held_locks(current);
4383         pr_warn("\nstack backtrace:\n");
4384         dump_stack();
4385 }
4386
4387 void debug_check_no_locks_held(void)
4388 {
4389         if (unlikely(current->lockdep_depth > 0))
4390                 print_held_locks_bug();
4391 }
4392 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
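
/*
 * A sketch of what trips the check above: do_exit() calls
 * debug_check_no_locks_held(), so a kernel thread (or a task dying on an
 * error path) that still owns a lock gets reported. buggy_thread() below is
 * a hypothetical thread function meant for kthread_run(); demo_mutex is
 * hypothetical too.
 */
static DEFINE_MUTEX(demo_mutex);

static int buggy_thread(void *unused)
{
	mutex_lock(&demo_mutex);
	/* ...missing mutex_unlock() on this path... */
	return 0;	/* thread exits -> "still has locks held!" */
}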
4393
4394 #ifdef __KERNEL__
4395 void debug_show_all_locks(void)
4396 {
4397         struct task_struct *g, *p;
4398
4399         if (unlikely(!debug_locks)) {
4400                 pr_warn("INFO: lockdep is turned off.\n");
4401                 return;
4402         }
4403         pr_warn("\nShowing all locks held in the system:\n");
4404
4405         rcu_read_lock();
4406         for_each_process_thread(g, p) {
4407                 if (!p->lockdep_depth)
4408                         continue;
4409                 lockdep_print_held_locks(p);
4410                 touch_nmi_watchdog();
4411                 touch_all_softlockup_watchdogs();
4412         }
4413         rcu_read_unlock();
4414
4415         pr_warn("\n");
4416         pr_warn("=============================================\n\n");
4417 }
4418 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4419 #endif
4420
4421 /*
4422  * Careful: only use this function if you are sure that
4423  * the task cannot run in parallel!
4424  */
4425 void debug_show_held_locks(struct task_struct *task)
4426 {
4427         if (unlikely(!debug_locks)) {
4428                 printk("INFO: lockdep is turned off.\n");
4429                 return;
4430         }
4431         lockdep_print_held_locks(task);
4432 }
4433 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4434
4435 asmlinkage __visible void lockdep_sys_exit(void)
4436 {
4437         struct task_struct *curr = current;
4438
4439         if (unlikely(curr->lockdep_depth)) {
4440                 if (!debug_locks_off())
4441                         return;
4442                 pr_warn("\n");
4443                 pr_warn("================================================\n");
4444                 pr_warn("WARNING: lock held when returning to user space!\n");
4445                 print_kernel_ident();
4446                 pr_warn("------------------------------------------------\n");
4447                 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4448                                 curr->comm, curr->pid);
4449                 lockdep_print_held_locks(curr);
4450         }
4451
4452         /*
4453          * The lock history for each syscall should be independent. So wipe the
4454          * slate clean on return to userspace.
4455          */
4456         lockdep_invariant_state(false);
4457 }
4458
4459 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4460 {
4461         struct task_struct *curr = current;
4462
4463         /* Note: the following can be executed concurrently, so be careful. */
4464         pr_warn("\n");
4465         pr_warn("=============================\n");
4466         pr_warn("WARNING: suspicious RCU usage\n");
4467         print_kernel_ident();
4468         pr_warn("-----------------------------\n");
4469         pr_warn("%s:%d %s!\n", file, line, s);
4470         pr_warn("\nother info that might help us debug this:\n\n");
4471         pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4472                !rcu_lockdep_current_cpu_online()
4473                         ? "RCU used illegally from offline CPU!\n"
4474                         : !rcu_is_watching()
4475                                 ? "RCU used illegally from idle CPU!\n"
4476                                 : "",
4477                rcu_scheduler_active, debug_locks);
4478
4479         /*
4480          * If a CPU is in the RCU-free window in idle (i.e. in the section
4481          * between rcu_idle_enter() and rcu_idle_exit()), then RCU
4482          * considers that CPU to be in an "extended quiescent state",
4483          * which means that RCU will be completely ignoring that CPU.
4484          * Therefore, rcu_read_lock() and friends have absolutely no
4485          * effect on a CPU running in that state. In other words, even if
4486          * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4487          * delete data structures out from under it.  RCU really has no
4488          * choice here: we need to keep an RCU-free window in idle where
4489          * the CPU may possibly enter into low power mode. This way CPUs
4490          * that started a grace period can notice the extended quiescent
4491          * state without having to wait for us. Otherwise we would delay
4492          * every grace period for as long as we run in the idle task.
4493          *
4494          * So complain bitterly if someone does call rcu_read_lock(),
4495          * rcu_read_lock_bh() and so on from extended quiescent states.
4496          */
4497         if (!rcu_is_watching())
4498                 pr_warn("RCU used illegally from extended quiescent state!\n");
4499
4500         lockdep_print_held_locks(curr);
4501         pr_warn("\nstack backtrace:\n");
4502         dump_stack();
4503 }
4504 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
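
/*
 * A sketch of what ends up here: rcu_dereference_check() and friends call
 * lockdep_rcu_suspicious() via RCU_LOCKDEP_WARN() when a dereference is
 * covered neither by rcu_read_lock() nor by the lock named in its
 * condition. Assumes <linux/rcupdate.h>; struct gdata, gdata_lock and gp
 * are hypothetical.
 */
struct gdata {
	int val;
};

static DEFINE_SPINLOCK(gdata_lock);
static struct gdata __rcu *gp;

static int read_gdata_buggy(void)
{
	/* Neither rcu_read_lock() nor gdata_lock is held here: */
	struct gdata *p = rcu_dereference_check(gp, lockdep_is_held(&gdata_lock));

	return p ? p->val : 0;	/* preceded by a "suspicious RCU usage" splat */
}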