lib/debugobjects.c
1 /*
2  * Generic infrastructure for lifetime debugging of objects.
3  *
4  * Started by Thomas Gleixner
5  *
6  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7  *
8  * For licensing details see kernel-base/COPYING
9  */
10
11 #define pr_fmt(fmt) "ODEBUG: " fmt
12
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/debugfs.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/kmemleak.h>
22
23 #define ODEBUG_HASH_BITS        14
24 #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)
25
26 #define ODEBUG_POOL_SIZE        1024
27 #define ODEBUG_POOL_MIN_LEVEL   256
28
29 #define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
32
33 struct debug_bucket {
34         struct hlist_head       list;
35         raw_spinlock_t          lock;
36 };
37
38 static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];
39
40 static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
41
42 static DEFINE_RAW_SPINLOCK(pool_lock);
43
44 static HLIST_HEAD(obj_pool);
45 static HLIST_HEAD(obj_to_free);
46
47 static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
48 static int                      obj_pool_free = ODEBUG_POOL_SIZE;
49 static int                      obj_pool_used;
50 static int                      obj_pool_max_used;
51 /* The number of objs on the global free list */
52 static int                      obj_nr_tofree;
53 static struct kmem_cache        *obj_cache;
54
55 static int                      debug_objects_maxchain __read_mostly;
56 static int __maybe_unused       debug_objects_maxchecked __read_mostly;
57 static int                      debug_objects_fixups __read_mostly;
58 static int                      debug_objects_warnings __read_mostly;
59 static int                      debug_objects_enabled __read_mostly
60                                 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
61 static int                      debug_objects_pool_size __read_mostly
62                                 = ODEBUG_POOL_SIZE;
63 static int                      debug_objects_pool_min_level __read_mostly
64                                 = ODEBUG_POOL_MIN_LEVEL;
65 static struct debug_obj_descr   *descr_test  __read_mostly;
66
67 /*
68  * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
69  */
70 static int                      debug_objects_allocated;
71 static int                      debug_objects_freed;
72
73 static void free_obj_work(struct work_struct *work);
74 static DECLARE_WORK(debug_obj_work, free_obj_work);
75
76 static int __init enable_object_debug(char *str)
77 {
78         debug_objects_enabled = 1;
79         return 0;
80 }
81
82 static int __init disable_object_debug(char *str)
83 {
84         debug_objects_enabled = 0;
85         return 0;
86 }
87
88 early_param("debug_objects", enable_object_debug);
89 early_param("no_debug_objects", disable_object_debug);
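/*
 * A minimal usage sketch (only the parameter names registered above are
 * real, the command line shown is an illustration): the two early_param()
 * hooks allow the CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT Kconfig default to
 * be overridden at boot.
 *
 *	debug_objects		force object debugging on
 *	no_debug_objects	force object debugging off
 */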
90
91 static const char *obj_states[ODEBUG_STATE_MAX] = {
92         [ODEBUG_STATE_NONE]             = "none",
93         [ODEBUG_STATE_INIT]             = "initialized",
94         [ODEBUG_STATE_INACTIVE]         = "inactive",
95         [ODEBUG_STATE_ACTIVE]           = "active",
96         [ODEBUG_STATE_DESTROYED]        = "destroyed",
97         [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
98 };
99
100 static void fill_pool(void)
101 {
102         gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
103         struct debug_obj *new, *obj;
104         unsigned long flags;
105
106         if (likely(obj_pool_free >= debug_objects_pool_min_level))
107                 return;
108
109         /*
110          * Reuse objs from the global free list; they will be reinitialized
111          * when allocating.
112          */
113         while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
114                 raw_spin_lock_irqsave(&pool_lock, flags);
115                 /*
116                  * Recheck with the lock held as the worker thread might have
117                  * won the race and freed the global free list already.
118                  */
119                 if (obj_nr_tofree) {
120                         obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
121                         hlist_del(&obj->node);
122                         obj_nr_tofree--;
123                         hlist_add_head(&obj->node, &obj_pool);
124                         obj_pool_free++;
125                 }
126                 raw_spin_unlock_irqrestore(&pool_lock, flags);
127         }
128
129         if (unlikely(!obj_cache))
130                 return;
131
132         while (obj_pool_free < debug_objects_pool_min_level) {
133
134                 new = kmem_cache_zalloc(obj_cache, gfp);
135                 if (!new)
136                         return;
137
138                 raw_spin_lock_irqsave(&pool_lock, flags);
139                 hlist_add_head(&new->node, &obj_pool);
140                 debug_objects_allocated++;
141                 obj_pool_free++;
142                 raw_spin_unlock_irqrestore(&pool_lock, flags);
143         }
144 }
145
146 /*
147  * Look up an object in the hash bucket.
148  */
149 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
150 {
151         struct debug_obj *obj;
152         int cnt = 0;
153
154         hlist_for_each_entry(obj, &b->list, node) {
155                 cnt++;
156                 if (obj->object == addr)
157                         return obj;
158         }
159         if (cnt > debug_objects_maxchain)
160                 debug_objects_maxchain = cnt;
161
162         return NULL;
163 }
164
165 /*
166  * Allocate a new object. If the pool is empty, switch off the debugger.
167  * Must be called with interrupts disabled.
168  */
169 static struct debug_obj *
170 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
171 {
172         struct debug_obj *obj = NULL;
173
174         raw_spin_lock(&pool_lock);
175         if (obj_pool.first) {
176                 obj         = hlist_entry(obj_pool.first, typeof(*obj), node);
177
178                 obj->object = addr;
179                 obj->descr  = descr;
180                 obj->state  = ODEBUG_STATE_NONE;
181                 obj->astate = 0;
182                 hlist_del(&obj->node);
183
184                 hlist_add_head(&obj->node, &b->list);
185
186                 obj_pool_used++;
187                 if (obj_pool_used > obj_pool_max_used)
188                         obj_pool_max_used = obj_pool_used;
189
190                 obj_pool_free--;
191                 if (obj_pool_free < obj_pool_min_free)
192                         obj_pool_min_free = obj_pool_free;
193         }
194         raw_spin_unlock(&pool_lock);
195
196         return obj;
197 }
198
199 /*
200  * workqueue function to free objects.
201  *
202  * To reduce contention on the global pool_lock, the actual freeing of
203  * debug objects will be delayed if the pool_lock is busy.
204  */
205 static void free_obj_work(struct work_struct *work)
206 {
207         struct hlist_node *tmp;
208         struct debug_obj *obj;
209         unsigned long flags;
210         HLIST_HEAD(tofree);
211
212         if (!raw_spin_trylock_irqsave(&pool_lock, flags))
213                 return;
214
215         /*
216          * The objs on the pool list might be allocated before the work is
217          * run, so recheck whether the pool list is full. If not, refill
218          * the pool list from the global free list.
219          */
220         while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
221                 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
222                 hlist_del(&obj->node);
223                 hlist_add_head(&obj->node, &obj_pool);
224                 obj_pool_free++;
225                 obj_nr_tofree--;
226         }
227
228         /*
229          * Pool list is already full and there are still objs on the free
230          * list. Move remaining free objs to a temporary list to free the
231          * memory outside the pool_lock held region.
232          */
233         if (obj_nr_tofree) {
234                 hlist_move_list(&obj_to_free, &tofree);
235                 debug_objects_freed += obj_nr_tofree;
236                 obj_nr_tofree = 0;
237         }
238         raw_spin_unlock_irqrestore(&pool_lock, flags);
239
240         hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
241                 hlist_del(&obj->node);
242                 kmem_cache_free(obj_cache, obj);
243         }
244 }
245
246 static bool __free_object(struct debug_obj *obj)
247 {
248         unsigned long flags;
249         bool work;
250
251         raw_spin_lock_irqsave(&pool_lock, flags);
252         work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
253         obj_pool_used--;
254
255         if (work) {
256                 obj_nr_tofree++;
257                 hlist_add_head(&obj->node, &obj_to_free);
258         } else {
259                 obj_pool_free++;
260                 hlist_add_head(&obj->node, &obj_pool);
261         }
262         raw_spin_unlock_irqrestore(&pool_lock, flags);
263         return work;
264 }
265
266 /*
267  * Put the object back into the pool and schedule work to free objects
268  * if necessary.
269  */
270 static void free_object(struct debug_obj *obj)
271 {
272         if (__free_object(obj))
273                 schedule_work(&debug_obj_work);
274 }
275
276 /*
277  * We ran out of memory. That means we probably have tons of objects
278  * allocated.
279  */
280 static void debug_objects_oom(void)
281 {
282         struct debug_bucket *db = obj_hash;
283         struct hlist_node *tmp;
284         HLIST_HEAD(freelist);
285         struct debug_obj *obj;
286         unsigned long flags;
287         int i;
288
289         pr_warn("Out of memory. ODEBUG disabled\n");
290
291         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
292                 raw_spin_lock_irqsave(&db->lock, flags);
293                 hlist_move_list(&db->list, &freelist);
294                 raw_spin_unlock_irqrestore(&db->lock, flags);
295
296                 /* Now free them */
297                 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
298                         hlist_del(&obj->node);
299                         free_object(obj);
300                 }
301         }
302 }
303
304 /*
305  * We hash on the chunk number of the address (addr >> ODEBUG_CHUNK_SHIFT),
306  * so freed objects can be found simply by checking the affected bucket.
307  */
308 static struct debug_bucket *get_bucket(unsigned long addr)
309 {
310         unsigned long hash;
311
312         hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
313         return &obj_hash[hash];
314 }
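/*
 * A worked example of the bucket mapping above (addresses invented for
 * illustration, 4K pages assumed so ODEBUG_CHUNK_SHIFT == 12): objects at
 * 0xffff888012345678 and 0xffff888012345abc both shift down to chunk
 * number 0xffff888012345, so hash_long() sends them to the same one of
 * the ODEBUG_HASH_SIZE (16384) buckets. Objects within one chunk always
 * share a bucket, which is what lets __debug_check_no_obj_freed() scan a
 * freed memory range chunk by chunk.
 */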
315
316 static void debug_print_object(struct debug_obj *obj, char *msg)
317 {
318         struct debug_obj_descr *descr = obj->descr;
319         static int limit;
320
321         if (limit < 5 && descr != descr_test) {
322                 void *hint = descr->debug_hint ?
323                         descr->debug_hint(obj->object) : NULL;
324                 limit++;
325                 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
326                                  "object type: %s hint: %pS\n",
327                         msg, obj_states[obj->state], obj->astate,
328                         descr->name, hint);
329         }
330         debug_objects_warnings++;
331 }
332
333 /*
334  * Try to repair the damage, so we have a better chance to get useful
335  * debug output.
336  */
337 static bool
338 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
339                    void * addr, enum debug_obj_state state)
340 {
341         if (fixup && fixup(addr, state)) {
342                 debug_objects_fixups++;
343                 return true;
344         }
345         return false;
346 }
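/*
 * A sketch of a fixup callback (the my_obj names are hypothetical; the
 * selftest fixups further down follow the same pattern): the callback is
 * handed the object address and the state that triggered the warning,
 * repairs what it can, and returns true only when something was actually
 * fixed, so debug_objects_fixups counts real repairs.
 *
 *	static bool my_obj_fixup_init(void *addr, enum debug_obj_state state)
 *	{
 *		struct my_obj *o = addr;
 *
 *		switch (state) {
 *		case ODEBUG_STATE_ACTIVE:
 *			my_obj_stop(o);
 *			debug_object_init(o, &my_obj_debug_descr);
 *			return true;
 *		default:
 *			return false;
 *		}
 *	}
 */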
347
348 static void debug_object_is_on_stack(void *addr, int onstack)
349 {
350         int is_on_stack;
351         static int limit;
352
353         if (limit > 4)
354                 return;
355
356         is_on_stack = object_is_on_stack(addr);
357         if (is_on_stack == onstack)
358                 return;
359
360         limit++;
361         if (is_on_stack)
362                 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
363                          task_stack_page(current));
364         else
365                 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
366                          task_stack_page(current));
367
368         WARN_ON(1);
369 }
370
371 static void
372 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
373 {
374         enum debug_obj_state state;
375         struct debug_bucket *db;
376         struct debug_obj *obj;
377         unsigned long flags;
378
379         fill_pool();
380
381         db = get_bucket((unsigned long) addr);
382
383         raw_spin_lock_irqsave(&db->lock, flags);
384
385         obj = lookup_object(addr, db);
386         if (!obj) {
387                 obj = alloc_object(addr, db, descr);
388                 if (!obj) {
389                         debug_objects_enabled = 0;
390                         raw_spin_unlock_irqrestore(&db->lock, flags);
391                         debug_objects_oom();
392                         return;
393                 }
394                 debug_object_is_on_stack(addr, onstack);
395         }
396
397         switch (obj->state) {
398         case ODEBUG_STATE_NONE:
399         case ODEBUG_STATE_INIT:
400         case ODEBUG_STATE_INACTIVE:
401                 obj->state = ODEBUG_STATE_INIT;
402                 break;
403
404         case ODEBUG_STATE_ACTIVE:
405                 debug_print_object(obj, "init");
406                 state = obj->state;
407                 raw_spin_unlock_irqrestore(&db->lock, flags);
408                 debug_object_fixup(descr->fixup_init, addr, state);
409                 return;
410
411         case ODEBUG_STATE_DESTROYED:
412                 debug_print_object(obj, "init");
413                 break;
414         default:
415                 break;
416         }
417
418         raw_spin_unlock_irqrestore(&db->lock, flags);
419 }
420
421 /**
422  * debug_object_init - debug checks when an object is initialized
423  * @addr:       address of the object
424  * @descr:      pointer to an object specific debug description structure
425  */
426 void debug_object_init(void *addr, struct debug_obj_descr *descr)
427 {
428         if (!debug_objects_enabled)
429                 return;
430
431         __debug_object_init(addr, descr, 0);
432 }
433 EXPORT_SYMBOL_GPL(debug_object_init);
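/*
 * A minimal usage sketch (struct my_obj, my_obj_debug_descr and the
 * helpers are hypothetical, not part of this file): a subsystem describes
 * its object type once in a struct debug_obj_descr and then mirrors the
 * object's real lifetime with the debug_object_*() calls.
 *
 *	#include <linux/debugobjects.h>
 *
 *	struct my_obj {
 *		int pending;
 *	};
 *
 *	static struct debug_obj_descr my_obj_debug_descr = {
 *		.name		= "my_obj",
 *		.fixup_init	= my_obj_fixup_init,
 *	};
 *
 *	static void my_obj_setup(struct my_obj *o)
 *	{
 *		o->pending = 0;
 *		debug_object_init(o, &my_obj_debug_descr);
 *	}
 *
 *	static void my_obj_release(struct my_obj *o)
 *	{
 *		debug_object_free(o, &my_obj_debug_descr);
 *	}
 */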
434
435 /**
436  * debug_object_init_on_stack - debug checks when an object on stack is
437  *                              initialized
438  * @addr:       address of the object
439  * @descr:      pointer to an object specific debug description structure
440  */
441 void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
442 {
443         if (!debug_objects_enabled)
444                 return;
445
446         __debug_object_init(addr, descr, 1);
447 }
448 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
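/*
 * A sketch of the on-stack variant (names hypothetical): objects living
 * on the stack are announced with debug_object_init_on_stack() and must
 * be removed from the tracker with debug_object_free() before the stack
 * frame goes away.
 *
 *	static void my_obj_do_sync(void)
 *	{
 *		struct my_obj o;
 *
 *		debug_object_init_on_stack(&o, &my_obj_debug_descr);
 *		...
 *		debug_object_free(&o, &my_obj_debug_descr);
 *	}
 */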
449
450 /**
451  * debug_object_activate - debug checks when an object is activated
452  * @addr:       address of the object
453  * @descr:      pointer to an object specific debug description structure
454  * Returns 0 for success, -EINVAL if the check fails.
455  */
456 int debug_object_activate(void *addr, struct debug_obj_descr *descr)
457 {
458         enum debug_obj_state state;
459         struct debug_bucket *db;
460         struct debug_obj *obj;
461         unsigned long flags;
462         int ret;
463         struct debug_obj o = { .object = addr,
464                                .state = ODEBUG_STATE_NOTAVAILABLE,
465                                .descr = descr };
466
467         if (!debug_objects_enabled)
468                 return 0;
469
470         db = get_bucket((unsigned long) addr);
471
472         raw_spin_lock_irqsave(&db->lock, flags);
473
474         obj = lookup_object(addr, db);
475         if (obj) {
476                 switch (obj->state) {
477                 case ODEBUG_STATE_INIT:
478                 case ODEBUG_STATE_INACTIVE:
479                         obj->state = ODEBUG_STATE_ACTIVE;
480                         ret = 0;
481                         break;
482
483                 case ODEBUG_STATE_ACTIVE:
484                         debug_print_object(obj, "activate");
485                         state = obj->state;
486                         raw_spin_unlock_irqrestore(&db->lock, flags);
487                         ret = debug_object_fixup(descr->fixup_activate, addr, state);
488                         return ret ? 0 : -EINVAL;
489
490                 case ODEBUG_STATE_DESTROYED:
491                         debug_print_object(obj, "activate");
492                         ret = -EINVAL;
493                         break;
494                 default:
495                         ret = 0;
496                         break;
497                 }
498                 raw_spin_unlock_irqrestore(&db->lock, flags);
499                 return ret;
500         }
501
502         raw_spin_unlock_irqrestore(&db->lock, flags);
503         /*
504          * We are here when a static object is activated. We
505          * let the type specific code confirm whether this is
506          * true or not. If true, we just make sure that the
507          * static object is tracked in the object tracker. If
508          * not, this must be a bug, so we try to fix it up.
509          */
510         if (descr->is_static_object && descr->is_static_object(addr)) {
511                 /* track this static object */
512                 debug_object_init(addr, descr);
513                 debug_object_activate(addr, descr);
514         } else {
515                 debug_print_object(&o, "activate");
516                 ret = debug_object_fixup(descr->fixup_activate, addr,
517                                         ODEBUG_STATE_NOTAVAILABLE);
518                 return ret ? 0 : -EINVAL;
519         }
520         return 0;
521 }
522 EXPORT_SYMBOL_GPL(debug_object_activate);
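/*
 * A sketch of checking the return value (hypothetical caller):
 * debug_object_activate() returns -EINVAL when the state check fails and
 * no fixup could repair it, so a caller may refuse to queue an object
 * that is already active.
 *
 *	static int my_obj_queue(struct my_obj *o)
 *	{
 *		if (debug_object_activate(o, &my_obj_debug_descr))
 *			return -EINVAL;
 *		o->pending = 1;
 *		return 0;
 *	}
 */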
523
524 /**
525  * debug_object_deactivate - debug checks when an object is deactivated
526  * @addr:       address of the object
527  * @descr:      pointer to an object specific debug description structure
528  */
529 void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
530 {
531         struct debug_bucket *db;
532         struct debug_obj *obj;
533         unsigned long flags;
534
535         if (!debug_objects_enabled)
536                 return;
537
538         db = get_bucket((unsigned long) addr);
539
540         raw_spin_lock_irqsave(&db->lock, flags);
541
542         obj = lookup_object(addr, db);
543         if (obj) {
544                 switch (obj->state) {
545                 case ODEBUG_STATE_INIT:
546                 case ODEBUG_STATE_INACTIVE:
547                 case ODEBUG_STATE_ACTIVE:
548                         if (!obj->astate)
549                                 obj->state = ODEBUG_STATE_INACTIVE;
550                         else
551                                 debug_print_object(obj, "deactivate");
552                         break;
553
554                 case ODEBUG_STATE_DESTROYED:
555                         debug_print_object(obj, "deactivate");
556                         break;
557                 default:
558                         break;
559                 }
560         } else {
561                 struct debug_obj o = { .object = addr,
562                                        .state = ODEBUG_STATE_NOTAVAILABLE,
563                                        .descr = descr };
564
565                 debug_print_object(&o, "deactivate");
566         }
567
568         raw_spin_unlock_irqrestore(&db->lock, flags);
569 }
570 EXPORT_SYMBOL_GPL(debug_object_deactivate);
571
572 /**
573  * debug_object_destroy - debug checks when an object is destroyed
574  * @addr:       address of the object
575  * @descr:      pointer to an object specific debug description structure
576  */
577 void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
578 {
579         enum debug_obj_state state;
580         struct debug_bucket *db;
581         struct debug_obj *obj;
582         unsigned long flags;
583
584         if (!debug_objects_enabled)
585                 return;
586
587         db = get_bucket((unsigned long) addr);
588
589         raw_spin_lock_irqsave(&db->lock, flags);
590
591         obj = lookup_object(addr, db);
592         if (!obj)
593                 goto out_unlock;
594
595         switch (obj->state) {
596         case ODEBUG_STATE_NONE:
597         case ODEBUG_STATE_INIT:
598         case ODEBUG_STATE_INACTIVE:
599                 obj->state = ODEBUG_STATE_DESTROYED;
600                 break;
601         case ODEBUG_STATE_ACTIVE:
602                 debug_print_object(obj, "destroy");
603                 state = obj->state;
604                 raw_spin_unlock_irqrestore(&db->lock, flags);
605                 debug_object_fixup(descr->fixup_destroy, addr, state);
606                 return;
607
608         case ODEBUG_STATE_DESTROYED:
609                 debug_print_object(obj, "destroy");
610                 break;
611         default:
612                 break;
613         }
614 out_unlock:
615         raw_spin_unlock_irqrestore(&db->lock, flags);
616 }
617 EXPORT_SYMBOL_GPL(debug_object_destroy);
618
619 /**
620  * debug_object_free - debug checks when an object is freed
621  * @addr:       address of the object
622  * @descr:      pointer to an object specific debug description structure
623  */
624 void debug_object_free(void *addr, struct debug_obj_descr *descr)
625 {
626         enum debug_obj_state state;
627         struct debug_bucket *db;
628         struct debug_obj *obj;
629         unsigned long flags;
630
631         if (!debug_objects_enabled)
632                 return;
633
634         db = get_bucket((unsigned long) addr);
635
636         raw_spin_lock_irqsave(&db->lock, flags);
637
638         obj = lookup_object(addr, db);
639         if (!obj)
640                 goto out_unlock;
641
642         switch (obj->state) {
643         case ODEBUG_STATE_ACTIVE:
644                 debug_print_object(obj, "free");
645                 state = obj->state;
646                 raw_spin_unlock_irqrestore(&db->lock, flags);
647                 debug_object_fixup(descr->fixup_free, addr, state);
648                 return;
649         default:
650                 hlist_del(&obj->node);
651                 raw_spin_unlock_irqrestore(&db->lock, flags);
652                 free_object(obj);
653                 return;
654         }
655 out_unlock:
656         raw_spin_unlock_irqrestore(&db->lock, flags);
657 }
658 EXPORT_SYMBOL_GPL(debug_object_free);
659
660 /**
661  * debug_object_assert_init - debug checks when object should be initialized
662  * @addr:       address of the object
663  * @descr:      pointer to an object specific debug description structure
664  */
665 void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
666 {
667         struct debug_bucket *db;
668         struct debug_obj *obj;
669         unsigned long flags;
670
671         if (!debug_objects_enabled)
672                 return;
673
674         db = get_bucket((unsigned long) addr);
675
676         raw_spin_lock_irqsave(&db->lock, flags);
677
678         obj = lookup_object(addr, db);
679         if (!obj) {
680                 struct debug_obj o = { .object = addr,
681                                        .state = ODEBUG_STATE_NOTAVAILABLE,
682                                        .descr = descr };
683
684                 raw_spin_unlock_irqrestore(&db->lock, flags);
685                 /*
686                  * Maybe the object is static and we let the type specific
687                  * code confirm. If so, track this static object; otherwise
688                  * invoke the fixup.
689                  */
690                 if (descr->is_static_object && descr->is_static_object(addr)) {
691                         /* Track this static object */
692                         debug_object_init(addr, descr);
693                 } else {
694                         debug_print_object(&o, "assert_init");
695                         debug_object_fixup(descr->fixup_assert_init, addr,
696                                            ODEBUG_STATE_NOTAVAILABLE);
697                 }
698                 return;
699         }
700
701         raw_spin_unlock_irqrestore(&db->lock, flags);
702 }
703 EXPORT_SYMBOL_GPL(debug_object_assert_init);
704
705 /**
706  * debug_object_active_state - debug checks object usage state machine
707  * @addr:       address of the object
708  * @descr:      pointer to an object specific debug description structure
709  * @expect:     expected state
710  * @next:       state to move to if expected state is found
711  */
712 void
713 debug_object_active_state(void *addr, struct debug_obj_descr *descr,
714                           unsigned int expect, unsigned int next)
715 {
716         struct debug_bucket *db;
717         struct debug_obj *obj;
718         unsigned long flags;
719
720         if (!debug_objects_enabled)
721                 return;
722
723         db = get_bucket((unsigned long) addr);
724
725         raw_spin_lock_irqsave(&db->lock, flags);
726
727         obj = lookup_object(addr, db);
728         if (obj) {
729                 switch (obj->state) {
730                 case ODEBUG_STATE_ACTIVE:
731                         if (obj->astate == expect)
732                                 obj->astate = next;
733                         else
734                                 debug_print_object(obj, "active_state");
735                         break;
736
737                 default:
738                         debug_print_object(obj, "active_state");
739                         break;
740                 }
741         } else {
742                 struct debug_obj o = { .object = addr,
743                                        .state = ODEBUG_STATE_NOTAVAILABLE,
744                                        .descr = descr };
745
746                 debug_print_object(&o, "active_state");
747         }
748
749         raw_spin_unlock_irqrestore(&db->lock, flags);
750 }
751 EXPORT_SYMBOL_GPL(debug_object_active_state);
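/*
 * A sketch of astate tracking (the MY_OBJ_* values are hypothetical): a
 * subsystem can keep its own sub-state machine on top of
 * ODEBUG_STATE_ACTIVE. debug_object_active_state() only advances astate
 * when the current value matches @expect and warns otherwise; the RCU
 * code uses a similar scheme for queued rcu_head structures.
 *
 *	#define MY_OBJ_IDLE	0
 *	#define MY_OBJ_QUEUED	1
 *
 *	static void my_obj_enqueue(struct my_obj *o)
 *	{
 *		debug_object_active_state(o, &my_obj_debug_descr,
 *					  MY_OBJ_IDLE, MY_OBJ_QUEUED);
 *		...
 *	}
 */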
752
753 #ifdef CONFIG_DEBUG_OBJECTS_FREE
754 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
755 {
756         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
757         struct debug_obj_descr *descr;
758         enum debug_obj_state state;
759         struct debug_bucket *db;
760         struct hlist_node *tmp;
761         struct debug_obj *obj;
762         int cnt, objs_checked = 0;
763         bool work = false;
764
765         saddr = (unsigned long) address;
766         eaddr = saddr + size;
767         paddr = saddr & ODEBUG_CHUNK_MASK;
768         chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
769         chunks >>= ODEBUG_CHUNK_SHIFT;
770
771         for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
772                 db = get_bucket(paddr);
773
774 repeat:
775                 cnt = 0;
776                 raw_spin_lock_irqsave(&db->lock, flags);
777                 hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
778                         cnt++;
779                         oaddr = (unsigned long) obj->object;
780                         if (oaddr < saddr || oaddr >= eaddr)
781                                 continue;
782
783                         switch (obj->state) {
784                         case ODEBUG_STATE_ACTIVE:
785                                 debug_print_object(obj, "free");
786                                 descr = obj->descr;
787                                 state = obj->state;
788                                 raw_spin_unlock_irqrestore(&db->lock, flags);
789                                 debug_object_fixup(descr->fixup_free,
790                                                    (void *) oaddr, state);
791                                 goto repeat;
792                         default:
793                                 hlist_del(&obj->node);
794                                 work |= __free_object(obj);
795                                 break;
796                         }
797                 }
798                 raw_spin_unlock_irqrestore(&db->lock, flags);
799
800                 if (cnt > debug_objects_maxchain)
801                         debug_objects_maxchain = cnt;
802
803                 objs_checked += cnt;
804         }
805
806         if (objs_checked > debug_objects_maxchecked)
807                 debug_objects_maxchecked = objs_checked;
808
809         /* Schedule work to actually kmem_cache_free() objects */
810         if (work)
811                 schedule_work(&debug_obj_work);
812 }
813
814 void debug_check_no_obj_freed(const void *address, unsigned long size)
815 {
816         if (debug_objects_enabled)
817                 __debug_check_no_obj_freed(address, size);
818 }
819 #endif
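/*
 * A sketch of the intended caller (my_free_pages() is hypothetical):
 * allocator free paths call debug_check_no_obj_freed() on a region before
 * it is handed back, so trackers still marked active inside that region
 * are reported and, where possible, repaired via fixup_free.
 *
 *	static void my_free_pages(void *addr, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(addr, size);
 *		...
 *	}
 */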
820
821 #ifdef CONFIG_DEBUG_FS
822
823 static int debug_stats_show(struct seq_file *m, void *v)
824 {
825         seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
826         seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
827         seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
828         seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
829         seq_printf(m, "pool_free     :%d\n", obj_pool_free);
830         seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
831         seq_printf(m, "pool_used     :%d\n", obj_pool_used);
832         seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
833         seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
834         seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
835         seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
836         return 0;
837 }
838
839 static int debug_stats_open(struct inode *inode, struct file *filp)
840 {
841         return single_open(filp, debug_stats_show, NULL);
842 }
843
844 static const struct file_operations debug_stats_fops = {
845         .open           = debug_stats_open,
846         .read           = seq_read,
847         .llseek         = seq_lseek,
848         .release        = single_release,
849 };
850
851 static int __init debug_objects_init_debugfs(void)
852 {
853         struct dentry *dbgdir, *dbgstats;
854
855         if (!debug_objects_enabled)
856                 return 0;
857
858         dbgdir = debugfs_create_dir("debug_objects", NULL);
859         if (!dbgdir)
860                 return -ENOMEM;
861
862         dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
863                                        &debug_stats_fops);
864         if (!dbgstats)
865                 goto err;
866
867         return 0;
868
869 err:
870         debugfs_remove(dbgdir);
871
872         return -ENOMEM;
873 }
874 __initcall(debug_objects_init_debugfs);
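/*
 * With debugfs mounted at the usual /sys/kernel/debug (the mount point
 * depends on the system configuration), the counters printed by
 * debug_stats_show() can be read from
 * /sys/kernel/debug/debug_objects/stats to watch pool usage at runtime.
 */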
875
876 #else
877 static inline void debug_objects_init_debugfs(void) { }
878 #endif
879
880 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
881
882 /* Random data structure for the self test */
883 struct self_test {
884         unsigned long   dummy1[6];
885         int             static_init;
886         unsigned long   dummy2[3];
887 };
888
889 static __initdata struct debug_obj_descr descr_type_test;
890
891 static bool __init is_static_object(void *addr)
892 {
893         struct self_test *obj = addr;
894
895         return obj->static_init;
896 }
897
898 /*
899  * fixup_init is called when:
900  * - an active object is initialized
901  */
902 static bool __init fixup_init(void *addr, enum debug_obj_state state)
903 {
904         struct self_test *obj = addr;
905
906         switch (state) {
907         case ODEBUG_STATE_ACTIVE:
908                 debug_object_deactivate(obj, &descr_type_test);
909                 debug_object_init(obj, &descr_type_test);
910                 return true;
911         default:
912                 return false;
913         }
914 }
915
916 /*
917  * fixup_activate is called when:
918  * - an active object is activated
919  * - an unknown non-static object is activated
920  */
921 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
922 {
923         struct self_test *obj = addr;
924
925         switch (state) {
926         case ODEBUG_STATE_NOTAVAILABLE:
927                 return true;
928         case ODEBUG_STATE_ACTIVE:
929                 debug_object_deactivate(obj, &descr_type_test);
930                 debug_object_activate(obj, &descr_type_test);
931                 return true;
932
933         default:
934                 return false;
935         }
936 }
937
938 /*
939  * fixup_destroy is called when:
940  * - an active object is destroyed
941  */
942 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
943 {
944         struct self_test *obj = addr;
945
946         switch (state) {
947         case ODEBUG_STATE_ACTIVE:
948                 debug_object_deactivate(obj, &descr_type_test);
949                 debug_object_destroy(obj, &descr_type_test);
950                 return true;
951         default:
952                 return false;
953         }
954 }
955
956 /*
957  * fixup_free is called when:
958  * - an active object is freed
959  */
960 static bool __init fixup_free(void *addr, enum debug_obj_state state)
961 {
962         struct self_test *obj = addr;
963
964         switch (state) {
965         case ODEBUG_STATE_ACTIVE:
966                 debug_object_deactivate(obj, &descr_type_test);
967                 debug_object_free(obj, &descr_type_test);
968                 return true;
969         default:
970                 return false;
971         }
972 }
973
974 static int __init
975 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
976 {
977         struct debug_bucket *db;
978         struct debug_obj *obj;
979         unsigned long flags;
980         int res = -EINVAL;
981
982         db = get_bucket((unsigned long) addr);
983
984         raw_spin_lock_irqsave(&db->lock, flags);
985
986         obj = lookup_object(addr, db);
987         if (!obj && state != ODEBUG_STATE_NONE) {
988                 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
989                 goto out;
990         }
991         if (obj && obj->state != state) {
992                 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
993                        obj->state, state);
994                 goto out;
995         }
996         if (fixups != debug_objects_fixups) {
997                 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
998                        fixups, debug_objects_fixups);
999                 goto out;
1000         }
1001         if (warnings != debug_objects_warnings) {
1002                 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1003                        warnings, debug_objects_warnings);
1004                 goto out;
1005         }
1006         res = 0;
1007 out:
1008         raw_spin_unlock_irqrestore(&db->lock, flags);
1009         if (res)
1010                 debug_objects_enabled = 0;
1011         return res;
1012 }
1013
1014 static __initdata struct debug_obj_descr descr_type_test = {
1015         .name                   = "selftest",
1016         .is_static_object       = is_static_object,
1017         .fixup_init             = fixup_init,
1018         .fixup_activate         = fixup_activate,
1019         .fixup_destroy          = fixup_destroy,
1020         .fixup_free             = fixup_free,
1021 };
1022
1023 static __initdata struct self_test obj = { .static_init = 0 };
1024
1025 static void __init debug_objects_selftest(void)
1026 {
1027         int fixups, oldfixups, warnings, oldwarnings;
1028         unsigned long flags;
1029
1030         local_irq_save(flags);
1031
1032         fixups = oldfixups = debug_objects_fixups;
1033         warnings = oldwarnings = debug_objects_warnings;
1034         descr_test = &descr_type_test;
1035
1036         debug_object_init(&obj, &descr_type_test);
1037         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1038                 goto out;
1039         debug_object_activate(&obj, &descr_type_test);
1040         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1041                 goto out;
1042         debug_object_activate(&obj, &descr_type_test);
1043         if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1044                 goto out;
1045         debug_object_deactivate(&obj, &descr_type_test);
1046         if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1047                 goto out;
1048         debug_object_destroy(&obj, &descr_type_test);
1049         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1050                 goto out;
1051         debug_object_init(&obj, &descr_type_test);
1052         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1053                 goto out;
1054         debug_object_activate(&obj, &descr_type_test);
1055         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1056                 goto out;
1057         debug_object_deactivate(&obj, &descr_type_test);
1058         if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1059                 goto out;
1060         debug_object_free(&obj, &descr_type_test);
1061         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1062                 goto out;
1063
1064         obj.static_init = 1;
1065         debug_object_activate(&obj, &descr_type_test);
1066         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1067                 goto out;
1068         debug_object_init(&obj, &descr_type_test);
1069         if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1070                 goto out;
1071         debug_object_free(&obj, &descr_type_test);
1072         if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1073                 goto out;
1074
1075 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1076         debug_object_init(&obj, &descr_type_test);
1077         if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1078                 goto out;
1079         debug_object_activate(&obj, &descr_type_test);
1080         if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1081                 goto out;
1082         __debug_check_no_obj_freed(&obj, sizeof(obj));
1083         if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1084                 goto out;
1085 #endif
1086         pr_info("selftest passed\n");
1087
1088 out:
1089         debug_objects_fixups = oldfixups;
1090         debug_objects_warnings = oldwarnings;
1091         descr_test = NULL;
1092
1093         local_irq_restore(flags);
1094 }
1095 #else
1096 static inline void debug_objects_selftest(void) { }
1097 #endif
1098
1099 /*
1100  * Called during early boot to initialize the hash buckets and link
1101  * the static object pool objects into the pool list. After this call
1102  * the object tracker is fully operational.
1103  */
1104 void __init debug_objects_early_init(void)
1105 {
1106         int i;
1107
1108         for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1109                 raw_spin_lock_init(&obj_hash[i].lock);
1110
1111         for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1112                 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1113 }
1114
1115 /*
1116  * Convert the statically allocated objects to dynamic ones:
1117  */
1118 static int __init debug_objects_replace_static_objects(void)
1119 {
1120         struct debug_bucket *db = obj_hash;
1121         struct hlist_node *tmp;
1122         struct debug_obj *obj, *new;
1123         HLIST_HEAD(objects);
1124         int i, cnt = 0;
1125
1126         for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1127                 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1128                 if (!obj)
1129                         goto free;
1130                 hlist_add_head(&obj->node, &objects);
1131         }
1132
1133         /*
1134          * debug_objects_mem_init() is now called early, while only one CPU is up
1135          * and interrupts have been disabled, so it is safe to replace the
1136          * active object references.
1137          */
1138
1139         /* Remove the statically allocated objects from the pool */
1140         hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1141                 hlist_del(&obj->node);
1142         /* Move the allocated objects to the pool */
1143         hlist_move_list(&objects, &obj_pool);
1144
1145         /* Replace the active object references */
1146         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1147                 hlist_move_list(&db->list, &objects);
1148
1149                 hlist_for_each_entry(obj, &objects, node) {
1150                         new = hlist_entry(obj_pool.first, typeof(*obj), node);
1151                         hlist_del(&new->node);
1152                         /* copy object data */
1153                         *new = *obj;
1154                         hlist_add_head(&new->node, &db->list);
1155                         cnt++;
1156                 }
1157         }
1158
1159         pr_debug("%d of %d active objects replaced\n",
1160                  cnt, obj_pool_used);
1161         return 0;
1162 free:
1163         hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1164                 hlist_del(&obj->node);
1165                 kmem_cache_free(obj_cache, obj);
1166         }
1167         return -ENOMEM;
1168 }
1169
1170 /*
1171  * Called after the kmem_caches are functional to set up a dedicated
1172  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1173  * prevents the debug code from being called on kmem_cache_free() for
1174  * the debug tracker objects, avoiding recursive calls.
1175  */
1176 void __init debug_objects_mem_init(void)
1177 {
1178         if (!debug_objects_enabled)
1179                 return;
1180
1181         obj_cache = kmem_cache_create("debug_objects_cache",
1182                                       sizeof (struct debug_obj), 0,
1183                                       SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1184                                       NULL);
1185
1186         if (!obj_cache || debug_objects_replace_static_objects()) {
1187                 debug_objects_enabled = 0;
1188                 kmem_cache_destroy(obj_cache);
1189                 pr_warn("out of memory.\n");
1190         } else
1191                 debug_objects_selftest();
1192
1193         /*
1194          * Increase the thresholds for allocating and freeing objects
1195          * according to the number of possible CPUs available in the system.
1196          */
1197         debug_objects_pool_size += num_possible_cpus() * 32;
1198         debug_objects_pool_min_level += num_possible_cpus() * 4;
1199 }
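/*
 * A worked example of the scaling above (CPU count chosen for
 * illustration): with 8 possible CPUs the pool target becomes
 * ODEBUG_POOL_SIZE + 8 * 32 = 1024 + 256 = 1280 objects and the refill
 * threshold becomes ODEBUG_POOL_MIN_LEVEL + 8 * 4 = 256 + 32 = 288, so
 * fill_pool() starts allocating once fewer than 288 objects are free.
 */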