/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
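
/*
 * Worked example, assuming HZ=1000: ODEBUG_FREE_WORK_DELAY is
 * DIV_ROUND_UP(1000, 10) = 100 jiffies, i.e. one free pass per 100ms.
 * Ten passes per second, each freeing at most ODEBUG_FREE_WORK_MAX (1024)
 * objects, gives the ~10k objects/s bound mentioned above.
 */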
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * are also not excluded from this error.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;
/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
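
/*
 * Usage sketch: the two early params above map directly to kernel command
 * line switches. Booting with "debug_objects" force-enables object
 * debugging regardless of CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT, while
 * "no_debug_objects" force-disables it.
 */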
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}
/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload
	 * may be gearing up to use more and more objects, don't free any
	 * of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * The pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif
/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}
/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
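
/*
 * Usage sketch (not from this file): a subsystem describes its tracked
 * type with a struct debug_obj_descr and calls debug_object_init() from
 * its own init path. "my_obj"/"my_obj_descr" are hypothetical names:
 *
 *	static const struct debug_obj_descr my_obj_descr = {
 *		.name	= "my_obj",
 *	};
 *
 *	void my_obj_setup(struct my_obj *o)
 *	{
 *		debug_object_init(o, &my_obj_descr);
 *		// ... regular initialization of *o ...
 *	}
 */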
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
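
/*
 * Sketch: objects living on the kernel stack must use the _on_stack
 * variant, otherwise debug_object_is_on_stack() warns about the missing
 * annotation. Hypothetical caller:
 *
 *	void my_func(void)
 *	{
 *		struct my_obj o;
 *
 *		debug_object_init_on_stack(&o, &my_obj_descr);
 *		// ... use &o ...
 *		debug_object_free(&o, &my_obj_descr);	// before returning
 *	}
 */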
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. If true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					 ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
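
/*
 * Sketch: callers typically activate the object right before handing it
 * to an asynchronous context and check the result; 0 means the state
 * transition was legal. Hypothetical caller:
 *
 *	if (debug_object_activate(o, &my_obj_descr))
 *		return;			// check failed, not fixed up
 *	hand_off_to_irq_context(o);	// hypothetical
 *	// ...
 *	debug_object_deactivate(o, &my_obj_descr);	// on completion
 */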
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
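
/*
 * Sketch: this is meant for operations that are legal on a never
 * initialized object, e.g. a cancel operation that may run before the
 * corresponding init. Hypothetical caller:
 *
 *	void my_obj_cancel(struct my_obj *o)
 *	{
 *		debug_object_assert_init(o, &my_obj_descr);
 *		// ... cancel is a no-op if *o was never initialized ...
 *	}
 */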
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
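
/*
 * Sketch: "astate" is a caller-owned sub-state of an ACTIVE object; the
 * call warns unless the object is active and its astate equals @expect,
 * in which case astate is advanced to @next. With hypothetical values:
 *
 *	#define MY_QUEUED	1
 *	#define MY_RUNNING	2
 *
 *	debug_object_active_state(o, &my_obj_descr, MY_QUEUED, MY_RUNNING);
 */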
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
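
/*
 * Sketch: allocators are expected to call this hook on their free paths
 * so that freeing memory which still contains tracked objects is caught.
 * Conceptually (hypothetical wrapper):
 *
 *	void my_free(void *mem, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(mem, size);
 *		// ... return the memory to the allocator ...
 *	}
 */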
#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
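
/*
 * The counters above are readable at runtime, e.g. (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :...
 *	pool_free     :...
 *	objs_allocated:...
 */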
#else
static inline void debug_objects_init_debugfs(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}
/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU is
	 * up and interrupts have been disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
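
/*
 * Example arithmetic, assuming 8 possible CPUs: extras = 8 *
 * ODEBUG_BATCH_SIZE (16) = 128, growing debug_objects_pool_size from
 * 1024 to 1152 and debug_objects_pool_min_level from 256 to 384.
 */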